Add cpu and io indicates to audit log (#513)
Record query consumption in the FE audit log. It works as follows: one instance of each parent plan fragment is responsible for accumulating its sub-plans' consumption and sending the total to its parent; the BE coordinator, being a single instance, therefore ends up with the query's total consumption.
This commit is contained in:
@ -79,8 +79,8 @@ public class CurrentQueryFragmentProcNode implements ProcNodeInterface {
|
||||
rowData.add(instanceConsumption.getFragmentId());
|
||||
rowData.add(instanceConsumption.getInstanceId().toString());
|
||||
rowData.add(instanceConsumption.getAddress().toString());
|
||||
rowData.add(String.valueOf(instanceConsumption.getTotalIoConsumption()));
|
||||
rowData.add(String.valueOf(instanceConsumption.getTotalCpuConsumption()));
|
||||
rowData.add(instanceConsumption.getFormattingIoConsumption());
|
||||
rowData.add(instanceConsumption.getFormattingCpuConsumption());
|
||||
sortedRowDatas.add(rowData);
|
||||
}
|
||||
|
||||
|
||||
@ -36,6 +36,7 @@ import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Formatter;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
@ -334,7 +335,7 @@ public class CurrentQueryInfoProvider {
|
||||
}
|
||||
}
|
||||
|
||||
public long getTotalCpuConsumption() {
|
||||
private long getTotalCpuConsumption() {
|
||||
long cpu = 0;
|
||||
for (ConsumptionCalculator consumption : calculators) {
|
||||
cpu += consumption.getCpu();
|
||||
@ -342,13 +343,27 @@ public class CurrentQueryInfoProvider {
|
||||
return cpu;
|
||||
}
|
||||
|
||||
public long getTotalIoConsumption() {
|
||||
private long getTotalIoConsumption() {
|
||||
long io = 0;
|
||||
for (ConsumptionCalculator consumption : calculators) {
|
||||
io += consumption.getIo();
|
||||
}
|
||||
return io;
|
||||
}
|
||||
|
||||
public String getFormattingCpuConsumption() {
|
||||
final StringBuilder builder = new StringBuilder();
|
||||
builder.append(getTotalCpuConsumption()).append(" Rows");
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
public String getFormattingIoConsumption() {
|
||||
final Pair<Double, String> pair = DebugUtil.getByteUint(getTotalIoConsumption());
|
||||
final Formatter fmt = new Formatter();
|
||||
final StringBuilder builder = new StringBuilder();
|
||||
builder.append(fmt.format("%.2f", pair.first)).append(" ").append(pair.second);
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
|
||||
public static class InstanceConsumption extends Consumption {
|
||||
|
||||
@ -76,8 +76,8 @@ public class CurrentQueryStatisticsProcDir implements ProcDirInterface {
|
||||
values.add(item.getDb());
|
||||
values.add(item.getUser());
|
||||
final CurrentQueryInfoProvider.Consumption consumption = consumptions.get(item.getQueryId());
|
||||
values.add(String.valueOf(consumption.getTotalIoConsumption()));
|
||||
values.add(String.valueOf(consumption.getTotalCpuConsumption()));
|
||||
values.add(consumption.getFormattingIoConsumption());
|
||||
values.add(consumption.getFormattingCpuConsumption());
|
||||
values.add(item.getQueryExecTime());
|
||||
sortedRowData.add(values);
|
||||
}
|
||||
|
||||
@ -37,6 +37,7 @@ import org.apache.doris.mysql.MysqlSerializer;
|
||||
import org.apache.doris.thrift.TMasterOpRequest;
|
||||
import org.apache.doris.thrift.TMasterOpResult;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
@ -92,12 +93,16 @@ public class ConnectProcessor {
|
||||
ctx.getState().setOk();
|
||||
}
|
||||
|
||||
private void auditAfterExec(String origStmt, StatementBase parsedStmt) {
|
||||
private void auditAfterExec(String origStmt, StatementBase parsedStmt,
|
||||
StmtExecutor.QueryConsumption queryConsumption) {
|
||||
// slow query
|
||||
long elapseMs = System.currentTimeMillis() - ctx.getStartTime();
|
||||
// query state log
|
||||
ctx.getAuditBuilder().put("state", ctx.getState());
|
||||
ctx.getAuditBuilder().put("time", elapseMs);
|
||||
Preconditions.checkNotNull(queryConsumption);
|
||||
ctx.getAuditBuilder().put("cpu", queryConsumption.getFormattingCpuConsumption());
|
||||
ctx.getAuditBuilder().put("io", queryConsumption.getFormattingIoConsumption());
|
||||
ctx.getAuditBuilder().put("returnRows", ctx.getReturnRows());
|
||||
ctx.getAuditBuilder().put("stmt_id", ctx.getStmtId());
|
||||
|
||||
@ -177,7 +182,8 @@ public class ConnectProcessor {
|
||||
|
||||
// audit after exec
|
||||
// replace '\n' to '\\n' to make string in one line
|
||||
auditAfterExec(stmt.replace("\n", " \\n"), executor.getParsedStmt());
|
||||
auditAfterExec(stmt.replace("\n", " \\n"), executor.getParsedStmt(),
|
||||
executor.getQueryConsumptionForAuditLog());
|
||||
}
|
||||
|
||||
// Get the column definitions of a table
|
||||
|
||||
@ -574,12 +574,12 @@ public class Coordinator {
|
||||
}
|
||||
}
|
||||
|
||||
TResultBatch getNext() throws Exception {
|
||||
public RowBatch getNext() throws Exception {
|
||||
if (receiver == null) {
|
||||
throw new UserException("There is no receiver.");
|
||||
}
|
||||
|
||||
TResultBatch resultBatch;
|
||||
RowBatch resultBatch;
|
||||
Status status = new Status();
|
||||
|
||||
resultBatch = receiver.getNext(status);
|
||||
@ -611,7 +611,7 @@ public class Coordinator {
|
||||
}
|
||||
}
|
||||
|
||||
if (resultBatch == null) {
|
||||
if (resultBatch.isEos()) {
|
||||
this.returnedAllResults = true;
|
||||
|
||||
// if this query is a block query do not cancel.
|
||||
@ -622,7 +622,7 @@ public class Coordinator {
|
||||
cancelInternal();
|
||||
}
|
||||
} else {
|
||||
numReceivedRows += resultBatch.getRowsSize();
|
||||
numReceivedRows += resultBatch.getBatch().getRowsSize();
|
||||
}
|
||||
|
||||
return resultBatch;
|
||||
@ -746,6 +746,12 @@ public class Coordinator {
|
||||
dest.fragment_instance_id = destParams.instanceExecParams.get(j).instanceId;
|
||||
dest.server = toRpcHost(destParams.instanceExecParams.get(j).host);
|
||||
dest.setBrpc_server(toBrpcHost(destParams.instanceExecParams.get(j).host));
|
||||
// select first dest as consumption transfer chain.
|
||||
if (j == 0) {
|
||||
dest.setIs_transfer_chain(true);
|
||||
} else {
|
||||
dest.setIs_transfer_chain(false);
|
||||
}
|
||||
params.destinations.add(dest);
|
||||
}
|
||||
}
|
||||
@ -1038,7 +1044,7 @@ public class Coordinator {
|
||||
return result;
|
||||
}
|
||||
|
||||
public void createScanInstance(PlanNodeId leftMostScanId, FragmentExecParams fragmentExecParams)
|
||||
private void createScanInstance(PlanNodeId leftMostScanId, FragmentExecParams fragmentExecParams)
|
||||
throws UserException {
|
||||
int maxNumInstance = queryOptions.mt_dop;
|
||||
if (maxNumInstance == 0) {
|
||||
@ -1150,7 +1156,7 @@ public class Coordinator {
|
||||
}
|
||||
|
||||
// create collocated instance according to inputFragments
|
||||
public void createCollocatedInstance(FragmentExecParams fragmentExecParams) {
|
||||
private void createCollocatedInstance(FragmentExecParams fragmentExecParams) {
|
||||
Preconditions.checkState(fragmentExecParams.inputFragments.size() >= 1);
|
||||
final FragmentExecParams inputFragmentParams = fragmentExecParamsMap.get(fragmentExecParams.
|
||||
inputFragments.get(0));
|
||||
@ -1169,7 +1175,7 @@ public class Coordinator {
|
||||
}
|
||||
|
||||
|
||||
public void createUnionInstance(FragmentExecParams fragmentExecParams) {
|
||||
private void createUnionInstance(FragmentExecParams fragmentExecParams) {
|
||||
final PlanFragment fragment = fragmentExecParams.fragment;
|
||||
// Add hosts of scan nodes
|
||||
List<PlanNodeId> scanNodeIds = findScanNodes(fragment.getPlanRoot());
|
||||
@ -1563,7 +1569,6 @@ public class Coordinator {
|
||||
params.setResource_info(tResourceInfo);
|
||||
params.params.setQuery_id(queryId);
|
||||
params.params.setFragment_instance_id(instanceExecParam.instanceId);
|
||||
|
||||
Map<Integer, List<TScanRangeParams>> scanRanges = instanceExecParam.perNodeScanRanges;
|
||||
if (scanRanges == null) {
|
||||
scanRanges = Maps.newHashMap();
|
||||
|
||||
@ -58,11 +58,11 @@ public class ResultReceiver {
|
||||
this.timeoutTs = System.currentTimeMillis() + timeoutMs;
|
||||
}
|
||||
|
||||
public TResultBatch getNext(Status status) throws TException {
|
||||
public RowBatch getNext(Status status) throws TException {
|
||||
if (isDone) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final RowBatch rowBatch = new RowBatch();
|
||||
try {
|
||||
while (!isDone && !isCancel) {
|
||||
PFetchDataRequest request = new PFetchDataRequest(finstId);
|
||||
@ -90,6 +90,10 @@ public class ResultReceiver {
|
||||
if (code != TStatusCode.OK) {
|
||||
status.setPstatus(pResult.status);
|
||||
return null;
|
||||
}
|
||||
|
||||
if (pResult.queryConsumption != null) {
|
||||
rowBatch.setQueryConsumption(pResult.queryConsumption);
|
||||
}
|
||||
|
||||
if (packetIdx != pResult.packetSeq) {
|
||||
@ -106,7 +110,9 @@ public class ResultReceiver {
|
||||
TResultBatch resultBatch = new TResultBatch();
|
||||
TDeserializer deserializer = new TDeserializer();
|
||||
deserializer.deserialize(resultBatch, serialResult);
|
||||
return resultBatch;
|
||||
rowBatch.setBatch(resultBatch);
|
||||
rowBatch.setEos(pResult.eos);
|
||||
return rowBatch;
|
||||
}
|
||||
}
|
||||
} catch (RpcException e) {
|
||||
@ -134,7 +140,7 @@ public class ResultReceiver {
|
||||
if (isCancel) {
|
||||
status.setStatus(Status.CANCELLED);
|
||||
}
|
||||
return null;
|
||||
return rowBatch;
|
||||
}
|
||||
|
||||
public void cancel() {
|
||||
|
||||
55
fe/src/main/java/org/apache/doris/qe/RowBatch.java
Normal file
55
fe/src/main/java/org/apache/doris/qe/RowBatch.java
Normal file
@ -0,0 +1,55 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.doris.qe;
|
||||
|
||||
import org.apache.doris.rpc.PQueryConsumption;
|
||||
import org.apache.doris.thrift.TResultBatch;
|
||||
|
||||
public final class RowBatch {
|
||||
private TResultBatch batch;
|
||||
private PQueryConsumption queryConsumption;
|
||||
private boolean eos;
|
||||
|
||||
public RowBatch() {
|
||||
eos = true;
|
||||
}
|
||||
|
||||
public TResultBatch getBatch() {
|
||||
return batch;
|
||||
}
|
||||
|
||||
public void setBatch(TResultBatch batch) {
|
||||
this.batch = batch;
|
||||
}
|
||||
|
||||
public PQueryConsumption getQueryConsumption() {
|
||||
return queryConsumption;
|
||||
}
|
||||
|
||||
public void setQueryConsumption(PQueryConsumption queryConsumption) {
|
||||
this.queryConsumption = queryConsumption;
|
||||
}
|
||||
|
||||
public boolean isEos() {
|
||||
return eos;
|
||||
}
|
||||
|
||||
public void setEos(boolean eos) {
|
||||
this.eos = eos;
|
||||
}
|
||||
}
|
||||
@ -17,6 +17,7 @@
|
||||
|
||||
package org.apache.doris.qe;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Strings;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Maps;
|
||||
@ -52,6 +53,7 @@ import org.apache.doris.common.DdlException;
|
||||
import org.apache.doris.common.ErrorCode;
|
||||
import org.apache.doris.common.ErrorReport;
|
||||
import org.apache.doris.common.NotImplementedException;
|
||||
import org.apache.doris.common.Pair;
|
||||
import org.apache.doris.common.UserException;
|
||||
import org.apache.doris.common.util.DebugUtil;
|
||||
import org.apache.doris.common.util.ProfileManager;
|
||||
@ -63,6 +65,7 @@ import org.apache.doris.mysql.MysqlSerializer;
|
||||
import org.apache.doris.mysql.privilege.PrivPredicate;
|
||||
import org.apache.doris.planner.Planner;
|
||||
import org.apache.doris.rewrite.ExprRewriter;
|
||||
import org.apache.doris.rpc.PQueryConsumption;
|
||||
import org.apache.doris.rpc.RpcException;
|
||||
import org.apache.doris.thrift.TExplainLevel;
|
||||
import org.apache.doris.thrift.TQueryOptions;
|
||||
@ -75,6 +78,7 @@ import org.apache.logging.log4j.Logger;
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Formatter;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
@ -101,6 +105,7 @@ public class StmtExecutor {
|
||||
private Planner planner;
|
||||
private boolean isProxy;
|
||||
private ShowResultSet proxyResultSet = null;
|
||||
private QueryConsumption consumptionForAuditLog;
|
||||
|
||||
public StmtExecutor(ConnectContext context, String stmt, boolean isProxy) {
|
||||
this.context = context;
|
||||
@ -537,26 +542,34 @@ public class StmtExecutor {
|
||||
// so We need to send fields after first batch arrived
|
||||
|
||||
// send result
|
||||
TResultBatch batch;
|
||||
RowBatch batch;
|
||||
MysqlChannel channel = context.getMysqlChannel();
|
||||
boolean isSendFields = false;
|
||||
while ((batch = coord.getNext()) != null) {
|
||||
|
||||
while ((batch = coord.getNext()) != null && !batch.isEos()) {
|
||||
if (!isSendFields) {
|
||||
sendFields(queryStmt.getColLabels(), queryStmt.getResultExprs());
|
||||
}
|
||||
isSendFields = true;
|
||||
|
||||
for (ByteBuffer row : batch.getRows()) {
|
||||
for (ByteBuffer row : batch.getBatch().getRows()) {
|
||||
channel.sendOnePacket(row);
|
||||
}
|
||||
context.updateReturnRows(batch.getRows().size());
|
||||
context.updateReturnRows(batch.getBatch().getRows().size());
|
||||
}
|
||||
setConsumptionForAuditLog(batch);
|
||||
if (!isSendFields) {
|
||||
sendFields(queryStmt.getColLabels(), queryStmt.getResultExprs());
|
||||
}
|
||||
context.getState().setEof();
|
||||
}
|
||||
|
||||
private void setConsumptionForAuditLog(RowBatch batch) {
|
||||
if (batch != null) {
|
||||
final PQueryConsumption queryConsumption = batch.getQueryConsumption();
|
||||
consumptionForAuditLog = new QueryConsumption(queryConsumption.cpu, queryConsumption.io);
|
||||
}
|
||||
}
|
||||
|
||||
// Process a select statement.
|
||||
private void handleInsertStmt() throws Exception {
|
||||
// Every time set no send flag and clean all data in buffer
|
||||
@ -774,4 +787,40 @@ public class StmtExecutor {
|
||||
ExportStmt exportStmt = (ExportStmt) parsedStmt;
|
||||
context.getCatalog().getExportMgr().addExportJob(exportStmt);
|
||||
}
|
||||
|
||||
public QueryConsumption getQueryConsumptionForAuditLog() {
|
||||
if (consumptionForAuditLog == null) {
|
||||
consumptionForAuditLog = new QueryConsumption();
|
||||
}
|
||||
return consumptionForAuditLog;
|
||||
}
|
||||
|
||||
public static class QueryConsumption {
|
||||
private final long cpu;
|
||||
private final long io;
|
||||
|
||||
public QueryConsumption() {
|
||||
this.cpu = 0;
|
||||
this.io = 0;
|
||||
}
|
||||
|
||||
public QueryConsumption(long cpu, long io) {
|
||||
this.cpu = cpu;
|
||||
this.io = io;
|
||||
}
|
||||
|
||||
public String getFormattingCpuConsumption() {
|
||||
final StringBuilder builder = new StringBuilder();
|
||||
builder.append(cpu).append(" Rows");
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
public String getFormattingIoConsumption() {
|
||||
final Pair<Double, String> pair = DebugUtil.getByteUint(io);
|
||||
final Formatter fmt = new Formatter();
|
||||
final StringBuilder builder = new StringBuilder();
|
||||
builder.append(fmt.format("%.2f", pair.first)).append(" ").append(pair.second);
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,4 +28,6 @@ public class PFetchDataResult {
|
||||
public long packetSeq;
|
||||
@Protobuf(order = 3, required = false)
|
||||
public boolean eos;
|
||||
@Protobuf(order = 4, required = false)
|
||||
public PQueryConsumption queryConsumption;
|
||||
}
|
||||
|
||||
29
fe/src/main/java/org/apache/doris/rpc/PQueryConsumption.java
Normal file
29
fe/src/main/java/org/apache/doris/rpc/PQueryConsumption.java
Normal file
@ -0,0 +1,29 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package org.apache.doris.rpc;
|
||||
|
||||
import com.baidu.bjf.remoting.protobuf.annotation.Protobuf;
|
||||
import com.baidu.bjf.remoting.protobuf.annotation.ProtobufClass;
|
||||
|
||||
// FE-side mirror of the BE's query-consumption protobuf message, attached to
// fetch-data RPC results so the coordinator can report total consumption.
@ProtobufClass
public class PQueryConsumption {
    // Total cpu consumption; the FE renders this value as a row count
    // ("<n> Rows") in the audit log and proc nodes.
    @Protobuf(order = 1, required = false)
    public long cpu;
    // Total io consumption; the FE renders this value as a byte amount
    // via DebugUtil.getByteUint.
    @Protobuf(order = 2, required = false)
    public long io;
}
|
||||
@ -231,6 +231,7 @@ public class ConnectProcessorTest {
|
||||
// Mock statement executor
|
||||
StmtExecutor qe = EasyMock.createNiceMock(StmtExecutor.class);
|
||||
qe.execute();
|
||||
EasyMock.expect(qe.getQueryConsumptionForAuditLog()).andReturn(new StmtExecutor.QueryConsumption());
|
||||
EasyMock.expectLastCall().anyTimes();
|
||||
EasyMock.replay(qe);
|
||||
PowerMock.expectNew(
|
||||
@ -254,11 +255,11 @@ public class ConnectProcessorTest {
|
||||
StmtExecutor qe = EasyMock.createNiceMock(StmtExecutor.class);
|
||||
qe.execute();
|
||||
EasyMock.expectLastCall().andThrow(new IOException("Fail")).anyTimes();
|
||||
EasyMock.expect(qe.getQueryConsumptionForAuditLog()).andReturn(new StmtExecutor.QueryConsumption());
|
||||
EasyMock.replay(qe);
|
||||
PowerMock.expectNew(StmtExecutor.class, EasyMock.isA(ConnectContext.class), EasyMock.isA(String.class))
|
||||
.andReturn(qe).anyTimes();
|
||||
PowerMock.replay(StmtExecutor.class);
|
||||
|
||||
processor.processOnce();
|
||||
Assert.assertEquals(MysqlCommand.COM_QUERY, myContext.getCommand());
|
||||
}
|
||||
@ -272,6 +273,7 @@ public class ConnectProcessorTest {
|
||||
// Mock statement executor
|
||||
StmtExecutor qe = EasyMock.createNiceMock(StmtExecutor.class);
|
||||
qe.execute();
|
||||
EasyMock.expect(qe.getQueryConsumptionForAuditLog()).andReturn(new StmtExecutor.QueryConsumption());
|
||||
EasyMock.expectLastCall().andThrow(new NullPointerException("Fail")).anyTimes();
|
||||
EasyMock.replay(qe);
|
||||
PowerMock.expectNew(StmtExecutor.class, EasyMock.isA(ConnectContext.class), EasyMock.isA(String.class))
|
||||
|
||||
Reference in New Issue
Block a user