Implement the routine load process of Kafka on Backend (#671)

Mingyu Chen
2019-02-28 15:15:31 +08:00
committed by ZHAO Chun
parent da308da17c
commit 0820a29b8d
57 changed files with 1542 additions and 608 deletions

GenericPoolTest.java

@@ -39,6 +39,7 @@ import org.apache.doris.thrift.TMiniLoadEtlTaskRequest;
 import org.apache.doris.thrift.TNetworkAddress;
 import org.apache.doris.thrift.TPullLoadSubTaskInfo;
 import org.apache.doris.thrift.TResultBatch;
+import org.apache.doris.thrift.TRoutineLoadTask;
 import org.apache.doris.thrift.TSnapshotRequest;
 import org.apache.doris.thrift.TStatus;
 import org.apache.doris.thrift.TTabletStatResult;
@@ -217,6 +218,12 @@ public class GenericPoolTest {
             // TODO Auto-generated method stub
             return null;
         }
+
+        @Override
+        public TStatus submit_routine_load_task(TRoutineLoadTask task) throws TException {
+            // TODO Auto-generated method stub
+            return null;
+        }
     }

     @Test
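The method added above is the thrift entry point through which the frontend hands a routine load task to a backend. Below is a minimal sketch of a caller, assuming a pooled client that exposes exactly the method in this diff; BackendClient and RoutineLoadTaskSubmitter are stand-in names, and the getStatus_code()/TStatusCode accessors are assumed from the usual thrift-generated Java conventions, not confirmed by this diff.

import org.apache.doris.thrift.TRoutineLoadTask;
import org.apache.doris.thrift.TStatus;
import org.apache.doris.thrift.TStatusCode;
import org.apache.thrift.TException;

public class RoutineLoadTaskSubmitter {
    // Stand-in for the pooled thrift client; mirrors the method added in this diff.
    interface BackendClient {
        TStatus submit_routine_load_task(TRoutineLoadTask task) throws TException;
    }

    // Submit one task and report whether the backend accepted it.
    static boolean submit(BackendClient client, TRoutineLoadTask task) throws TException {
        TStatus status = client.submit_routine_load_task(task);
        return status.getStatus_code() == TStatusCode.OK;
    }
}

Returning a TStatus rather than relying on exceptions lets the caller distinguish transport failures (TException) from task-level rejections reported by the backend.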

GlobalTransactionMgrTest.java

@@ -22,11 +22,6 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;

-import com.google.common.collect.Maps;
-import mockit.Deencapsulation;
-import mockit.Expectations;
-import mockit.Injectable;
-import mockit.Mocked;
 import org.apache.doris.catalog.Catalog;
 import org.apache.doris.catalog.CatalogTestUtil;
 import org.apache.doris.catalog.Database;
@@ -51,12 +46,14 @@ import org.apache.doris.load.routineload.RoutineLoadTaskInfo;
 import org.apache.doris.meta.MetaContext;
 import org.apache.doris.qe.ConnectContext;
 import org.apache.doris.thrift.TKafkaRLTaskProgress;
+import org.apache.doris.thrift.TLoadSourceType;
 import org.apache.doris.thrift.TRLTaskTxnCommitAttachment;
 import org.apache.doris.thrift.TResourceInfo;
-import org.apache.doris.thrift.TRoutineLoadType;
 import org.apache.doris.thrift.TUniqueId;
 import org.apache.doris.transaction.TransactionState.LoadJobSourceType;

+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;

 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -70,6 +67,11 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;

+import mockit.Deencapsulation;
+import mockit.Expectations;
+import mockit.Injectable;
+import mockit.Mocked;
+
 public class GlobalTransactionMgrTest {

     private static FakeEditLog fakeEditLog;
@@ -331,25 +333,21 @@ public class GlobalTransactionMgrTest {
         routineLoadJob.setState(RoutineLoadJob.JobState.RUNNING);

         TRLTaskTxnCommitAttachment rlTaskTxnCommitAttachment = new TRLTaskTxnCommitAttachment();
         rlTaskTxnCommitAttachment.setBackendId(1L);
-        rlTaskTxnCommitAttachment.setTaskSignature(1L);
-        rlTaskTxnCommitAttachment.setNumOfTotalData(100);
-        rlTaskTxnCommitAttachment.setNumOfErrorData(1);
-        rlTaskTxnCommitAttachment.setTaskId("label");
+        rlTaskTxnCommitAttachment.setId(new TUniqueId());
+        rlTaskTxnCommitAttachment.setLoadedRows(100);
+        rlTaskTxnCommitAttachment.setFilteredRows(1);
+        rlTaskTxnCommitAttachment.setJobId(Deencapsulation.getField(routineLoadJob, "id"));
-        rlTaskTxnCommitAttachment.setRoutineLoadType(TRoutineLoadType.KAFKA);
+        rlTaskTxnCommitAttachment.setLoadSourceType(TLoadSourceType.KAFKA);
         TKafkaRLTaskProgress tKafkaRLTaskProgress = new TKafkaRLTaskProgress();
         Map<Integer, Long> kafkaProgress = Maps.newHashMap();
         kafkaProgress.put(1, 10L);
-        tKafkaRLTaskProgress.setPartitionIdToOffset(kafkaProgress);
+        tKafkaRLTaskProgress.setPartitionCmtOffset(kafkaProgress);
         rlTaskTxnCommitAttachment.setKafkaRLTaskProgress(tKafkaRLTaskProgress);
         TxnCommitAttachment txnCommitAttachment = new RLTaskTxnCommitAttachment(rlTaskTxnCommitAttachment);
         RoutineLoadManager routineLoadManager = new RoutineLoadManager();
         routineLoadManager.addRoutineLoadJob(routineLoadJob);

         new Expectations() {
             {
                 catalog.getDb(1L);
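The hunk above moves the attachment onto the renamed thrift fields: loadedRows/filteredRows replace numOfTotalData/numOfErrorData, the task is identified by a TUniqueId plus the owning job id, and Kafka progress is carried as a partition-to-committed-offset map. Below is a minimal sketch of assembling the same attachment outside the test, using only setters visible in this diff; the jobId, backendId, and offset values are placeholders.

import java.util.Map;

import com.google.common.collect.Maps;
import org.apache.doris.thrift.TKafkaRLTaskProgress;
import org.apache.doris.thrift.TLoadSourceType;
import org.apache.doris.thrift.TRLTaskTxnCommitAttachment;
import org.apache.doris.thrift.TUniqueId;

public class AttachmentExample {
    static TRLTaskTxnCommitAttachment buildKafkaAttachment(long jobId, long backendId) {
        TRLTaskTxnCommitAttachment attachment = new TRLTaskTxnCommitAttachment();
        attachment.setId(new TUniqueId());                    // id of the finished task
        attachment.setJobId(jobId);                           // owning routine load job
        attachment.setBackendId(backendId);                   // BE that ran the task
        attachment.setLoadSourceType(TLoadSourceType.KAFKA);
        attachment.setLoadedRows(100);                        // rows written successfully
        attachment.setFilteredRows(1);                        // rows rejected as errors

        // Progress is the last committed offset per Kafka partition id.
        Map<Integer, Long> partitionToCommittedOffset = Maps.newHashMap();
        partitionToCommittedOffset.put(0, 42L);
        TKafkaRLTaskProgress progress = new TKafkaRLTaskProgress();
        progress.setPartitionCmtOffset(partitionToCommittedOffset);
        attachment.setKafkaRLTaskProgress(progress);
        return attachment;
    }
}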
@@ -409,25 +407,21 @@ public class GlobalTransactionMgrTest {
         routineLoadJob.setState(RoutineLoadJob.JobState.RUNNING);

         TRLTaskTxnCommitAttachment rlTaskTxnCommitAttachment = new TRLTaskTxnCommitAttachment();
         rlTaskTxnCommitAttachment.setBackendId(1L);
-        rlTaskTxnCommitAttachment.setTaskSignature(1L);
-        rlTaskTxnCommitAttachment.setNumOfTotalData(100);
-        rlTaskTxnCommitAttachment.setNumOfErrorData(11);
-        rlTaskTxnCommitAttachment.setTaskId("label");
+        rlTaskTxnCommitAttachment.setId(new TUniqueId());
+        rlTaskTxnCommitAttachment.setLoadedRows(100);
+        rlTaskTxnCommitAttachment.setFilteredRows(11);
+        rlTaskTxnCommitAttachment.setJobId(Deencapsulation.getField(routineLoadJob, "id"));
-        rlTaskTxnCommitAttachment.setRoutineLoadType(TRoutineLoadType.KAFKA);
+        rlTaskTxnCommitAttachment.setLoadSourceType(TLoadSourceType.KAFKA);
         TKafkaRLTaskProgress tKafkaRLTaskProgress = new TKafkaRLTaskProgress();
         Map<Integer, Long> kafkaProgress = Maps.newHashMap();
         kafkaProgress.put(1, 10L);
-        tKafkaRLTaskProgress.setPartitionIdToOffset(kafkaProgress);
+        tKafkaRLTaskProgress.setPartitionCmtOffset(kafkaProgress);
         rlTaskTxnCommitAttachment.setKafkaRLTaskProgress(tKafkaRLTaskProgress);
         TxnCommitAttachment txnCommitAttachment = new RLTaskTxnCommitAttachment(rlTaskTxnCommitAttachment);
         RoutineLoadManager routineLoadManager = new RoutineLoadManager();
         routineLoadManager.addRoutineLoadJob(routineLoadJob);

         new Expectations() {
             {
                 catalog.getDb(1L);
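This second test mirrors the previous one but reports 11 filtered rows against 100 loaded, presumably enough to trip the job's error threshold when the transaction is committed. Below is a hedged sketch of that kind of gate; the name maxFilteredRows and the exact rule are assumptions for illustration, not the actual RoutineLoadJob logic.

public class ErrorRowGate {
    // Hypothetical check: reject the task's commit if too many rows were filtered.
    static void checkFilteredRows(long loadedRows, long filteredRows, long maxFilteredRows) {
        if (filteredRows > maxFilteredRows) {
            throw new IllegalStateException("too many filtered rows: " + filteredRows
                    + " of " + (loadedRows + filteredRows) + " exceeds limit " + maxFilteredRows);
        }
    }
}

With the values used in these two tests, a limit of 10 would let the first case (1 filtered row) commit and reject the second (11 filtered rows).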