Put begin txn into task scheduler (#687)
1. Fix the nested locking of db and txn. 2. A task's txn is now initialized in the task scheduler before the task is taken from the queue.
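In outline, the second fix means the scheduler loop begins the txn while the task is still sitting in the need-schedule queue, and only dequeues it once the txn exists. A minimal sketch, assuming made-up names (Task, beginTxn, scheduleOnce); the repository's real classes are RoutineLoadTaskScheduler and GlobalTransactionMgr, whose signatures are not shown in this diff:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

// Hypothetical sketch only; real scheduling lives in RoutineLoadTaskScheduler
// and txns in GlobalTransactionMgr, whose actual signatures differ.
public class TaskSchedulerSketch {
    static final class Task {
        long txnId = -1L;
    }

    private final Queue<Task> needScheduleQueue = new ConcurrentLinkedQueue<>();
    private long nextTxnId = 0L;

    // Stand-in for beginTransaction(): takes only the txn manager's own lock,
    // never a db lock, so no db/txn lock nesting can occur here.
    private synchronized long beginTxn() {
        return nextTxnId++;
    }

    // One pass of the scheduler loop: init the txn BEFORE taking the task
    // from the queue, as the commit description says.
    public void scheduleOnce() {
        Task task = needScheduleQueue.peek();
        if (task == null) {
            return;
        }
        task.txnId = beginTxn();   // txn exists before the task is dequeued
        needScheduleQueue.poll();  // now remove it and dispatch to a backend
    }
}

Ordered this way, a failed begin leaves the task queued for a later retry instead of losing it.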
@@ -63,7 +63,7 @@ public class CreateRoutineLoadStmtTest {
         Map<String, String> customProperties = Maps.newHashMap();
 
         customProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, topicName);
-        customProperties.put(CreateRoutineLoadStmt.KAFKA_ENDPOINT_PROPERTY, serverAddress);
+        customProperties.put(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY, serverAddress);
         customProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, kafkaPartitionString);
 
         CreateRoutineLoadStmt createRoutineLoadStmt = new CreateRoutineLoadStmt(jobName, tableName,
@@ -109,7 +109,7 @@ public class CreateRoutineLoadStmtTest {
         Map<String, String> customProperties = Maps.newHashMap();
 
         customProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, topicName);
-        customProperties.put(CreateRoutineLoadStmt.KAFKA_ENDPOINT_PROPERTY, serverAddress);
+        customProperties.put(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY, serverAddress);
         customProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, kafkaPartitionString);
 
         CreateRoutineLoadStmt createRoutineLoadStmt = new CreateRoutineLoadStmt(jobName, tableName,
@@ -129,7 +129,7 @@ public class CreateRoutineLoadStmtTest {
         Assert.assertEquals(partitionNames.getPartitionNames(), createRoutineLoadStmt.getRoutineLoadDesc().getPartitionNames());
         Assert.assertEquals(2, createRoutineLoadStmt.getDesiredConcurrentNum());
         Assert.assertEquals(0, createRoutineLoadStmt.getMaxErrorNum());
-        Assert.assertEquals(serverAddress, createRoutineLoadStmt.getKafkaEndpoint());
+        Assert.assertEquals(serverAddress, createRoutineLoadStmt.getKafkaBrokerList());
         Assert.assertEquals(topicName, createRoutineLoadStmt.getKafkaTopic());
         Assert.assertEquals(kafkaPartitionString, Joiner.on(",").join(createRoutineLoadStmt.getKafkaPartitions()));
     }
@@ -367,7 +367,7 @@ public class KafkaRoutineLoadJobTest {
         Map<String, String> customProperties = Maps.newHashMap();
 
         customProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, topicName);
-        customProperties.put(CreateRoutineLoadStmt.KAFKA_ENDPOINT_PROPERTY, serverAddress);
+        customProperties.put(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY, serverAddress);
         customProperties.put(CreateRoutineLoadStmt.KAFKA_PARTITIONS_PROPERTY, kafkaPartitionString);
 
         CreateRoutineLoadStmt createRoutineLoadStmt = new CreateRoutineLoadStmt(jobName, tableName,
@@ -76,7 +76,7 @@ public class RoutineLoadManagerTest {
         String topicName = "topic1";
         customProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, topicName);
         String serverAddress = "http://127.0.0.1:8080";
-        customProperties.put(CreateRoutineLoadStmt.KAFKA_ENDPOINT_PROPERTY, serverAddress);
+        customProperties.put(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY, serverAddress);
         CreateRoutineLoadStmt createRoutineLoadStmt = new CreateRoutineLoadStmt(jobName, tableName,
                 loadPropertyList, properties,
                 typeName, customProperties);
@@ -142,7 +142,7 @@ public class RoutineLoadManagerTest {
         String topicName = "topic1";
         customProperties.put(CreateRoutineLoadStmt.KAFKA_TOPIC_PROPERTY, topicName);
         String serverAddress = "http://127.0.0.1:8080";
-        customProperties.put(CreateRoutineLoadStmt.KAFKA_ENDPOINT_PROPERTY, serverAddress);
+        customProperties.put(CreateRoutineLoadStmt.KAFKA_BROKER_LIST_PROPERTY, serverAddress);
         CreateRoutineLoadStmt createRoutineLoadStmt = new CreateRoutineLoadStmt(jobName, tableName,
                 loadPropertyList, properties,
                 typeName, customProperties);
@@ -236,7 +236,7 @@ public class RoutineLoadManagerTest {
 
         new Expectations() {
             {
-                systemInfoService.getBackendIds(true);
+                systemInfoService.getClusterBackendIds(anyString, true);
                 result = beIds;
                 Catalog.getCurrentSystemInfo();
                 result = systemInfoService;
@@ -245,7 +245,7 @@ public class RoutineLoadManagerTest {
 
         RoutineLoadManager routineLoadManager = new RoutineLoadManager();
         routineLoadManager.addNumOfConcurrentTasksByBeId(1L);
-        Assert.assertEquals(2L, routineLoadManager.getMinTaskBeId());
+        Assert.assertEquals(2L, routineLoadManager.getMinTaskBeId("default"));
     }
 
     @Test
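The two RoutineLoadManagerTest hunks above show the BE-selection API becoming cluster-aware: backends are now fetched via getClusterBackendIds(anyString, true) instead of getBackendIds(true), and getMinTaskBeId takes a cluster name. A self-contained sketch of least-loaded selection over one cluster's backends, with assumed field and method names (the real RoutineLoadManager may differ):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Assumed names; illustrates the idea behind getMinTaskBeId(clusterName).
public class MinTaskBeSketch {
    // beId -> number of concurrent routine load tasks on that BE
    private final Map<Long, Integer> beIdToConcurrentTasks = new HashMap<>();

    public void addNumOfConcurrentTasksByBeId(long beId) {
        beIdToConcurrentTasks.merge(beId, 1, Integer::sum);
    }

    // clusterBackendIds would come from getClusterBackendIds(clusterName, true),
    // as mocked in the test above; only that cluster's BEs are considered.
    public long getMinTaskBeId(List<Long> clusterBackendIds) {
        long minBeId = -1L;
        int minTasks = Integer.MAX_VALUE;
        for (long beId : clusterBackendIds) {
            int tasks = beIdToConcurrentTasks.getOrDefault(beId, 0);
            if (tasks < minTasks) {
                minTasks = tasks;
                minBeId = beId;
            }
        }
        return minBeId;
    }
}

For example, with backends [1, 2] and one task already counted on BE 1, this returns 2, consistent with the assertion in the hunk above.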
@@ -77,7 +77,7 @@ public class RoutineLoadSchedulerTest {
                 new KafkaRoutineLoadJob(1L, "kafka_routine_load_job", 1L,
                         1L, routineLoadDesc ,3, 0,
                         "", "", new KafkaProgress());
-        routineLoadJob.setState(RoutineLoadJob.JobState.NEED_SCHEDULE);
+        Deencapsulation.setField(routineLoadJob,"state", RoutineLoadJob.JobState.NEED_SCHEDULE);
         List<RoutineLoadJob> routineLoadJobList = new ArrayList<>();
         routineLoadJobList.add(routineLoadJob);
 
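Several hunks in this commit replace public setters (setState, setTxnStateChangeListener) with JMockit's Deencapsulation.setField, which writes a private field reflectively in test code; presumably the setters were removed or narrowed now that state transitions go through the scheduler. A minimal illustration of the pattern, using a made-up Job class as a stand-in for RoutineLoadJob:

import mockit.Deencapsulation;

public class DeencapsulationExample {
    // Made-up stand-in for RoutineLoadJob: "state" has no public setter.
    static class Job {
        private String state = "PENDING";
        String getState() { return state; }
    }

    public static void main(String[] args) {
        Job job = new Job();
        // Test-only reflective write of the private field.
        Deencapsulation.setField(job, "state", "NEED_SCHEDULE");
        System.out.println(job.getState()); // prints NEED_SCHEDULE
    }
}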
@@ -113,7 +113,7 @@ public class RoutineLoadTaskSchedulerTest {
 
                 routineLoadManager.getNeedScheduleTasksQueue();
                 result = routineLoadTaskInfoQueue;
-                routineLoadManager.getMinTaskBeId();
+                routineLoadManager.getMinTaskBeId(anyString);
                 result = beId;
                 routineLoadManager.getJobByTaskId(anyString);
                 result = kafkaRoutineLoadJob1;
@@ -321,7 +321,7 @@ public class GlobalTransactionMgrTest {
         routineLoadTaskInfoList.add(routineLoadTaskInfo);
         TransactionState transactionState = new TransactionState(1L, 1L, "label", 1L, LoadJobSourceType.ROUTINE_LOAD_TASK, "be1");
         transactionState.setTransactionStatus(TransactionStatus.PREPARE);
-        transactionState.setTxnStateChangeListener(routineLoadJob);
+        Deencapsulation.setField(transactionState, "txnStateChangeListener", routineLoadJob);
         Map<Long, TransactionState> idToTransactionState = Maps.newHashMap();
         idToTransactionState.put(1L, transactionState);
         Deencapsulation.setField(routineLoadJob, "maxErrorNum", 10);
@@ -330,7 +330,7 @@ public class GlobalTransactionMgrTest {
         KafkaProgress oldkafkaProgress = new KafkaProgress();
         oldkafkaProgress.setPartitionIdToOffset(oldKafkaProgressMap);
         Deencapsulation.setField(routineLoadJob, "progress", oldkafkaProgress);
-        routineLoadJob.setState(RoutineLoadJob.JobState.RUNNING);
+        Deencapsulation.setField(routineLoadJob, "state", RoutineLoadJob.JobState.RUNNING);
 
         TRLTaskTxnCommitAttachment rlTaskTxnCommitAttachment = new TRLTaskTxnCommitAttachment();
         rlTaskTxnCommitAttachment.setId(new TUniqueId());
@@ -395,7 +395,7 @@ public class GlobalTransactionMgrTest {
         routineLoadTaskInfoList.add(routineLoadTaskInfo);
         TransactionState transactionState = new TransactionState(1L, 1L, "label", 1L, LoadJobSourceType.ROUTINE_LOAD_TASK, "be1");
         transactionState.setTransactionStatus(TransactionStatus.PREPARE);
-        transactionState.setTxnStateChangeListener(routineLoadJob);
+        Deencapsulation.setField(transactionState, "txnStateChangeListener", routineLoadJob);
         Map<Long, TransactionState> idToTransactionState = Maps.newHashMap();
         idToTransactionState.put(1L, transactionState);
         Deencapsulation.setField(routineLoadJob, "maxErrorNum", 10);
@@ -404,7 +404,7 @@ public class GlobalTransactionMgrTest {
         KafkaProgress oldkafkaProgress = new KafkaProgress();
         oldkafkaProgress.setPartitionIdToOffset(oldKafkaProgressMap);
         Deencapsulation.setField(routineLoadJob, "progress", oldkafkaProgress);
-        routineLoadJob.setState(RoutineLoadJob.JobState.RUNNING);
+        Deencapsulation.setField(routineLoadJob, "state", RoutineLoadJob.JobState.RUNNING);
 
         TRLTaskTxnCommitAttachment rlTaskTxnCommitAttachment = new TRLTaskTxnCommitAttachment();
         rlTaskTxnCommitAttachment.setId(new TUniqueId());
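The GlobalTransactionMgrTest hunks wire the routine load job in as the transaction's txnStateChangeListener and drive commit with a TRLTaskTxnCommitAttachment. A listener-pattern sketch of that interaction, with invented interface and method names (the real callback signature is not shown in this diff):

// Invented names; only the listener-on-commit idea is taken from the hunks above.
interface TxnStateChangeListener {
    // attachment carries per-task results, e.g. a TRLTaskTxnCommitAttachment
    void onCommitted(long txnId, Object attachment);
}

class TransactionStateSketch {
    private final long txnId;
    private TxnStateChangeListener listener;

    TransactionStateSketch(long txnId) {
        this.txnId = txnId;
    }

    // In the real tests the field is written via Deencapsulation.setField.
    void setListenerForTest(TxnStateChangeListener l) {
        this.listener = l;
    }

    void commit(Object attachment) {
        if (listener != null) {
            // e.g. the routine load job advancing its Kafka progress and
            // error counters from the attachment
            listener.onCommitted(txnId, attachment);
        }
    }
}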