[BUG] Fix Memory Leak in SchemaChange And Fix some DCHECK errors (#5491)
This commit is contained in:
@ -753,7 +753,14 @@ Status AnalyticEvalNode::get_next_output_batch(RuntimeState* state, RowBatch* ou
|
||||
// CopyRow works as expected: input_batch tuples form a prefix of output_batch
|
||||
// tuples.
|
||||
TupleRow* dest = output_batch->get_row(output_batch->add_row());
|
||||
input_batch.copy_row(input_batch.get_row(i), dest);
|
||||
// input_batch is from a tuple_buffer_stream,
|
||||
// which can only guarantee that the tuple's lifetime is valid within a single batch stage.
|
||||
// If the ancestor node is a no-spilling blocking node (such as hash_join_node except_node ...)
|
||||
// these nodes may acquire an invalid tuple pointer,
|
||||
// so we should use deep_copy and copy the tuple into the tuple_pool to ensure the tuple is not finalized.
|
||||
// reference issue #5466
|
||||
input_batch.get_row(i)->deep_copy(dest, child(0)->row_desc().tuple_descriptors(),
|
||||
output_batch->tuple_data_pool(), false);
|
||||
dest->set_tuple(num_child_tuples, _result_tuples.front().second);
|
||||
|
||||
if (ExecNode::eval_conjuncts(ctxs, num_ctxs, dest)) {
|
||||
|
||||
Reference in New Issue
Block a user