PATCH bugfix to opensource branch

obdev 2021-07-19 22:33:13 +08:00 committed by wangzelin.wzl
parent 5b5c04ff49
commit e03cb03357
34 changed files with 1471 additions and 1150 deletions

View File

@ -1976,7 +1976,17 @@ int ObCodeGeneratorImpl::convert_normal_table_scan(
LOG_WARN("check is table get failed", K(ret));
} else if (!is_get) {
ObTableLocation part_filter;
if (OB_FAIL(part_filter.init_table_location_with_rowkey(*schema_guard, filter_table_id, *session))) {
ObDMLStmt *root_stmt = NULL;
bool is_dml_table = false;
if (OB_ISNULL(root_stmt = op.get_plan()->get_optimizer_context().get_root_stmt())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("root stmt is invalid", K(ret), K(root_stmt));
} else if (FALSE_IT(is_dml_table = root_stmt->check_table_be_modified(filter_table_id))) {
// do nothing
} else if (OB_FAIL(part_filter.init_table_location_with_rowkey(*schema_guard,
filter_table_id,
*session,
is_dml_table))) {
LOG_WARN("init table location with rowkey failed", K(ret), K(filter_table_id));
} else if (OB_FAIL(phy_op->set_part_filter(part_filter))) {
LOG_WARN("set part filter failed", K(ret));
@ -6111,13 +6121,17 @@ int ObCodeGeneratorImpl::convert_duplicate_key_scan_info(
RowDesc& row_desc, ObLogicalOperator* log_scan_op, ObUniqueIndexScanInfo& scan_info)
{
int ret = OB_SUCCESS;
ObLogPlan* log_plan = NULL;
ObPhyOperator* scan_root = NULL;
ObSqlSchemaGuard* schema_guard = NULL;
ObSQLSessionInfo* session_info = NULL;
if (OB_ISNULL(log_scan_op) || OB_ISNULL(phy_plan_) || OB_ISNULL(log_plan = log_scan_op->get_plan()) ||
OB_ISNULL(session_info = log_plan->get_optimizer_context().get_session_info()) ||
OB_ISNULL(schema_guard = log_plan->get_optimizer_context().get_sql_schema_guard())) {
ObLogPlan *log_plan = NULL;
ObPhyOperator *scan_root = NULL;
ObSqlSchemaGuard *schema_guard = NULL;
ObSQLSessionInfo *session_info = NULL;
ObDMLStmt *root_stmt = NULL;
if (OB_ISNULL(log_scan_op)
|| OB_ISNULL(phy_plan_)
|| OB_ISNULL(log_plan = log_scan_op->get_plan())
|| OB_ISNULL(session_info = log_plan->get_optimizer_context().get_session_info())
|| OB_ISNULL(schema_guard = log_plan->get_optimizer_context().get_sql_schema_guard())
|| OB_ISNULL(root_stmt = log_plan->get_optimizer_context().get_root_stmt())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid argument", K(log_scan_op), K(phy_plan_), K(log_plan), K(session_info), K(schema_guard));
} else if (OB_UNLIKELY(!log_scan_op->is_duplicated_checker_op())) {
@ -6157,8 +6171,12 @@ int ObCodeGeneratorImpl::convert_duplicate_key_scan_info(
LOG_WARN("allocate table location buffer failed", K(ret), K(sizeof(ObTableLocation)));
} else {
scan_info.index_location_ = new (buf) ObTableLocation(phy_plan_->get_allocator());
bool is_dml_table = root_stmt->check_table_be_modified(scan_info.index_tid_);
if (OB_FAIL(scan_info.index_location_->init_table_location_with_rowkey(
*schema_guard, scan_info.index_tid_, *session_info))) {
*schema_guard,
scan_info.index_tid_,
*session_info,
is_dml_table))) {
LOG_WARN("init index location failed", K(ret), KPC(log_scan_op));
} else {
scan_info.index_location_->set_table_id(scan_info.table_id_);
@ -6897,9 +6915,10 @@ int ObCodeGeneratorImpl::convert_table_lookup(ObLogTableLookup& op, const PhyOps
RowDesc* out_row_desc = NULL;
PhyOpsDesc table_scan_child_ops;
PhyOpsDesc table_scan_out_ops;
ObSchemaGetterGuard* schema_guard = NULL;
const ObTableSchema* table_schema = NULL;
ObSQLSessionInfo* my_session = NULL;
ObSchemaGetterGuard *schema_guard = NULL;
const ObTableSchema *table_schema = NULL;
ObSQLSessionInfo *my_session = NULL;
ObDMLStmt *root_stmt = NULL;
if (OB_ISNULL(op.get_index_back_scan())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("null table scan operator", K(ret));
@ -6921,7 +6940,8 @@ int ObCodeGeneratorImpl::convert_table_lookup(ObLogTableLookup& op, const PhyOps
} else if (OB_FAIL(copy_row_desc(*table_scan_out_ops.at(0).second, *out_row_desc))) {
LOG_WARN("failed to copy row desc", K(ret), K(*child_ops.at(0).second));
} else if (OB_ISNULL(op.get_plan()) ||
OB_ISNULL(schema_guard = op.get_plan()->get_optimizer_context().get_schema_guard())) {
OB_ISNULL(schema_guard = op.get_plan()->get_optimizer_context().get_schema_guard()) ||
OB_ISNULL(root_stmt = op.get_plan()->get_optimizer_context().get_root_stmt())) {
ret = OB_ERR_UNEXPECTED;
LOG_ERROR("get unexpected null", K(schema_guard), K(ret));
} else if (OB_FAIL(schema_guard->get_table_schema(op.get_ref_table_id(), table_schema))) {
@ -6951,11 +6971,13 @@ int ObCodeGeneratorImpl::convert_table_lookup(ObLogTableLookup& op, const PhyOps
if (OB_SUCC(ret)) {
ObTableLocation& partition_id_getter = table_lookup->get_part_id_getter();
// the other function may be more effective, TODO
bool is_dml_table = root_stmt->check_table_be_modified(lookup_info.table_id_);
if (OB_FAIL(partition_id_getter.init_table_location_with_row_desc(
*op.get_plan()->get_optimizer_context().get_sql_schema_guard(),
lookup_info.ref_table_id_,
*child_ops.at(0).second,
*my_session))) {
*op.get_plan()->get_optimizer_context().get_sql_schema_guard(),
lookup_info.ref_table_id_,
*child_ops.at(0).second,
*my_session,
is_dml_table))) {
LOG_WARN("the partition id init failed", K(ret));
} else {
partition_id_getter.set_table_id(op.get_table_id());
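The three call sites changed above (convert_normal_table_scan, convert_duplicate_key_scan_info and convert_table_lookup) all follow the same pattern: read the root statement from the optimizer context, ask whether the table in question is modified by that statement, and forward the answer to the table-location initializer as is_dml_table. A minimal standalone sketch of that flow, with RootStmt, TableLocation and init_with_rowkey as simplified stand-ins rather than the real ObDMLStmt/ObTableLocation classes:

#include <cstdint>
#include <iostream>
#include <set>

// Simplified stand-ins; the real classes are ObDMLStmt and ObTableLocation.
struct RootStmt {
  std::set<uint64_t> modified_tables;
  bool check_table_be_modified(uint64_t table_id) const {
    return modified_tables.count(table_id) > 0;
  }
};

struct TableLocation {
  bool is_dml_table = true;  // mirrors the new default of init_table_location_with_rowkey
  void init_with_rowkey(uint64_t table_id, bool is_dml) {
    (void)table_id;
    is_dml_table = is_dml;
  }
};

int main() {
  RootStmt root{{1001}};                  // pretend table 1001 is the DML target
  const uint64_t filter_table_id = 1002;  // table used only for partition filtering

  // The pattern introduced by the patch: derive the flag from the root stmt
  // instead of letting every call site guess it.
  const bool is_dml_table = root.check_table_be_modified(filter_table_id);

  TableLocation part_filter;
  part_filter.init_with_rowkey(filter_table_id, is_dml_table);
  std::cout << std::boolalpha << "is_dml_table = " << part_filter.is_dml_table << "\n";
  return 0;
}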

View File

@ -219,6 +219,8 @@ int ObOutlineExecutor::generate_logical_plan(
ctx,
outline_stmt))) {
LOG_WARN("fail to transform outline stmt", K(ret));
} else if (FALSE_IT(opt_ctx.set_root_stmt(outline_stmt))) {
// do nothing
} else if (OB_FAIL(ObSql::optimize_stmt(optimizer, *session_info, *outline_stmt, logical_plan))) {
LOG_WARN("fail to optimize stmt", K(ret));
} else { /*do nothing*/

View File

@ -120,13 +120,14 @@ int ObMergeStmtPrinter::print_update_clause(const ObMergeStmt& merge_stmt)
}
DATA_PRINTF(" update set ");
for (int64_t i = 0; OB_SUCC(ret) && i < assigns.assignments_.count(); ++i) {
ObColumnRefRawExpr* column = assigns.assignments_.at(i).column_expr_;
ObRawExpr* value = assigns.assignments_.at(i).expr_;
ObRawExpr* real_value = NULL;
const ObAssignment &assign = assigns.assignments_.at(i);
ObColumnRefRawExpr *column = assign.column_expr_;
ObRawExpr *value = assign.expr_;
ObRawExpr *real_value = NULL;
if (OB_ISNULL(column) || OB_ISNULL(value)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid column assign", K(ret));
} else if (!column->is_generated_column()) {
} else if (!assign.is_implicit_) {
if (!first_assign) {
DATA_PRINTF(", ");
} else {

View File

@ -1591,12 +1591,9 @@ int ObSql::generate_physical_plan(ParseResult& parse_result, ObPlanCacheCtx* pc_
result.get_exec_context(),
stmt))) { // rewrite stmt
LOG_WARN("Failed to transforme stmt", K(ret));
// } else if (!optctx.use_default_stat() &&
// OB_FAIL(analyze_table_stat_version(
// sql_ctx.schema_guard_, optctx.get_opt_stat_manager(), *stmt))) {
// LOG_WARN("Failed to analyze table stat version", K(ret));
} else if (OB_FAIL(
optimize_stmt(optimizer, *(sql_ctx.session_info_), *stmt, logical_plan))) { // gen logical plan
} else if (OB_FALSE_IT(optctx.set_root_stmt(stmt))) {
} else if (OB_FAIL(optimize_stmt(optimizer, *(sql_ctx.session_info_),
*stmt, logical_plan))) { //gen logical plan
LOG_WARN("Failed to optimizer stmt", K(ret));
} else if (OB_ISNULL(logical_plan)) {
ret = OB_INVALID_ARGUMENT;

View File

@ -1807,15 +1807,13 @@ int ObSQLUtils::construct_outline_sql(ObIAllocator& allocator, const ObSQLSessio
LOG_WARN("fail to filter head space", K(ret));
}
if (OB_SUCC(ret)) {
ObString first_token = filter_sql.split_on(' ');
if (OB_FAIL(sql_helper.assign_fmt("%.*s %.*s%.*s",
first_token.length(),
first_token.ptr(),
outline_content.length(),
outline_content.ptr(),
filter_sql.length(),
filter_sql.ptr()))) {
LOG_WARN("failed to construct new sql", K(first_token), K(orig_sql), K(filter_sql), K(outline_content), K(ret));
char empty_split = find_first_empty_char(filter_sql);
ObString first_token = filter_sql.split_on(empty_split);
if (OB_FAIL(sql_helper.assign_fmt("%.*s %.*s%.*s", first_token.length(), first_token.ptr(),
outline_content.length(), outline_content.ptr(),
filter_sql.length(), filter_sql.ptr()))) {
LOG_WARN("failed to construct new sql", K(first_token), K(orig_sql),
K(filter_sql), K(outline_content), K(ret));
} else if (OB_FAIL(ob_write_string(allocator, sql_helper.string(), outline_sql))) {
LOG_WARN("failed to write string", K(first_token), K(orig_sql), K(filter_sql), K(outline_content), K(ret));
} else { /*do nothing*/
@ -1841,8 +1839,21 @@ int ObSQLUtils::filter_head_space(ObString& sql)
return ret;
}
int ObSQLUtils::reconstruct_sql(
ObIAllocator& allocator, const ObStmt* stmt, ObString& sql, ObObjPrintParams print_params)
char ObSQLUtils::find_first_empty_char(const ObString &sql)
{
char empty_char = ' '; // default split
for (int64_t i = 0; i < sql.length(); ++i) {
char ch = sql[i];
if (' ' == ch || '\r' == ch || '\n' == ch || '\t' == ch || '\f' == ch) {
empty_char = ch;
break;
}
}
return empty_char;
}
int ObSQLUtils::reconstruct_sql(ObIAllocator &allocator, const ObStmt *stmt, ObString &sql,
ObObjPrintParams print_params)
{
int ret = OB_SUCCESS;
if (OB_ISNULL(stmt)) {
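find_first_empty_char is introduced because the first token of the statement is not always followed by a plain space: the original SQL may use a newline, tab, carriage return or form feed after the leading keyword, and splitting on ' ' alone would then pick the wrong token before the outline content is spliced in behind it. A standalone sketch of the splitting behaviour on std::string (first_token is an illustrative helper, not OceanBase code):

#include <iostream>
#include <string>

// Split off the first token at the first whitespace character actually present
// in the SQL, mirroring what find_first_empty_char enables above.
static std::string first_token(const std::string &sql) {
  const std::size_t pos = sql.find_first_of(" \r\n\t\f");
  return pos == std::string::npos ? sql : sql.substr(0, pos);
}

int main() {
  // With a newline after the keyword, splitting on ' ' alone would treat
  // "select\nc1" as the first token; scanning for any whitespace character
  // still isolates "select".
  std::cout << first_token("select\nc1 from t1") << "\n";  // prints: select
  std::cout << first_token("select c1 from t1") << "\n";   // prints: select
  return 0;
}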

View File

@ -216,6 +216,7 @@ public:
static int filter_hint_in_query_sql(common::ObIAllocator& allocator, const ObSQLSessionInfo& session,
const common::ObString& sql, common::ObString& param_sql);
static int filter_head_space(ObString& sql);
static char find_first_empty_char(const ObString &sql);
static int construct_outline_sql(common::ObIAllocator& allocator, const ObSQLSessionInfo& session,
const common::ObString& outline_content, const common::ObString& orig_sql, bool is_need_filter_hint,
common::ObString& outline_sql);

View File

@ -132,7 +132,7 @@ int ObUpdateStmtPrinter::print_set()
if (OB_ISNULL(assign.column_expr_) || OB_ISNULL(assign.expr_)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("column expr is null", K(ret), K(assign.column_expr_), K(assign.expr_));
} else if (assign.column_expr_->is_generated_column()) {
} else if (assign.is_implicit_) {
continue;
} else if (OB_FAIL(ObRawExprUtils::find_alias_expr(assign.expr_, alias))) {
LOG_WARN("failed to find alias expr", K(ret));

View File

@ -102,8 +102,12 @@ int ObLogJoin::allocate_exchange_post(AllocExchContext* ctx)
LOG_WARN("failed to get calc types", K(ret));
} else if (OB_FAIL(get_sharding_input_equal_sets(sharding_input_esets))) {
LOG_WARN("failed to get sharding input esets", K(ret));
} else if (OB_FAIL(get_candidate_join_distribution_method(
*get_plan(), sharding_input_esets, left_join_keys, right_join_keys, candidate_method))) {
} else if (OB_FAIL(get_candidate_join_distribution_method(*get_plan(),
sharding_input_esets,
left_join_keys,
right_join_keys,
ctx->exchange_allocated_,
candidate_method))) {
LOG_WARN("failed to get valid dist method", K(ret));
} else if (OB_FAIL(choose_best_distribution_method(
*ctx, candidate_method, pq_map_hint_, join_dist_algo_, slave_mapping_type_))) {
@ -1688,8 +1692,12 @@ int ObLogJoin::is_left_unique(bool& left_unique) const
return ret;
}
int ObLogJoin::get_candidate_join_distribution_method(ObLogPlan& log_plan, const EqualSets& equal_sets,
const ObIArray<ObRawExpr*>& left_join_keys, const ObIArray<ObRawExpr*>& right_join_keys, uint64_t& candidate_method)
int ObLogJoin::get_candidate_join_distribution_method(ObLogPlan &log_plan,
const EqualSets &equal_sets,
const ObIArray<ObRawExpr*> &left_join_keys,
const ObIArray<ObRawExpr*> &right_join_keys,
const bool exchange_allocated,
uint64_t &candidate_method)
{
int ret = OB_SUCCESS;
ObLogicalOperator* left_child = NULL;
@ -1701,17 +1709,24 @@ int ObLogJoin::get_candidate_join_distribution_method(ObLogPlan& log_plan, const
LOG_WARN("get unexpected null", K(left_child), K(right_child), K(ret));
} else if (is_nlj_with_param_down()) {
add_join_dist_flag(candidate_method, DIST_PULL_TO_LOCAL);
add_join_dist_flag(candidate_method, DIST_PARTITION_WISE);
add_join_dist_flag(candidate_method, DIST_PARTITION_NONE);
bool is_table_scan = false;
if (OB_FAIL(check_is_table_scan(*right_child, is_table_scan))) {
LOG_WARN("failed to check is table scan", K(ret));
} else if (INNER_JOIN == join_type_ && is_table_scan) {
/*
bool has_exch = false;
if (exchange_allocated && OB_FAIL(right_child->check_has_exchange_below(has_exch))) {
LOG_WARN("failed to check has exchange below", K(ret));
} else if (!has_exch) {
add_join_dist_flag(candidate_method, DIST_PARTITION_WISE);
add_join_dist_flag(candidate_method, DIST_PARTITION_NONE);
}
if (OB_SUCC(ret)) {
bool is_table_scan = false;
if (OB_FAIL(check_is_table_scan(*right_child, is_table_scan))) {
LOG_WARN("failed to check is table scan", K(ret));
} else if (INNER_JOIN == join_type_ && is_table_scan) {
/*
* todo we should use strategy to choose
* between DIST_BC2HOST_NONE and DIST_BROADCAST_NONE in future
*/
add_join_dist_flag(candidate_method, DIST_BC2HOST_NONE);
add_join_dist_flag(candidate_method, DIST_BC2HOST_NONE);
}
}
} else {
// without BC2HOST
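For a nested-loop join with parameters pushed down to the right side, the patch now keeps the partition-wise and partition-none methods only when no exchange has already been allocated below the right child. A simplified sketch of that flag filtering, using placeholder bit values rather than the real DIST_* definitions:

#include <cstdint>
#include <iostream>

// Placeholder bit values; only the names are taken from the code above.
enum : uint64_t {
  DIST_PULL_TO_LOCAL  = 1u << 0,
  DIST_PARTITION_WISE = 1u << 1,
  DIST_PARTITION_NONE = 1u << 2,
};

int main() {
  const bool exchange_allocated = true;
  const bool has_exchange_below = true;  // would come from check_has_exchange_below

  uint64_t candidate_method = DIST_PULL_TO_LOCAL;  // always a candidate
  // Partition-based methods stay valid only when the right child has no exchange yet.
  if (!(exchange_allocated && has_exchange_below)) {
    candidate_method |= DIST_PARTITION_WISE | DIST_PARTITION_NONE;
  }
  std::cout << "candidate_method = 0x" << std::hex << candidate_method << "\n";  // 0x1
  return 0;
}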

View File

@ -323,6 +323,7 @@ private:
virtual int allocate_granule_pre(AllocGIContext& ctx) override;
int get_candidate_join_distribution_method(ObLogPlan& log_plan, const EqualSets& equal_sets,
const common::ObIArray<ObRawExpr*>& left_join_keys, const common::ObIArray<ObRawExpr*>& right_join_keys,
const bool exchange_allocator,
uint64_t& candidate_method);
int check_if_match_join_partition_wise(ObLogPlan& log_plan, const EqualSets& equal_sets,
const common::ObIArray<ObRawExpr*>& left_keys, const common::ObIArray<ObRawExpr*>& right_keys,

View File

@ -75,6 +75,16 @@ int ObLogLimit::allocate_exchange_post(AllocExchContext* ctx)
return ret;
}
int ObLogLimit::allocate_granule_pre(AllocGIContext &ctx)
{
return pw_allocate_granule_pre(ctx);
}
int ObLogLimit::allocate_granule_post(AllocGIContext &ctx)
{
return pw_allocate_granule_post(ctx);
}
int ObLogLimit::transmit_op_ordering()
{
int ret = OB_SUCCESS;

View File

@ -88,6 +88,8 @@ public:
return has_union_child_;
}
virtual int est_cost() override;
virtual int allocate_granule_pre(AllocGIContext &ctx);
virtual int allocate_granule_post(AllocGIContext &ctx);
virtual int allocate_exchange_post(AllocExchContext* ctx);
virtual int transmit_op_ordering() override;
virtual int re_est_cost(const ObLogicalOperator* parent, double need_row_count, bool& re_est) override;

View File

@ -6461,11 +6461,13 @@ int ObLogPlan::classify_rownum_exprs(const ObIArray<ObRawExpr*>& rownum_exprs, O
ObItemType limit_rownum_type = T_INVALID;
limit_expr = NULL;
for (int64_t i = 0; OB_SUCC(ret) && i < rownum_exprs.count(); i++) {
ObRawExpr* rownum_expr = rownum_exprs.at(i);
ObRawExpr* const_expr = NULL;
ObRawExpr *rownum_expr = rownum_exprs.at(i);
ObRawExpr *dummy = NULL;
ObRawExpr *const_expr = NULL;
ObItemType expr_type = T_INVALID;
bool dummy_flag = false;
if (OB_FAIL(ObOptimizerUtil::get_rownum_filter_info(rownum_expr, expr_type, const_expr, dummy_flag))) {
if (OB_FAIL(ObOptimizerUtil::get_rownum_filter_info(
rownum_expr, expr_type, dummy, const_expr, dummy_flag))) {
LOG_WARN("failed to check is rownum expr used as filter", K(ret));
} else if (OB_FAIL(
classify_rownum_expr(expr_type, rownum_expr, const_expr, filter_exprs, start_exprs, limit_expr))) {

View File

@ -138,6 +138,7 @@ int ObLogSubPlanFilter::check_if_match_partition_wise(const AllocExchContext& ct
} else { /*do nothing*/
}
} else {
bool has_exch = false;
if (child->get_sharding_info().is_match_all()) {
is_partition_wise = false;
} else if (OB_FAIL(get_equal_key(ctx, child, left_key, right_key))) {
@ -150,8 +151,11 @@ int ObLogSubPlanFilter::check_if_match_partition_wise(const AllocExchContext& ct
child->get_sharding_info(),
is_partition_wise))) {
LOG_WARN("failed to check match partition wise join", K(ret));
} else { /*do nothing*/
}
} else if (ctx.exchange_allocated_ && child->check_has_exchange_below(has_exch)) {
LOG_WARN("failed to check has exchange blew", K(ret));
} else if (has_exch) {
is_partition_wise = false;
} else { /*do nothing*/}
}
}
if (OB_SUCC(ret)) {

View File

@ -681,6 +681,7 @@ int ObLogTableScan::extract_access_exprs(
expr = static_cast<ObColumnRefRawExpr*>(raw_expr);
} else if (OB_FAIL(ObRawExprUtils::build_column_expr(opt_ctx->get_expr_factory(), *column_schema, expr))) {
LOG_WARN("build column expr failed", K(ret));
} else if (FALSE_IT(expr->set_table_id(get_table_id()))) {
} else if (OB_ISNULL(expr)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("expr is null", K(col_idx), K(ret));

View File

@ -6207,10 +6207,19 @@ int ObLogicalOperator::allocate_granule_nodes_above(AllocGIContext& ctx)
} else if (OB_ISNULL(get_plan())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("Get unexpected null", K(ret), K(get_plan()));
} else if (LOG_TABLE_SCAN != get_type() && LOG_JOIN != get_type() && LOG_SET != get_type() &&
LOG_GROUP_BY != get_type() && LOG_DISTINCT != get_type() && LOG_SUBPLAN_FILTER != get_type() &&
LOG_WINDOW_FUNCTION != get_type() && LOG_UPDATE != get_type() && LOG_DELETE != get_type() &&
LOG_INSERT != get_type() && LOG_MERGE != get_type() && LOG_FOR_UPD != get_type()) {
} else if (LOG_TABLE_SCAN != get_type()
&& LOG_JOIN != get_type()
&& LOG_SET != get_type()
&& LOG_GROUP_BY != get_type()
&& LOG_DISTINCT != get_type()
&& LOG_LIMIT != get_type()
&& LOG_SUBPLAN_FILTER != get_type()
&& LOG_WINDOW_FUNCTION != get_type()
&& LOG_UPDATE != get_type()
&& LOG_DELETE != get_type()
&& LOG_INSERT != get_type()
&& LOG_MERGE != get_type()
&& LOG_FOR_UPD != get_type()) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("Only special op can allocate a granule iterator", K(get_type()));
} else {
@ -6407,6 +6416,8 @@ int ObLogicalOperator::push_down_limit(AllocExchContext* ctx, ObRawExpr* limit_c
}
// push down limit expr
if (OB_SUCC(ret)) {
bool need_child_limit = true;
bool need_pws_limit = false;
if (OB_ISNULL(child = exchange_point->get_child(first_child))) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("get unexpected null", K(exchange_point), K(ret));
@ -6414,10 +6425,21 @@ int ObLogicalOperator::push_down_limit(AllocExchContext* ctx, ObRawExpr* limit_c
!is_virtual_table(static_cast<ObLogTableScan*>(child)->get_ref_table_id()) &&
NULL == static_cast<ObLogTableScan*>(child)->get_limit_expr()) {
// Do NOT allocate LIMIT operator, and push down limit onto table scan directly.
ObLogTableScan* table_scan = static_cast<ObLogTableScan*>(child);
ObLogTableScan *table_scan = static_cast<ObLogTableScan *>(child);
table_scan->set_limit_offset(new_limit_count_expr, NULL);
} else {
if (OB_FAIL(exchange_point->allocate_limit_below(first_child, new_limit_count_expr))) {
need_child_limit = true;
} else if (child->is_partition_wise()) {
ObLogicalOperator *exch = NULL;
if (ctx->exchange_allocated_ && OB_FAIL(child->find_first_recursive(LOG_EXCHANGE, exch))) {
LOG_WARN("failed to find first exchange", K(ret));
} else if (exch == NULL) {
need_pws_limit = true;
}
}
if (OB_SUCC(ret) && need_child_limit) {
if (OB_FAIL(exchange_point->allocate_limit_below(first_child,
new_limit_count_expr))) {
LOG_WARN("failed to allocte limit below", K(ret));
} else if (OB_ISNULL(child_limit = exchange_point->get_child(first_child))) {
ret = OB_ERR_UNEXPECTED;
@ -6433,6 +6455,27 @@ int ObLogicalOperator::push_down_limit(AllocExchContext* ctx, ObRawExpr* limit_c
child_limit->set_width(get_width());
}
}
if (OB_SUCC(ret) && need_pws_limit) {
ObLogicalOperator *pws_limit = NULL;
if (OB_FAIL(child_limit->allocate_limit_below(first_child,
new_limit_count_expr))) {
LOG_WARN("failed to allocte limit below", K(ret));
} else if (OB_ISNULL(pws_limit = child_limit->get_child(first_child))) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("get unexpected error", K(pws_limit), K(child), K(ret));
} else if (OB_FAIL(pws_limit->set_expected_ordering(get_expected_ordering()))) {
LOG_WARN("failed to set expected ordering", K(ret));
} else if (OB_FAIL(pws_limit->replace_generated_agg_expr(ctx->group_push_down_replaced_exprs_))) {
LOG_WARN("failed to replace agg expr", K(ret));
} else {
static_cast<ObLogLimit*>(pws_limit)->set_fetch_with_ties(is_fetch_with_ties);
pws_limit->set_card(get_card());
pws_limit->set_op_cost(get_op_cost());
pws_limit->set_width(get_width());
pws_limit->set_is_partition_wise(true);
}
}
}
}
return ret;

View File

@ -3568,33 +3568,43 @@ int ObOptimizerUtil::flip_op_type(const ObItemType expr_type, ObItemType& rotate
return ret;
}
int ObOptimizerUtil::get_rownum_filter_info(
ObRawExpr* rownum_expr, ObItemType& expr_type, ObRawExpr*& const_expr, bool& is_const_filter)
int ObOptimizerUtil::get_rownum_filter_info(ObRawExpr *rownum_cond,
ObItemType &expr_type,
ObRawExpr *&rownum_expr,
ObRawExpr *&const_expr,
bool &is_const_filter)
{
int ret = OB_SUCCESS;
is_const_filter = false;
ObRawExpr* first_param = NULL;
ObRawExpr* second_param = NULL;
if (OB_ISNULL(rownum_expr)) {
ObRawExpr *first_param = NULL;
ObRawExpr *second_param = NULL;
if (OB_ISNULL(rownum_cond)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("expr is null", K(ret));
} else if (rownum_expr->is_op_expr() && 2 == rownum_expr->get_param_count() &&
(IS_RANGE_CMP_OP(rownum_expr->get_expr_type()) || T_OP_EQ == rownum_expr->get_expr_type())) {
first_param = rownum_expr->get_param_expr(0);
second_param = rownum_expr->get_param_expr(1);
} else if (rownum_cond->is_op_expr() && 2 == rownum_cond->get_param_count() &&
(IS_RANGE_CMP_OP(rownum_cond->get_expr_type())
|| T_OP_EQ == rownum_cond->get_expr_type())) {
first_param = rownum_cond->get_param_expr(0);
second_param = rownum_cond->get_param_expr(1);
if (OB_ISNULL(first_param) || OB_ISNULL(second_param)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("null expr", K(ret), K(first_param), K(second_param));
} else if (first_param->has_flag(IS_ROWNUM) && second_param->has_flag(IS_CONST) &&
(second_param->get_result_type().is_int() || second_param->get_result_type().is_number())) {
expr_type = rownum_expr->get_expr_type();
} else if (first_param->has_flag(IS_ROWNUM) &&
second_param->has_flag(IS_CONST) &&
(second_param->get_result_type().is_int()
|| second_param->get_result_type().is_number())) {
expr_type = rownum_cond->get_expr_type();
rownum_expr = first_param;
const_expr = second_param;
is_const_filter = true;
} else if (first_param->has_flag(IS_CONST) && second_param->has_flag(IS_ROWNUM) &&
(first_param->get_result_type().is_int() || first_param->get_result_type().is_number())) {
if (OB_FAIL(flip_op_type(rownum_expr->get_expr_type(), expr_type))) {
} else if (first_param->has_flag(IS_CONST) &&
second_param->has_flag(IS_ROWNUM) &&
(first_param->get_result_type().is_int()
|| first_param->get_result_type().is_number())) {
if (OB_FAIL(flip_op_type(rownum_cond->get_expr_type(), expr_type))) {
LOG_WARN("failed to retate rownum_expr type", K(ret));
} else {
rownum_expr = second_param;
const_expr = first_param;
is_const_filter = true;
}

View File

@ -465,7 +465,7 @@ public:
static int flip_op_type(const ObItemType expr_type, ObItemType& rotated_expr_type);
static int get_rownum_filter_info(
ObRawExpr* rownum_expr, ObItemType& expr_type, ObRawExpr*& const_expr, bool& is_const_filter);
ObRawExpr* rownum_expr, ObItemType& expr_type, ObRawExpr*& rownum, ObRawExpr*& const_expr, bool& is_const_filter);
static int convert_rownum_filter_as_offset(ObRawExprFactory& expr_factory, ObSQLSessionInfo* session_info,
const ObItemType filter_type, ObRawExpr* const_expr, ObRawExpr*& offset_int_expr);

View File

@ -1306,8 +1306,10 @@ int ObTableLocation::init_table_location(ObSqlSchemaGuard& schema_guard, uint64_
return ret;
}
int ObTableLocation::init_table_location_with_rowkey(ObSqlSchemaGuard& schema_guard, uint64_t table_id,
ObSQLSessionInfo& session_info, const bool is_dml_table /*= false*/)
int ObTableLocation::init_table_location_with_rowkey(ObSqlSchemaGuard &schema_guard,
uint64_t table_id,
ObSQLSessionInfo &session_info,
const bool is_dml_table /*= true*/)
{
int ret = OB_SUCCESS;
ObSchemaChecker schema_checker;
@ -5415,8 +5417,11 @@ int ObTableLocation::calc_partition_ids_by_range(ObExecContext& exec_ctx, ObPart
return ret;
}
int ObTableLocation::init_table_location_with_row_desc(
ObSqlSchemaGuard& schema_guard, uint64_t table_id, RowDesc& input_row_desc, ObSQLSessionInfo& session_info)
int ObTableLocation::init_table_location_with_row_desc(ObSqlSchemaGuard &schema_guard,
uint64_t table_id,
RowDesc &input_row_desc,
ObSQLSessionInfo &session_info,
const bool is_dml_table)
{
int ret = OB_SUCCESS;
ObSchemaChecker schema_checker;
@ -5466,12 +5471,12 @@ int ObTableLocation::init_table_location_with_row_desc(
*delete_stmt, real_table_id, expr_factory, input_row_desc, row_desc))) {
LOG_WARN("generate rowkey desc failed", K(ret), K(real_table_id));
} else if (OB_FAIL(init_table_location(schema_guard,
real_table_id,
real_table_id,
*delete_stmt,
row_desc,
false,
default_asc_direction()))) {
real_table_id,
real_table_id,
*delete_stmt,
row_desc,
is_dml_table,
default_asc_direction()))) {
LOG_WARN("init table location failed", K(ret), K(real_table_id));
} else if (OB_FAIL(clear_columnlized_in_row_desc(row_desc))) {
LOG_WARN("Failed to clear columnlized in row desc", K(ret));

View File

@ -648,7 +648,7 @@ public:
int init_table_location(ObSqlSchemaGuard& schema_guard, uint64_t table_id, uint64_t ref_table_id, ObDMLStmt& stmt,
RowDesc& row_desc, const bool is_dml_table, const ObOrderDirection& direction = default_asc_direction());
int init_table_location_with_rowkey(ObSqlSchemaGuard& schema_guard, uint64_t table_id, ObSQLSessionInfo& session_info,
const bool is_dml_table = false);
const bool is_dml_table = true);
int calculate_partition_ids_by_row(ObExecContext& exec_ctx, ObPartMgr* part_mgr, const common::ObNewRow& row,
ObIArray<int64_t>& part_ids, int64_t& part_idx) const;
int calculate_partition_id_by_row(
@ -817,7 +817,8 @@ public:
const ObSqlExpression* gen_col_expr = NULL) const;
int init_table_location_with_row_desc(
ObSqlSchemaGuard& schema_guard, uint64_t table_id, RowDesc& input_row_desc, ObSQLSessionInfo& session_info);
ObSqlSchemaGuard& schema_guard, uint64_t table_id, RowDesc& input_row_desc, ObSQLSessionInfo& session_info,
const bool is_dml_table);
int generate_row_desc_from_row_desc(ObDMLStmt& stmt, const uint64_t data_table_id, ObRawExprFactory& expr_factory,
const RowDesc& input_row_desc, RowDesc& row_desc);

View File

@ -325,7 +325,7 @@ int ObDMLResolver::resolve_columns_field_list_first(
} else if (select_item_expr->is_column_ref_expr()) {
ObColumnRefRawExpr* column_ref_expr = static_cast<ObColumnRefRawExpr*>(select_item_expr);
if (ObCharset::case_insensitive_equal(sel_stmt->get_select_item(j).is_real_alias_
? column_ref_expr->get_alias_column_name()
? sel_stmt->get_select_item(j).alias_name_
: column_ref_expr->get_column_name(),
columns.at(i).col_name_)) {
if (found) {
@ -4242,8 +4242,6 @@ int ObDMLResolver::do_resolve_subquery_info(const ObSubQueryInfo& subquery_info,
LOG_WARN("Unknown statement type in subquery", "stmt_type", subquery_info.sub_query_->type_);
} else if (OB_FAIL(child_resolver.resolve_child_stmt(*(subquery_info.sub_query_)))) {
LOG_WARN("resolve select subquery failed", K(ret));
} else if (OB_FAIL(try_add_remove_const_epxr(*child_resolver.get_child_stmt()))) {
LOG_WARN("try add subquery value expr failed", K(ret));
} else {
sub_stmt = child_resolver.get_child_stmt();
subquery_info.ref_expr_->set_output_column(sub_stmt->get_select_item_size());
@ -4281,22 +4279,7 @@ int ObDMLResolver::try_add_remove_const_epxr(ObSelectStmt& stmt)
int ret = OB_SUCCESS;
CK(NULL != session_info_);
CK(NULL != params_.expr_factory_);
// do not add remove_const for select ... from dual.
bool is_from_dual = false;
ObSelectStmt* ref_query = NULL;
if (0 == stmt.get_having_expr_size() && 0 == stmt.get_condition_size()) {
if (0 == stmt.get_from_item_size()) {
is_from_dual = true;
} else if (stmt.is_single_table_stmt() && OB_NOT_NULL(stmt.get_table_item(0)) &&
stmt.get_table_item(0)->is_generated_table() &&
OB_NOT_NULL(ref_query = stmt.get_table_item(0)->ref_query_) && 0 == ref_query->get_from_item_size() &&
0 == ref_query->get_having_expr_size() && 0 == ref_query->get_condition_size() &&
1 == ref_query->get_select_item_size() && OB_NOT_NULL(ref_query->get_select_item(0).expr_) &&
ref_query->get_select_item(0).expr_->is_const_expr()) {
is_from_dual = true;
}
}
if (OB_SUCC(ret) && session_info_->use_static_typing_engine() && !is_from_dual) {
if (OB_SUCC(ret) && session_info_->use_static_typing_engine()) {
for (int64_t i = 0; OB_SUCC(ret) && i < stmt.get_select_item_size(); ++i) {
ObRawExpr*& expr = stmt.get_select_item(i).expr_;
CK(NULL != expr);

View File

@ -112,8 +112,8 @@ int ObExprRelationAnalyzer::init_expr_info(ObRawExpr& expr)
LOG_WARN("failed to add relation ids", K(ret));
}
}
} else if (OB_FAIL(expr.is_query_ref_expr())) {
ObQueryRefRawExpr& query = static_cast<ObQueryRefRawExpr&>(expr);
} else if (expr.is_query_ref_expr()) {
ObQueryRefRawExpr &query = static_cast<ObQueryRefRawExpr &>(expr);
if (OB_FAIL(query_exprs_.push_back(&query))) {
LOG_WARN("failed to push back query ref", K(ret));
} else if (OB_UNLIKELY(!query.is_ref_stmt())) {

View File

@ -589,6 +589,8 @@ int ObPredicateDeduce::find_similar_expr(
ObRawExpr* pred, ObIArray<ObRawExpr*>& general_preds, ObIArray<ObRawExpr*>& first_params)
{
int ret = OB_SUCCESS;
ObExprEqualCheckContext equal_ctx;
equal_ctx.override_const_compare_ = true;
if (OB_ISNULL(pred)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid param expr", K(ret), K(pred));
@ -609,7 +611,7 @@ int ObPredicateDeduce::find_similar_expr(
ret = OB_ERR_UNEXPECTED;
LOG_WARN("param expr is null", K(ret));
} else {
is_similar = param1->same_as(*param2);
is_similar = param1->same_as(*param2, &equal_ctx);
}
}
}

View File

@ -533,50 +533,50 @@ int ObTransformAggrSubquery::deduce_query_values(ObDMLStmt& stmt, TransformParam
int ret = OB_SUCCESS;
ObRawExpr* not_null_expr = param.not_null_expr_;
ObIArray<bool>& is_null_prop = param.is_null_prop_;
const bool is_outer_join = use_outer_join(param.pullup_flag_);
if (OB_ISNULL(ctx_->session_info_) || OB_UNLIKELY(is_null_prop.count() > view_select.count()) ||
OB_UNLIKELY(is_null_prop.count() > view_columns.count())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid session or invalid array size", K(ret));
} else if (OB_FAIL(real_values.assign(view_columns))) {
LOG_WARN("failed to assign real values", K(ret));
} else if (use_outer_join(param.pullup_flag_)) {
for (int64_t i = 0; OB_SUCC(ret) && i < is_null_prop.count(); ++i) {
// replace_columns_and_aggrs() may change expr result type, e.g.: sum() from ObNumberType
// to ObNullType. This may cause operand implicit cast be added twice, so we erase it first.
ObRawExpr* default_expr = NULL;
if (is_null_prop.at(i)) {
continue;
} else if (OB_ISNULL(not_null_expr)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("not null expr is null", K(ret));
} else if (OB_FAIL(ObRawExprUtils::copy_expr(
*ctx_->expr_factory_, view_select.at(i), default_expr, COPY_REF_DEFAULT))) {
LOG_WARN("failed to copy select expr", K(ret));
} else if (OB_FAIL(ObRawExprUtils::erase_operand_implicit_cast(default_expr, default_expr))) {
LOG_WARN("remove operand implicit cast failed", K(ret));
} else if (OB_FAIL(replace_columns_and_aggrs(default_expr, ctx_))) {
LOG_WARN("failed to replace variables", K(ret));
}
}
for (int64_t i = 0; OB_SUCC(ret) && i < is_null_prop.count(); ++i) {
// replace_columns_and_aggrs() may change expr result type, e.g.: sum() from ObNumberType
// to ObNullType. This may cause operand implicit cast be added twice, so we erase it first.
ObRawExpr* default_expr = NULL;
if (OB_FAIL(real_values.push_back(view_columns.at(i)))) {
LOG_WARN("failed to push back view columns", K(ret));
} else if (is_null_prop.at(i) || !is_outer_join) {
continue;
} else if (OB_ISNULL(not_null_expr)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("not null expr is null", K(ret));
} else if (OB_FAIL(ObRawExprUtils::copy_expr(
*ctx_->expr_factory_, view_select.at(i), default_expr, COPY_REF_DEFAULT))) {
LOG_WARN("failed to copy select expr", K(ret));
} else if (OB_FAIL(ObRawExprUtils::erase_operand_implicit_cast(default_expr, default_expr))) {
LOG_WARN("remove operand implicit cast failed", K(ret));
} else if (OB_FAIL(replace_columns_and_aggrs(default_expr, ctx_))) {
LOG_WARN("failed to replace variables", K(ret));
}
if (OB_SUCC(ret) && ctx_->session_info_->use_static_typing_engine()) {
// After replace, result type of %default_expr may differ with original expr,
// may cause unexpected implicit cast be added too, cast to original expr type first.
if (OB_FAIL(default_expr->formalize(ctx_->session_info_))) {
LOG_WARN("formalize expr failed", K(ret));
} else if (OB_FAIL(ObRawExprUtils::try_add_cast_expr_above(ctx_->expr_factory_,
ctx_->session_info_,
*default_expr,
view_select.at(i)->get_result_type(),
default_expr))) {
LOG_WARN("try add cast expr failed", K(ret));
}
if (OB_SUCC(ret) && ctx_->session_info_->use_static_typing_engine()) {
// After replace, result type of %default_expr may differ with original expr,
// may cause unexpected implicit cast be added too, cast to original expr type first.
if (OB_FAIL(default_expr->formalize(ctx_->session_info_))) {
LOG_WARN("formalize expr failed", K(ret));
} else if (OB_FAIL(ObRawExprUtils::try_add_cast_expr_above(ctx_->expr_factory_,
ctx_->session_info_,
*default_expr,
view_select.at(i)->get_result_type(),
default_expr))) {
LOG_WARN("try add cast expr failed", K(ret));
}
}
if (OB_SUCC(ret)) {
if (OB_FAIL(ObTransformUtils::build_case_when_expr(
stmt, not_null_expr, view_columns.at(i), default_expr, real_values.at(i), ctx_))) {
LOG_WARN("failed to build case when expr", K(ret));
}
if (OB_SUCC(ret)) {
if (OB_FAIL(ObTransformUtils::build_case_when_expr(
stmt, not_null_expr, view_columns.at(i), default_expr, real_values.at(i), ctx_))) {
LOG_WARN("failed to build case when expr", K(ret));
}
}
}
@ -1392,6 +1392,10 @@ int ObTransformAggrSubquery::is_const_null_value(
LOG_WARN("failed to check is question mark pre param", K(ret));
} else if (OB_UNLIKELY(!is_pre_param)) {
// do nothing pre-calc expr
} else if (OB_UNLIKELY(value.get_unknown() < 0 ||
value.get_unknown() >= plan_ctx->get_param_store().count())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("param index is invalid", K(ret), K(value.get_unknown()), K(plan_ctx->get_param_store().count()));
} else if (plan_ctx->get_param_store().at(value.get_unknown()).is_null()) {
// do nothing null param expr
} else {
@ -1650,4 +1654,4 @@ int ObTransformAggrSubquery::check_subquery_semi_conditions(ObSelectStmt &subque
}
}
return ret;
}
}

View File

@ -3618,27 +3618,35 @@ int ObTransformPreProcess::try_transform_common_rownum_as_limit(ObDMLStmt* stmt,
{
int ret = OB_SUCCESS;
limit_expr = NULL;
ObRawExpr* limit_value = NULL;
ObRawExpr *my_rownum = NULL;
ObRawExpr *limit_value = NULL;
ObItemType op_type = T_INVALID;
bool is_valid = false;
if (OB_ISNULL(stmt) || OB_ISNULL(ctx_) || OB_ISNULL(ctx_->expr_factory_) || OB_ISNULL(ctx_->session_info_)) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("unexpected null", K(ret));
} else if (OB_FAIL(stmt->get_rownum_expr(my_rownum))) {
LOG_WARN("failed to get my rownum expr", K(ret));
} else {
ObIArray<ObRawExpr*>& conditions = stmt->get_condition_exprs();
int64_t expr_idx = -1;
bool is_eq_cond = false;
bool is_const_filter = false;
for (int64_t i = 0; OB_SUCC(ret) && !is_eq_cond && i < conditions.count(); ++i) {
ObRawExpr* cond_expr = NULL;
ObRawExpr* const_expr = NULL;
ObRawExpr *cond_expr = NULL;
ObRawExpr *const_expr = NULL;
ObRawExpr *rownum_expr = NULL;
ObItemType expr_type = T_INVALID;
if (OB_ISNULL(cond_expr = conditions.at(i))) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("condition expr is null", K(ret), K(i));
} else if (OB_FAIL(ObOptimizerUtil::get_rownum_filter_info(cond_expr, expr_type, const_expr, is_const_filter))) {
} else if (OB_FAIL(ObOptimizerUtil::get_rownum_filter_info(cond_expr,
expr_type,
rownum_expr,
const_expr,
is_const_filter))) {
LOG_WARN("failed to check is filter rownum", K(ret));
} else if (!is_const_filter) {
} else if (!is_const_filter || rownum_expr != my_rownum) {
// do nothing
} else if (T_OP_LE == expr_type || T_OP_LT == expr_type) {
limit_value = const_expr;
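The rewritten loop accepts a rownum condition as a limit candidate only when the rownum returned by get_rownum_filter_info is the statement's own rownum expression; a rownum belonging to another query level is left as an ordinary filter. A tiny identity-comparison sketch (Expr is a placeholder type, not ObRawExpr):

#include <iostream>

struct Expr { const char *name; };

int main() {
  Expr outer_rownum{"rownum of the current stmt"};
  Expr inner_rownum{"rownum of some other query level"};

  Expr *my_rownum = &outer_rownum;    // from stmt->get_rownum_expr(my_rownum)
  Expr *cond_rownum = &inner_rownum;  // returned by get_rownum_filter_info

  // Only rewrite the condition as a limit when both refer to the same node.
  const bool can_rewrite = (cond_rownum == my_rownum);
  std::cout << std::boolalpha << can_rewrite << "\n";  // false: keep it as a filter
  return 0;
}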

View File

@ -921,6 +921,9 @@ int ObTransformSimplify::extract_null_expr(
LOG_WARN("failed to check is question mark pre param", K(ret));
} else if (!is_pre_param) {
// do nothing
} else if (OB_UNLIKELY(value.get_unknown() < 0 || value.get_unknown() >= param_store.count())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid param index", K(ret), K(value.get_unknown()), K(param_store.count()));
} else if (param_store.at(value.get_unknown()).is_null()) {
if (OB_FAIL(null_expr_lists.push_back(expr))) {
LOG_WARN("failed to push back expr", K(ret));

View File

@ -916,23 +916,32 @@ int ObTransformUtils::replace_expr(
{
int ret = OB_SUCCESS;
if (OB_NOT_NULL(expr)) {
ObRawExpr* temp_expr = NULL;
ObRawExpr *temp_old_expr = NULL;
int64_t idx = -1;
if (!ObOptimizerUtil::find_item(other_exprs, expr, &idx)) {
/*do nothing*/
} else if (OB_UNLIKELY(idx < 0 || idx >= new_exprs.count()) || OB_ISNULL(new_exprs.at(idx))) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid index", K(ret), K(idx), K(new_exprs.count()), K(new_exprs));
} else {
expr = new_exprs.at(idx);
temp_expr = other_exprs.at(idx);
const_cast<ObIArray<ObRawExpr*>&>(other_exprs).at(idx) = NULL;
if (ObOptimizerUtil::find_item(other_exprs, expr, &idx)) {
if (OB_UNLIKELY(idx < 0 || idx >= new_exprs.count()) ||
OB_ISNULL(new_exprs.at(idx))) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid index", K(ret), K(idx), K(new_exprs.count()), K(new_exprs));
} else {
expr = new_exprs.at(idx);
temp_old_expr = other_exprs.at(idx);
const_cast<ObIArray<ObRawExpr*>&>(other_exprs).at(idx) = NULL;
}
} else if (ObOptimizerUtil::find_item(new_exprs, expr, &idx)) {
if (OB_UNLIKELY(idx < 0 || idx >= other_exprs.count())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid index", K(ret), K(idx), K(new_exprs), K(other_exprs));
} else {
temp_old_expr = other_exprs.at(idx);
const_cast<ObIArray<ObRawExpr*>&>(other_exprs).at(idx) = NULL;
}
}
if (OB_SUCC(ret)) {
if (OB_FAIL(expr->replace_expr(other_exprs, new_exprs))) {
LOG_WARN("failed to replace expr", K(ret));
} else if (NULL != temp_expr) {
const_cast<ObIArray<ObRawExpr*>&>(other_exprs).at(idx) = temp_expr;
} else if (NULL != temp_old_expr) {
const_cast<ObIArray<ObRawExpr*>&>(other_exprs).at(idx) = temp_old_expr;
}
}
}
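replace_expr now matches the incoming expression against both the old and the new side of the mapping; in either case the matching old entry is blanked out before the recursive replacement over the expression's children runs, and restored afterwards. A standalone sketch of that masking, with std::string values standing in for expression pointers:

#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> old_exprs = {"c1", "c2"};
  std::vector<std::string> new_exprs = {"v.c1", "v.c2"};
  const std::string expr = "v.c1";  // already equals one of the "new" expressions

  std::string saved;
  std::size_t masked_idx = old_exprs.size();
  for (std::size_t i = 0; i < old_exprs.size(); ++i) {
    if (old_exprs[i] == expr || new_exprs[i] == expr) {
      saved = old_exprs[i];
      old_exprs[i].clear();  // temporarily drop this pair from the mapping
      masked_idx = i;
      break;
    }
  }
  // ... the recursive replacement over expr's children would run here ...
  if (masked_idx < old_exprs.size()) {
    old_exprs[masked_idx] = saved;  // restore the mapping entry
  }
  std::cout << old_exprs[0] << "\n";  // prints: c1
  return 0;
}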

View File

@ -1095,7 +1095,9 @@ int ObWhereSubQueryPullup::pullup_correlated_subquery_as_view(
subquery->set_limit_offset(NULL, NULL);
}
if (OB_FAIL(subquery->get_select_exprs(right_hand_exprs))) {
if (OB_FAIL(ret)) {
// do nothing
} else if (OB_FAIL(subquery->get_select_exprs(right_hand_exprs))) {
LOG_WARN("failed to get select exprs", K(ret));
} else if (OB_FAIL(generate_conditions(stmt, right_hand_exprs, subquery, expr, new_conditions))) {
// create conditions with left_hand and subquery's original targetlist
@ -2048,15 +2050,19 @@ int ObWhereSubQueryPullup::generate_conditions(ObDMLStmt* stmt, ObIArray<ObRawEx
}
right_vector = row_expr;
}
if (OB_SUCC(ret) && OB_FAIL(ObRawExprUtils::create_double_op_expr(
*expr_factory, ctx_->session_info_, oper_type, cmp_expr, left_hand, right_vector))) {
LOG_WARN("failed to create comparison expr", K(ret));
} else if (!expr->has_flag(IS_WITH_ALL)) {
// do nothing
} else if (OB_FAIL(ObRawExprUtils::build_lnnvl_expr(*expr_factory, cmp_expr, lnnvl_expr))) {
LOG_WARN("failed to build lnnvl expr", K(ret));
} else {
cmp_expr = lnnvl_expr;
if (OB_SUCC(ret)) {
if (OB_FAIL(ObRawExprUtils::create_double_op_expr(
*expr_factory, ctx_->session_info_, oper_type,
cmp_expr, left_hand, right_vector))) {
LOG_WARN("failed to create comparison expr", K(ret));
} else if (!expr->has_flag(IS_WITH_ALL)) {
// do nothing
} else if (OB_FAIL(ObRawExprUtils::build_lnnvl_expr(
*expr_factory, cmp_expr, lnnvl_expr))) {
LOG_WARN("failed to build lnnvl expr", K(ret));
} else {
cmp_expr = lnnvl_expr;
}
}
if (OB_SUCC(ret)) {
if (OB_FAIL(cmp_expr->formalize(ctx_->session_info_))) {
@ -2139,17 +2145,28 @@ int ObWhereSubQueryPullup::transform_single_set_query(ObDMLStmt* stmt, bool& tra
}
}
for (int64_t i = 0; OB_SUCC(ret) && i < cond_exprs.count(); ++i) {
ObSEArray<ObQueryRefRawExpr*, 4> queries;
ObSEArray<const ObRawExpr*, 1> tmp;
ObSEArray<ObQueryRefRawExpr *, 4> queries;
bool is_null_reject = false;
if (OB_FAIL(get_single_set_subquery(stmt->get_current_level(), cond_exprs.at(i), queries))) {
LOG_WARN("failed to get single set subquery", K(ret));
}
for (int64_t j = 0; OB_SUCC(ret) && j < queries.count(); ++j) {
if (is_vector_query(queries.at(j))) {
ObSEArray<const ObRawExpr *, 1> tmp;
if (OB_ISNULL(queries.at(j)) ||
OB_UNLIKELY(!queries.at(j)->is_ref_stmt()) ||
OB_ISNULL(queries.at(j)->get_ref_stmt())) {
ret = OB_ERR_UNEXPECTED;
LOG_WARN("invalid subquery", K(ret));
} else if (is_vector_query(queries.at(j))) {
// not necessary limitation
} else if (OB_FAIL(ObTransformUtils::is_null_reject_condition(cond_exprs.at(i), tmp, is_null_reject))) {
} else if (OB_FAIL(tmp.push_back(queries.at(j)))) {
LOG_WARN("failed to push back query", K(ret));
} else if (OB_FAIL(ObTransformUtils::is_null_reject_condition(
cond_exprs.at(i), tmp, is_null_reject))) {
LOG_WARN("failed to check is null reject condition", K(ret));
} else if (!is_null_reject &&
queries.at(j)->get_ref_stmt()->get_semi_info_size() > 0) {
// do nothing
} else if (OB_FAIL(unnest_single_set_subquery(stmt, queries.at(j), !is_null_reject, false))) {
LOG_WARN("failed to unnest single set subquery", K(ret));
} else {

View File

@ -855,7 +855,15 @@ public:
void set_session_in_retry(ObSessionRetryStatus is_retry)
{
LockGuard lock_guard(thread_data_mutex_);
thread_data_.is_in_retry_ = is_retry;
if (OB_LIKELY(SESS_NOT_IN_RETRY == is_retry ||
SESS_IN_RETRY_FOR_DUP_TBL != thread_data_.is_in_retry_)) {
thread_data_.is_in_retry_ = is_retry;
} else {
// if the last retry is for duplicate table
// and the SQL is retried again
// we still keep the retry for dup table status.
thread_data_.is_in_retry_ = SESS_IN_RETRY_FOR_DUP_TBL;
}
}
void set_session_in_retry(bool is_retry, int ret)
@ -868,8 +876,7 @@ public:
} else {
status = SESS_IN_RETRY;
}
LockGuard lock_guard(thread_data_mutex_);
thread_data_.is_in_retry_ = status;
set_session_in_retry(status);
}
bool get_is_in_retry()
{
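The comment in the first hunk describes the intended state machine: once a retry has been triggered for a duplicate table, a later plain retry must not downgrade the status, and only SESS_NOT_IN_RETRY clears it. A standalone sketch of that rule (the enum here is a local stand-in, not the definition from the session header):

#include <cassert>

enum ObSessionRetryStatus {
  SESS_NOT_IN_RETRY,
  SESS_IN_RETRY,
  SESS_IN_RETRY_FOR_DUP_TBL,
};

// Mirrors the branch above: keep SESS_IN_RETRY_FOR_DUP_TBL sticky across plain
// retries; only an explicit SESS_NOT_IN_RETRY resets it.
static ObSessionRetryStatus next_status(ObSessionRetryStatus current,
                                        ObSessionRetryStatus incoming) {
  if (SESS_NOT_IN_RETRY == incoming || SESS_IN_RETRY_FOR_DUP_TBL != current) {
    return incoming;
  }
  return SESS_IN_RETRY_FOR_DUP_TBL;
}

int main() {
  assert(next_status(SESS_IN_RETRY_FOR_DUP_TBL, SESS_IN_RETRY) == SESS_IN_RETRY_FOR_DUP_TBL);
  assert(next_status(SESS_IN_RETRY_FOR_DUP_TBL, SESS_NOT_IN_RETRY) == SESS_NOT_IN_RETRY);
  assert(next_status(SESS_NOT_IN_RETRY, SESS_IN_RETRY) == SESS_IN_RETRY);
  return 0;
}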

View File

@ -558,23 +558,26 @@ Outputs & filters:
SQL: select t1.c1 from t1, (select * from t2 where c2>1 order by c1 limit 10) as t where t1.c1=t.c1;
================================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------------
|0 |PX COORDINATOR | |10 |412 |
|1 | EXCHANGE OUT DISTR |:EX10002|10 |411 |
|2 | NESTED-LOOP JOIN | |10 |411 |
|3 | EXCHANGE IN DISTR | |10 |44 |
|4 | EXCHANGE OUT DISTR (PKEY) |:EX10001|10 |44 |
|5 | SUBPLAN SCAN |t |10 |44 |
|6 | LIMIT | |10 |42 |
|7 | EXCHANGE IN MERGE SORT DISTR| |10 |41 |
|8 | EXCHANGE OUT DISTR |:EX10000|10 |40 |
|9 | PX PARTITION ITERATOR | |10 |40 |
|10| TABLE SCAN |t2 |10 |40 |
|11| PX PARTITION ITERATOR | |1 |36 |
|12| TABLE GET |t1 |1 |36 |
================================================================
=================================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------------------
|0 |PX COORDINATOR | |10 |425 |
|1 | EXCHANGE OUT DISTR |:EX10002|10 |424 |
|2 | NESTED-LOOP JOIN | |10 |424 |
|3 | EXCHANGE IN DISTR | |10 |57 |
|4 | EXCHANGE OUT DISTR (PKEY) |:EX10001|10 |57 |
|5 | MATERIAL | |10 |57 |
|6 | SUBPLAN SCAN |t |10 |55 |
|7 | LIMIT | |10 |54 |
|8 | EXCHANGE IN MERGE SORT DISTR| |10 |52 |
|9 | EXCHANGE OUT DISTR |:EX10000|10 |51 |
|10| LIMIT | |10 |51 |
|11| TOP-N SORT | |10 |50 |
|12| PX PARTITION ITERATOR | |10 |40 |
|13| TABLE SCAN |t2 |10 |40 |
|14| PX PARTITION ITERATOR | |1 |36 |
|15| TABLE GET |t1 |1 |36 |
=================================================================
Outputs & filters:
-------------------------------------
@ -584,21 +587,24 @@ Outputs & filters:
conds(nil), nl_params_([t.c1]), batch_join=false
3 - output([t.c1], [PARTITION_ID]), filter(nil)
4 - (#keys=1, [t.c1]), output([t.c1], [PARTITION_ID]), filter(nil), is_single, dop=1
5 - output([t.c1]), filter(nil),
5 - output([t.c1]), filter(nil)
6 - output([t.c1]), filter(nil),
access([t.c1])
6 - output([t2.c1]), filter(nil), limit(10), offset(nil)
7 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), Local Order
8 - output([t2.c1]), filter(nil), dop=1
9 - output([t2.c1]), filter(nil),
7 - output([t2.c1]), filter(nil), limit(10), offset(nil)
8 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC])
9 - output([t2.c1]), filter(nil), dop=1
10 - output([t2.c1]), filter(nil), limit(10), offset(nil)
11 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), topn(10), local merge sort
12 - output([t2.c1]), filter(nil),
force partition granule, asc.
10 - output([t2.c1]), filter([t2.c2 > ?]),
13 - output([t2.c1]), filter([t2.c2 > ?]),
access([t2.c1], [t2.c2]), partitions(p[0-2]),
limit(10), offset(nil),
is_index_back=false, filter_before_indexback[false],
range_key([t2.c1]), range(MIN ; MAX)always true
11 - output([t1.c1]), filter(nil),
14 - output([t1.c1]), filter(nil),
affinitize, force partition granule, asc.
12 - output([t1.c1]), filter(nil),
15 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX),
@ -2555,20 +2561,21 @@ Outputs & filters:
SQL: select t1.c1 from t1 left join t2 on t1.c1 = t2.c1 where exists (select c1 from t3 limit 1);
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |SUBPLAN FILTER | |250 |460 |
|1 | PX COORDINATOR | |500 |354 |
|2 | EXCHANGE OUT DISTR |:EX10000|500 |331 |
|3 | PX PARTITION ITERATOR | |500 |331 |
|4 | TABLE SCAN |t1 |500 |331 |
|5 | LIMIT | |1 |37 |
|6 | PX COORDINATOR | |1 |36 |
|7 | EXCHANGE OUT DISTR |:EX20000|1 |36 |
|8 | PX PARTITION ITERATOR| |1 |36 |
|9 | TABLE SCAN |t3 |1 |36 |
======================================================
=======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------
|0 |SUBPLAN FILTER | |250 |460 |
|1 | PX COORDINATOR | |500 |354 |
|2 | EXCHANGE OUT DISTR |:EX10000|500 |331 |
|3 | PX PARTITION ITERATOR | |500 |331 |
|4 | TABLE SCAN |t1 |500 |331 |
|5 | LIMIT | |1 |37 |
|6 | PX COORDINATOR | |1 |37 |
|7 | EXCHANGE OUT DISTR |:EX20000|1 |37 |
|8 | LIMIT | |1 |37 |
|9 | PX PARTITION ITERATOR| |1 |36 |
|10| TABLE SCAN |t3 |1 |36 |
=======================================================
Outputs & filters:
-------------------------------------
@ -2585,9 +2592,10 @@ Outputs & filters:
5 - output([1]), filter(nil), limit(1), offset(nil)
6 - output([1]), filter(nil)
7 - output([1]), filter(nil), dop=1
8 - output([1]), filter(nil),
force partition granule, asc.
8 - output([1]), filter(nil), limit(1), offset(nil)
9 - output([1]), filter(nil),
force partition granule, asc.
10 - output([1]), filter(nil),
access([t3.c1]), partitions(p[0-1]),
limit(1), offset(nil),
is_index_back=false,

View File

@ -162,24 +162,26 @@ Outputs & filters:
SQL: select c1 from t1 limit 100;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |100 |107 |
|1 | PX COORDINATOR | |100 |93 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |88 |
|3 | PX PARTITION ITERATOR| |100 |88 |
|4 | TABLE SCAN |t1 |100 |88 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |100 |120 |
|1 | PX COORDINATOR | |100 |107 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |102 |
|3 | LIMIT | |100 |102 |
|4 | PX PARTITION ITERATOR| |100 |88 |
|5 | TABLE SCAN |t1 |100 |88 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([t1.c1]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1]), filter(nil)
2 - output([t1.c1]), filter(nil), dop=1
3 - output([t1.c1]), filter(nil),
force partition granule, asc.
3 - output([t1.c1]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1]), filter(nil),
force partition granule, asc.
5 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -191,24 +193,26 @@ Outputs & filters:
SQL: select * from t1 limit 100;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |100 |113 |
|1 | PX COORDINATOR | |100 |100 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |90 |
|3 | PX PARTITION ITERATOR| |100 |90 |
|4 | TABLE SCAN |t1 |100 |90 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |100 |127 |
|1 | PX COORDINATOR | |100 |113 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |104 |
|3 | LIMIT | |100 |104 |
|4 | PX PARTITION ITERATOR| |100 |90 |
|5 | TABLE SCAN |t1 |100 |90 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil)
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
5 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -223,12 +227,13 @@ SQL: select * from t1 order by c1,c2 limit 100;
========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------
|0 |LIMIT | |100 |241 |
|1 | PX COORDINATOR MERGE SORT | |100 |227 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |217 |
|3 | SORT | |100 |217 |
|4 | PX PARTITION ITERATOR | |100 |90 |
|5 | TABLE SCAN |t1 |100 |90 |
|0 |LIMIT | |100 |254 |
|1 | PX COORDINATOR MERGE SORT | |100 |241 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |231 |
|3 | LIMIT | |100 |231 |
|4 | TOP-N SORT | |100 |217 |
|5 | PX PARTITION ITERATOR | |100 |90 |
|6 | TABLE SCAN |t1 |100 |90 |
========================================================
Outputs & filters:
@ -236,10 +241,11 @@ Outputs & filters:
0 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c1, ASC])
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c1, ASC]), local merge sort
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c1, ASC]), topn(100), local merge sort
5 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
6 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -254,12 +260,13 @@ SQL: select c1 from t1 order by c1 limit 100;
========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------
|0 |LIMIT | |100 |214 |
|1 | PX COORDINATOR MERGE SORT | |100 |200 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |196 |
|3 | SORT | |100 |196 |
|4 | PX PARTITION ITERATOR | |100 |88 |
|5 | TABLE SCAN |t1 |100 |88 |
|0 |LIMIT | |100 |228 |
|1 | PX COORDINATOR MERGE SORT | |100 |214 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |209 |
|3 | LIMIT | |100 |209 |
|4 | TOP-N SORT | |100 |196 |
|5 | PX PARTITION ITERATOR | |100 |88 |
|6 | TABLE SCAN |t1 |100 |88 |
========================================================
Outputs & filters:
@ -267,10 +274,11 @@ Outputs & filters:
0 - output([t1.c1]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC])
2 - output([t1.c1]), filter(nil), dop=1
3 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), local merge sort
4 - output([t1.c1]), filter(nil),
force partition granule, asc.
3 - output([t1.c1]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), topn(100), local merge sort
5 - output([t1.c1]), filter(nil),
force partition granule, asc.
6 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -285,12 +293,13 @@ SQL: select c1 from t1 order by c2 limit 100;
=============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------------
|0 |LIMIT | |100 |241 |
|1 | PX COORDINATOR MERGE SORT | |100 |227 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |217 |
|3 | SORT | |100 |217 |
|4 | PX PARTITION ITERATOR | |100 |90 |
|5 | TABLE SCAN |t1(idx_t1_c2)|100 |90 |
|0 |LIMIT | |100 |254 |
|1 | PX COORDINATOR MERGE SORT | |100 |241 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |231 |
|3 | LIMIT | |100 |231 |
|4 | TOP-N SORT | |100 |217 |
|5 | PX PARTITION ITERATOR | |100 |90 |
|6 | TABLE SCAN |t1(idx_t1_c2)|100 |90 |
=============================================================
Outputs & filters:
@ -298,10 +307,11 @@ Outputs & filters:
0 - output([t1.c1]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), local merge sort
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), topn(100), local merge sort
5 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
6 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -316,12 +326,13 @@ SQL: select c1 from t1 where c1 > 0 order by c2 limit 100;
=============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------------
|0 |LIMIT | |100 |965 |
|1 | PX COORDINATOR MERGE SORT | |100 |951 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |942 |
|3 | SORT | |100 |942 |
|4 | PX PARTITION ITERATOR | |100 |815 |
|5 | TABLE SCAN |t1(idx_t1_c2)|100 |815 |
|0 |LIMIT | |100 |979 |
|1 | PX COORDINATOR MERGE SORT | |100 |965 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |956 |
|3 | LIMIT | |100 |956 |
|4 | TOP-N SORT | |100 |942 |
|5 | PX PARTITION ITERATOR | |100 |815 |
|6 | TABLE SCAN |t1(idx_t1_c2)|100 |815 |
=============================================================
Outputs & filters:
@ -329,10 +340,11 @@ Outputs & filters:
0 - output([t1.c1]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), local merge sort
4 - output([t1.c1], [t1.c2]), filter(nil),
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), topn(100), local merge sort
5 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
5 - output([t1.c1], [t1.c2]), filter([t1.c1 > ?]),
6 - output([t1.c1], [t1.c2]), filter([t1.c1 > ?]),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false, filter_before_indexback[false],
@ -347,12 +359,13 @@ SQL: select c1 from t1 where c1 > 0 and c2 < 0 order by c2 limit 100;
=============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------------
|0 |LIMIT | |100 |965 |
|1 | PX COORDINATOR MERGE SORT | |100 |951 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |942 |
|3 | SORT | |100 |942 |
|4 | PX PARTITION ITERATOR | |100 |815 |
|5 | TABLE SCAN |t1(idx_t1_c2)|100 |815 |
|0 |LIMIT | |100 |979 |
|1 | PX COORDINATOR MERGE SORT | |100 |965 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |956 |
|3 | LIMIT | |100 |956 |
|4 | TOP-N SORT | |100 |942 |
|5 | PX PARTITION ITERATOR | |100 |815 |
|6 | TABLE SCAN |t1(idx_t1_c2)|100 |815 |
=============================================================
Outputs & filters:
@ -360,10 +373,11 @@ Outputs & filters:
0 - output([t1.c1]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), local merge sort
4 - output([t1.c1], [t1.c2]), filter(nil),
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), topn(100), local merge sort
5 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
5 - output([t1.c1], [t1.c2]), filter([t1.c1 > ?]),
6 - output([t1.c1], [t1.c2]), filter([t1.c1 > ?]),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false, filter_before_indexback[false],
@ -474,24 +488,26 @@ Outputs & filters:
SQL: select c1, c1 + c2 from t1 where c1 > 0 limit 100;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |100 |113 |
|1 | PX COORDINATOR | |100 |100 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |90 |
|3 | PX PARTITION ITERATOR| |100 |90 |
|4 | TABLE SCAN |t1 |100 |90 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |100 |127 |
|1 | PX COORDINATOR | |100 |113 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |104 |
|3 | LIMIT | |100 |104 |
|4 | PX PARTITION ITERATOR| |100 |90 |
|5 | TABLE SCAN |t1 |100 |90 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([t1.c1], [t1.c1 + t1.c2]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil)
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
5 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -1258,23 +1274,26 @@ Outputs & filters:
SQL: select t1.c1 from t1, (select * from t2 where c2>1 order by c1 limit 10) as t where t1.c1=t.c1;
================================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------------
|0 |PX COORDINATOR | |10 |479 |
|1 | EXCHANGE OUT DISTR |:EX10002|10 |478 |
|2 | NESTED-LOOP JOIN | |10 |478 |
|3 | EXCHANGE IN DISTR | |10 |111 |
|4 | EXCHANGE OUT DISTR (PKEY) |:EX10001|10 |111 |
|5 | SUBPLAN SCAN |t |10 |111 |
|6 | LIMIT | |10 |109 |
|7 | EXCHANGE IN MERGE SORT DISTR| |10 |108 |
|8 | EXCHANGE OUT DISTR |:EX10000|10 |107 |
|9 | PX PARTITION ITERATOR | |10 |107 |
|10| TABLE SCAN |t2 |10 |107 |
|11| PX PARTITION ITERATOR | |1 |36 |
|12| TABLE GET |t1 |1 |36 |
================================================================
=================================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------------------
|0 |PX COORDINATOR | |10 |492 |
|1 | EXCHANGE OUT DISTR |:EX10002|10 |491 |
|2 | NESTED-LOOP JOIN | |10 |491 |
|3 | EXCHANGE IN DISTR | |10 |124 |
|4 | EXCHANGE OUT DISTR (PKEY) |:EX10001|10 |124 |
|5 | MATERIAL | |10 |124 |
|6 | SUBPLAN SCAN |t |10 |122 |
|7 | LIMIT | |10 |121 |
|8 | EXCHANGE IN MERGE SORT DISTR| |10 |119 |
|9 | EXCHANGE OUT DISTR |:EX10000|10 |118 |
|10| LIMIT | |10 |118 |
|11| TOP-N SORT | |10 |117 |
|12| PX PARTITION ITERATOR | |10 |107 |
|13| TABLE SCAN |t2 |10 |107 |
|14| PX PARTITION ITERATOR | |1 |36 |
|15| TABLE GET |t1 |1 |36 |
=================================================================
Outputs & filters:
-------------------------------------
@ -1284,21 +1303,24 @@ Outputs & filters:
conds(nil), nl_params_([t.c1]), batch_join=false
3 - output([t.c1], [PARTITION_ID]), filter(nil)
4 - (#keys=1, [t.c1]), output([t.c1], [PARTITION_ID]), filter(nil), is_single, dop=1
5 - output([t.c1]), filter(nil),
5 - output([t.c1]), filter(nil)
6 - output([t.c1]), filter(nil),
access([t.c1])
6 - output([t2.c1]), filter(nil), limit(10), offset(nil)
7 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), Local Order
8 - output([t2.c1]), filter(nil), dop=1
9 - output([t2.c1]), filter(nil),
7 - output([t2.c1]), filter(nil), limit(10), offset(nil)
8 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC])
9 - output([t2.c1]), filter(nil), dop=1
10 - output([t2.c1]), filter(nil), limit(10), offset(nil)
11 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), topn(10), local merge sort
12 - output([t2.c1]), filter(nil),
force partition granule, asc.
10 - output([t2.c1]), filter([t2.c2 > ?]),
13 - output([t2.c1]), filter([t2.c2 > ?]),
access([t2.c1], [t2.c2]), partitions(p[0-2]),
limit(10), offset(nil),
is_index_back=false, filter_before_indexback[false],
range_key([t2.c1]), range(MIN ; MAX)always true
11 - output([t1.c1]), filter(nil),
14 - output([t1.c1]), filter(nil),
affinitize, force partition granule, asc.
12 - output([t1.c1]), filter(nil),
15 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX),
@ -1552,24 +1574,26 @@ Outputs & filters:
SQL: select c1, c1+c2 from t1 where c1 > 100 limit 1, 10;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |10 |41 |
|1 | PX COORDINATOR | |11 |39 |
|2 | EXCHANGE OUT DISTR |:EX10000|11 |38 |
|3 | PX PARTITION ITERATOR| |11 |38 |
|4 | TABLE SCAN |t1 |11 |38 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |10 |42 |
|1 | PX COORDINATOR | |10 |41 |
|2 | EXCHANGE OUT DISTR |:EX10000|10 |40 |
|3 | LIMIT | |10 |40 |
|4 | PX PARTITION ITERATOR| |11 |38 |
|5 | TABLE SCAN |t1 |11 |38 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([t1.c1], [t1.c1 + t1.c2]), filter(nil), limit(10), offset(?)
1 - output([t1.c1], [t1.c2]), filter(nil)
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(10 + ?), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
5 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(10 + ?), offset(nil),
is_index_back=false,
@ -2269,12 +2293,13 @@ SQL: select c1, repeat('ob', 2) as db_name from t1 order by c2 limit 100;
=============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------------
|0 |LIMIT | |100 |241 |
|1 | PX COORDINATOR MERGE SORT | |100 |227 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |217 |
|3 | SORT | |100 |217 |
|4 | PX PARTITION ITERATOR | |100 |90 |
|5 | TABLE SCAN |t1(idx_t1_c2)|100 |90 |
|0 |LIMIT | |100 |254 |
|1 | PX COORDINATOR MERGE SORT | |100 |241 |
|2 | EXCHANGE OUT DISTR |:EX10000 |100 |231 |
|3 | LIMIT | |100 |231 |
|4 | TOP-N SORT | |100 |217 |
|5 | PX PARTITION ITERATOR | |100 |90 |
|6 | TABLE SCAN |t1(idx_t1_c2)|100 |90 |
=============================================================
Outputs & filters:
@ -2282,10 +2307,11 @@ Outputs & filters:
0 - output([t1.c1], [?]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
2 - output([t1.c1], [t1.c2]), filter(nil), dop=1
3 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), local merge sort
4 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
3 - output([t1.c1], [t1.c2]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1], [t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), topn(100), local merge sort
5 - output([t1.c1], [t1.c2]), filter(nil),
force partition granule, asc.
6 - output([t1.c1], [t1.c2]), filter(nil),
access([t1.c1], [t1.c2]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -2381,12 +2407,13 @@ SQL: select c1, 1 + 1 from t1 order by c1 limit 100;
========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------
|0 |LIMIT | |100 |214 |
|1 | PX COORDINATOR MERGE SORT | |100 |200 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |196 |
|3 | SORT | |100 |196 |
|4 | PX PARTITION ITERATOR | |100 |88 |
|5 | TABLE SCAN |t1 |100 |88 |
|0 |LIMIT | |100 |228 |
|1 | PX COORDINATOR MERGE SORT | |100 |214 |
|2 | EXCHANGE OUT DISTR |:EX10000|100 |209 |
|3 | LIMIT | |100 |209 |
|4 | TOP-N SORT | |100 |196 |
|5 | PX PARTITION ITERATOR | |100 |88 |
|6 | TABLE SCAN |t1 |100 |88 |
========================================================
Outputs & filters:
@ -2394,10 +2421,11 @@ Outputs & filters:
0 - output([t1.c1], [?]), filter(nil), limit(100), offset(nil)
1 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC])
2 - output([t1.c1]), filter(nil), dop=1
3 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), local merge sort
4 - output([t1.c1]), filter(nil),
force partition granule, asc.
3 - output([t1.c1]), filter(nil), limit(100), offset(nil)
4 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), topn(100), local merge sort
5 - output([t1.c1]), filter(nil),
force partition granule, asc.
6 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
@ -2644,37 +2672,38 @@ SQL: (select /*+no_use_px*/ c1 from t1) union (select c2 from t1) union (select
=========================================================================
|ID|OPERATOR |NAME |EST. ROWS|COST |
-------------------------------------------------------------------------
|0 |LIMIT | |100 |914254|
|1 | MERGE UNION DISTINCT | |100 |914241|
|2 | LIMIT | |100 |531348|
|3 | MERGE UNION DISTINCT | |100 |531335|
|4 | LIMIT | |100 |214 |
|5 | PX COORDINATOR MERGE SORT | |100 |200 |
|6 | EXCHANGE OUT DISTR |:EX10000 |100 |196 |
|7 | SORT | |100 |196 |
|8 | PX PARTITION ITERATOR | |100 |88 |
|9 | TABLE SCAN |t1 |100 |88 |
|10| LIMIT | |100 |531055|
|11| PX COORDINATOR MERGE SORT | |100 |531041|
|12| EXCHANGE OUT DISTR |:EX20001 |100 |531032|
|13| LIMIT | |100 |531032|
|14| MERGE DISTINCT | |100 |531018|
|15| EXCHANGE IN MERGE SORT DISTR| |100 |420145|
|16| EXCHANGE OUT DISTR (HASH) |:EX20000 |100 |420135|
|17| PX PARTITION ITERATOR | |100 |420135|
|18| MERGE DISTINCT | |100 |420135|
|19| TABLE SCAN |t1(idx_t1_c2)|500000 |309262|
|20| LIMIT | |100 |382827|
|21| PX COORDINATOR MERGE SORT | |100 |382813|
|22| EXCHANGE OUT DISTR |:EX30001 |100 |382804|
|23| LIMIT | |100 |382804|
|24| TOP-N SORT | |100 |382790|
|25| HASH DISTINCT | |101 |382684|
|26| EXCHANGE IN DISTR | |101 |284136|
|27| EXCHANGE OUT DISTR (HASH) |:EX30000 |101 |284127|
|28| HASH DISTINCT | |101 |284127|
|29| PX PARTITION ITERATOR | |300000 |185579|
|30| TABLE SCAN |t2 |300000 |185579|
|0 |LIMIT | |100 |914268|
|1 | MERGE UNION DISTINCT | |100 |914254|
|2 | LIMIT | |100 |531362|
|3 | MERGE UNION DISTINCT | |100 |531348|
|4 | LIMIT | |100 |228 |
|5 | PX COORDINATOR MERGE SORT | |100 |214 |
|6 | EXCHANGE OUT DISTR |:EX10000 |100 |209 |
|7 | LIMIT | |100 |209 |
|8 | TOP-N SORT | |100 |196 |
|9 | PX PARTITION ITERATOR | |100 |88 |
|10| TABLE SCAN |t1 |100 |88 |
|11| LIMIT | |100 |531055|
|12| PX COORDINATOR MERGE SORT | |100 |531041|
|13| EXCHANGE OUT DISTR |:EX20001 |100 |531032|
|14| LIMIT | |100 |531032|
|15| MERGE DISTINCT | |100 |531018|
|16| EXCHANGE IN MERGE SORT DISTR| |100 |420145|
|17| EXCHANGE OUT DISTR (HASH) |:EX20000 |100 |420135|
|18| PX PARTITION ITERATOR | |100 |420135|
|19| MERGE DISTINCT | |100 |420135|
|20| TABLE SCAN |t1(idx_t1_c2)|500000 |309262|
|21| LIMIT | |100 |382827|
|22| PX COORDINATOR MERGE SORT | |100 |382813|
|23| EXCHANGE OUT DISTR |:EX30001 |100 |382804|
|24| LIMIT | |100 |382804|
|25| TOP-N SORT | |100 |382790|
|26| HASH DISTINCT | |101 |382684|
|27| EXCHANGE IN DISTR | |101 |284136|
|28| EXCHANGE OUT DISTR (HASH) |:EX30000 |101 |284127|
|29| HASH DISTINCT | |101 |284127|
|30| PX PARTITION ITERATOR | |300000 |185579|
|31| TABLE SCAN |t2 |300000 |185579|
=========================================================================
Outputs & filters:
@ -2686,44 +2715,45 @@ Outputs & filters:
4 - output([t1.c1]), filter(nil), limit(100), offset(nil)
5 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC])
6 - output([t1.c1]), filter(nil), dop=1
7 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), local merge sort
8 - output([t1.c1]), filter(nil),
force partition granule, asc.
7 - output([t1.c1]), filter(nil), limit(100), offset(nil)
8 - output([t1.c1]), filter(nil), sort_keys([t1.c1, ASC]), topn(100), local merge sort
9 - output([t1.c1]), filter(nil),
force partition granule, asc.
10 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(100), offset(nil),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX)always true
10 - output([t1.c2]), filter(nil), limit(100), offset(nil)
11 - output([t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
12 - output([t1.c2]), filter(nil), dop=1
13 - output([t1.c2]), filter(nil), limit(100), offset(nil)
14 - output([t1.c2]), filter(nil),
11 - output([t1.c2]), filter(nil), limit(100), offset(nil)
12 - output([t1.c2]), filter(nil), sort_keys([t1.c2, ASC])
13 - output([t1.c2]), filter(nil), dop=1
14 - output([t1.c2]), filter(nil), limit(100), offset(nil)
15 - output([t1.c2]), filter(nil),
distinct([t1.c2])
15 - output([t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), Local Order
16 - (#keys=1, [t1.c2]), output([t1.c2]), filter(nil), dop=1
17 - output([t1.c2]), filter(nil),
force partition granule, asc.
16 - output([t1.c2]), filter(nil), sort_keys([t1.c2, ASC]), Local Order
17 - (#keys=1, [t1.c2]), output([t1.c2]), filter(nil), dop=1
18 - output([t1.c2]), filter(nil),
distinct([t1.c2])
force partition granule, asc.
19 - output([t1.c2]), filter(nil),
distinct([t1.c2])
20 - output([t1.c2]), filter(nil),
access([t1.c2]), partitions(p[0-4]),
is_index_back=false,
range_key([t1.c2], [t1.c1]), range(MIN,MIN ; MAX,MAX)always true
20 - output([t2.c2]), filter(nil), limit(100), offset(nil)
21 - output([t2.c2]), filter(nil), sort_keys([t2.c2, ASC])
22 - output([t2.c2]), filter(nil), dop=1
23 - output([t2.c2]), filter(nil), limit(100), offset(nil)
24 - output([t2.c2]), filter(nil), sort_keys([t2.c2, ASC]), topn(100)
25 - output([t2.c2]), filter(nil),
distinct([t2.c2])
26 - output([t2.c2]), filter(nil)
27 - (#keys=1, [t2.c2]), output([t2.c2]), filter(nil), dop=1
28 - output([t2.c2]), filter(nil),
21 - output([t2.c2]), filter(nil), limit(100), offset(nil)
22 - output([t2.c2]), filter(nil), sort_keys([t2.c2, ASC])
23 - output([t2.c2]), filter(nil), dop=1
24 - output([t2.c2]), filter(nil), limit(100), offset(nil)
25 - output([t2.c2]), filter(nil), sort_keys([t2.c2, ASC]), topn(100)
26 - output([t2.c2]), filter(nil),
distinct([t2.c2])
27 - output([t2.c2]), filter(nil)
28 - (#keys=1, [t2.c2]), output([t2.c2]), filter(nil), dop=1
29 - output([t2.c2]), filter(nil),
force partition granule, asc.
distinct([t2.c2])
30 - output([t2.c2]), filter(nil),
force partition granule, asc.
31 - output([t2.c2]), filter(nil),
access([t2.c2]), partitions(p[0-2]),
is_index_back=false,
range_key([t2.c1]), range(MIN ; MAX)always true
@ -3130,24 +3160,26 @@ Outputs & filters:
SQL: select distinct 1, 2 from t1;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |36 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |36 |
|3 | PX PARTITION ITERATOR| |1 |36 |
|4 | TABLE SCAN |t1 |1 |36 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |37 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|3 | LIMIT | |1 |37 |
|4 | PX PARTITION ITERATOR| |1 |36 |
|5 | TABLE SCAN |t1 |1 |36 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([?], [?]), filter(nil), limit(1), offset(nil)
1 - output([1]), filter(nil)
2 - output([1]), filter(nil), dop=1
3 - output([1]), filter(nil),
force partition granule, asc.
3 - output([1]), filter(nil), limit(1), offset(nil)
4 - output([1]), filter(nil),
force partition granule, asc.
5 - output([1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
@ -4845,24 +4877,26 @@ Outputs & filters:
SQL: select @@sql_mode, c1 from t1 limit 1;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |36 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |36 |
|3 | PX PARTITION ITERATOR| |1 |36 |
|4 | TABLE SCAN |t1 |1 |36 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |37 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|3 | LIMIT | |1 |37 |
|4 | PX PARTITION ITERATOR| |1 |36 |
|5 | TABLE SCAN |t1 |1 |36 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([?], [t1.c1]), filter(nil), limit(1), offset(nil)
1 - output([t1.c1]), filter(nil)
2 - output([t1.c1]), filter(nil), dop=1
3 - output([t1.c1]), filter(nil),
force partition granule, asc.
3 - output([t1.c1]), filter(nil), limit(1), offset(nil)
4 - output([t1.c1]), filter(nil),
force partition granule, asc.
5 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
@ -5705,20 +5739,21 @@ Outputs & filters:
SQL: select * from t1 where exists (select * from t2 limit 0);
========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST |
--------------------------------------------------------
|0 |SUBPLAN FILTER | |250000 |425603|
|1 | PX COORDINATOR | |500000 |356592|
|2 | EXCHANGE OUT DISTR |:EX10000|500000 |309262|
|3 | PX PARTITION ITERATOR | |500000 |309262|
|4 | TABLE SCAN |t1 |500000 |309262|
|5 | LIMIT | |0 |0 |
|6 | PX COORDINATOR | |0 |0 |
|7 | EXCHANGE OUT DISTR |:EX20000|0 |0 |
|8 | PX PARTITION ITERATOR| |0 |0 |
|9 | TABLE SCAN |t2 |0 |0 |
========================================================
=========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST |
---------------------------------------------------------
|0 |SUBPLAN FILTER | |250000 |425603|
|1 | PX COORDINATOR | |500000 |356592|
|2 | EXCHANGE OUT DISTR |:EX10000|500000 |309262|
|3 | PX PARTITION ITERATOR | |500000 |309262|
|4 | TABLE SCAN |t1 |500000 |309262|
|5 | LIMIT | |0 |0 |
|6 | PX COORDINATOR | |0 |0 |
|7 | EXCHANGE OUT DISTR |:EX20000|0 |0 |
|8 | LIMIT | |0 |0 |
|9 | PX PARTITION ITERATOR| |0 |0 |
|10| TABLE SCAN |t2 |0 |0 |
=========================================================
Outputs & filters:
-------------------------------------
@ -5735,9 +5770,10 @@ Outputs & filters:
5 - output([1]), filter(nil), limit(0), offset(nil)
6 - output([1]), filter(nil)
7 - output([1]), filter(nil), dop=1
8 - output([1]), filter(nil),
force partition granule, asc.
8 - output([1]), filter(nil), limit(0), offset(nil)
9 - output([1]), filter(nil),
force partition granule, asc.
10 - output([1]), filter(nil),
access([t2.c1]), partitions(p[0-2]),
limit(0), offset(nil),
is_index_back=false,
@ -6329,24 +6365,26 @@ Outputs & filters:
SQL: select c1 from (select c1, c2 from t1 limit 1) t;
=====================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-----------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |36 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |36 |
|3 | PX PARTITION ITERATOR| |1 |36 |
|4 | TABLE SCAN |t1 |1 |36 |
=====================================================
======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
------------------------------------------------------
|0 |LIMIT | |1 |37 |
|1 | PX COORDINATOR | |1 |37 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|3 | LIMIT | |1 |37 |
|4 | PX PARTITION ITERATOR| |1 |36 |
|5 | TABLE SCAN |t1 |1 |36 |
======================================================
Outputs & filters:
-------------------------------------
0 - output([t1.c1]), filter(nil), limit(1), offset(nil)
1 - output([t1.c1]), filter(nil)
2 - output([t1.c1]), filter(nil), dop=1
3 - output([t1.c1]), filter(nil),
force partition granule, asc.
3 - output([t1.c1]), filter(nil), limit(1), offset(nil)
4 - output([t1.c1]), filter(nil),
force partition granule, asc.
5 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
@ -7980,18 +8018,19 @@ SQL: SELECT (select max(t1.c1) from t1) as field from t1 group by field;
|0 |HASH GROUP BY | |1 |809753|
|1 | NESTED-LOOP JOIN CARTESIAN | |500000 |645625|
|2 | SUBPLAN SCAN |VIEW4 |1 |38 |
|3 | SCALAR GROUP BY | |1 |37 |
|3 | SCALAR GROUP BY | |1 |38 |
|4 | SUBPLAN SCAN |VIEW5 |1 |37 |
|5 | LIMIT | |1 |37 |
|6 | PX COORDINATOR MERGE SORT | |1 |37 |
|7 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|8 | SORT | |1 |37 |
|9 | PX PARTITION ITERATOR | |1 |36 |
|10| TABLE SCAN |t1(Reverse)|1 |36 |
|11| PX COORDINATOR | |500000 |321898|
|12| EXCHANGE OUT DISTR |:EX20000 |500000 |298233|
|13| PX PARTITION ITERATOR | |500000 |298233|
|14| TABLE SCAN |t1 |500000 |298233|
|8 | LIMIT | |1 |37 |
|9 | TOP-N SORT | |1 |37 |
|10| PX PARTITION ITERATOR | |1 |36 |
|11| TABLE SCAN |t1(Reverse)|1 |36 |
|12| PX COORDINATOR | |500000 |321898|
|13| EXCHANGE OUT DISTR |:EX20000 |500000 |298233|
|14| PX PARTITION ITERATOR | |500000 |298233|
|15| TABLE SCAN |t1 |500000 |298233|
==================================================================
Outputs & filters:
@ -8009,19 +8048,20 @@ Outputs & filters:
5 - output([t1.c1]), filter(nil), limit(1), offset(nil)
6 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC])
7 - output([t1.c1]), filter(nil), dop=1
8 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), local merge sort
9 - output([t1.c1]), filter(nil),
force partition granule, asc.
8 - output([t1.c1]), filter(nil), limit(1), offset(nil)
9 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), topn(1), local merge sort
10 - output([t1.c1]), filter(nil),
force partition granule, asc.
11 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX)always true
11 - output([1]), filter(nil)
12 - output([1]), filter(nil), dop=1
13 - output([1]), filter(nil),
force partition granule, asc.
12 - output([1]), filter(nil)
13 - output([1]), filter(nil), dop=1
14 - output([1]), filter(nil),
force partition granule, asc.
15 - output([1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX)always true
@ -8827,20 +8867,21 @@ Outputs & filters:
SQL: select c1 from t1 where (1, 2) in (select distinct t1.c1, t1.c2 from t2 where t1.c1);
=======================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
-------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN| |1 |53 |
|1 | PX COORDINATOR | |1 |53 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |53 |
|3 | TABLE GET |t1 |1 |53 |
|4 | SUBPLAN SCAN |VIEW2 |1 |37 |
|5 | LIMIT | |1 |37 |
|6 | PX COORDINATOR | |1 |36 |
|7 | EXCHANGE OUT DISTR |:EX20000|1 |36 |
|8 | PX PARTITION ITERATOR| |1 |36 |
|9 | TABLE SCAN |t2 |1 |36 |
=======================================================
========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN | |1 |53 |
|1 | PX COORDINATOR | |1 |53 |
|2 | EXCHANGE OUT DISTR |:EX10000|1 |53 |
|3 | TABLE GET |t1 |1 |53 |
|4 | SUBPLAN SCAN |VIEW2 |1 |37 |
|5 | LIMIT | |1 |37 |
|6 | PX COORDINATOR | |1 |37 |
|7 | EXCHANGE OUT DISTR |:EX20000|1 |37 |
|8 | LIMIT | |1 |37 |
|9 | PX PARTITION ITERATOR| |1 |36 |
|10| TABLE SCAN |t2 |1 |36 |
========================================================
Outputs & filters:
-------------------------------------
@ -8858,9 +8899,10 @@ Outputs & filters:
5 - output([1]), filter(nil), limit(1), offset(nil)
6 - output([1]), filter(nil)
7 - output([1]), filter(nil), dop=1
8 - output([1]), filter(nil),
force partition granule, asc.
8 - output([1]), filter(nil), limit(1), offset(nil)
9 - output([1]), filter(nil),
force partition granule, asc.
10 - output([1]), filter(nil),
access([t2.c1]), partitions(p[0-2]),
limit(1), offset(nil),
is_index_back=false,
@ -8872,22 +8914,23 @@ Outputs & filters:
SQL: select c2 from t1 where exists (select * from t2 where t1.c1 and t1.c2 limit 1);
==========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST |
----------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN | |490050 |786189|
|1 | PX COORDINATOR | |490050 |439375|
|2 | EXCHANGE OUT DISTR |:EX10000|490050 |392987|
|3 | PX PARTITION ITERATOR | |490050 |392987|
|4 | TABLE SCAN |t1 |490050 |392987|
|5 | MATERIAL | |1 |37 |
|6 | SUBPLAN SCAN |VIEW2 |1 |37 |
|7 | LIMIT | |1 |37 |
|8 | PX COORDINATOR | |1 |36 |
|9 | EXCHANGE OUT DISTR |:EX20000|1 |36 |
|10| PX PARTITION ITERATOR| |1 |36 |
|11| TABLE SCAN |t2 |1 |36 |
==========================================================
===========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST |
-----------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN | |490050 |786189|
|1 | PX COORDINATOR | |490050 |439375|
|2 | EXCHANGE OUT DISTR |:EX10000|490050 |392987|
|3 | PX PARTITION ITERATOR | |490050 |392987|
|4 | TABLE SCAN |t1 |490050 |392987|
|5 | MATERIAL | |1 |37 |
|6 | SUBPLAN SCAN |VIEW2 |1 |37 |
|7 | LIMIT | |1 |37 |
|8 | PX COORDINATOR | |1 |37 |
|9 | EXCHANGE OUT DISTR |:EX20000|1 |37 |
|10| LIMIT | |1 |37 |
|11| PX PARTITION ITERATOR| |1 |36 |
|12| TABLE SCAN |t2 |1 |36 |
===========================================================
Outputs & filters:
-------------------------------------
@ -8907,9 +8950,10 @@ Outputs & filters:
7 - output([1]), filter(nil), limit(1), offset(nil)
8 - output([1]), filter(nil)
9 - output([1]), filter(nil), dop=1
10 - output([1]), filter(nil),
force partition granule, asc.
10 - output([1]), filter(nil), limit(1), offset(nil)
11 - output([1]), filter(nil),
force partition granule, asc.
12 - output([1]), filter(nil),
access([t2.c1]), partitions(p[0-2]),
limit(1), offset(nil),
is_index_back=false,
@ -9313,15 +9357,15 @@ SQL: select * from t1, t2 where t1.c1 > any(select t3.c2 from t2,t3 where t2.c1
|8 | EXCHANGE OUT DISTR |:EX20000 |500000 |309262 |
|9 | PX PARTITION ITERATOR | |500000 |309262 |
|10| TABLE SCAN |t1 |500000 |309262 |
|11| MATERIAL | |20000000000|28633829101 |
|12| SUBPLAN SCAN |VIEW2 |20000000000|24958754009 |
|13| NESTED-LOOP JOIN CARTESIAN | |20000000000|22198315287 |
|11| MATERIAL | |20000000000|28633870508 |
|12| SUBPLAN SCAN |VIEW2 |20000000000|24958795416 |
|13| NESTED-LOOP JOIN CARTESIAN | |20000000000|22198356694 |
|14| PX COORDINATOR | |200000 |142652 |
|15| EXCHANGE OUT DISTR |:EX30000 |200000 |123720 |
|16| PX PARTITION ITERATOR | |200000 |123720 |
|17| TABLE SCAN |t3 |200000 |123720 |
|18| MATERIAL | |100000 |8045386168 |
|19| NESTED-LOOP SEMI JOIN | |100000 |8045367793 |
|18| MATERIAL | |100000 |8045427575 |
|19| NESTED-LOOP SEMI JOIN | |100000 |8045409199 |
|20| PX COORDINATOR | |300000 |193161 |
|21| EXCHANGE OUT DISTR |:EX40000 |300000 |178962 |
|22| PX PARTITION ITERATOR | |300000 |178962 |
@ -9336,10 +9380,11 @@ SQL: select * from t1, t2 where t1.c1 > any(select t3.c2 from t2,t3 where t2.c1
|31| MATERIAL | |33334 |26745 |
|32| TABLE SCAN |t4 |33334 |20620 |
|33| LIMIT | |1 |37 |
|34| PX COORDINATOR | |1 |36 |
|35| EXCHANGE OUT DISTR |:EX60000 |1 |36 |
|36| PX PARTITION ITERATOR | |1 |36 |
|37| TABLE SCAN |t2 |1 |36 |
|34| PX COORDINATOR | |1 |37 |
|35| EXCHANGE OUT DISTR |:EX60000 |1 |37 |
|36| LIMIT | |1 |37 |
|37| PX PARTITION ITERATOR | |1 |36 |
|38| TABLE SCAN |t2 |1 |36 |
====================================================================================
Outputs & filters:
@ -9412,9 +9457,10 @@ Outputs & filters:
33 - output([1]), filter(nil), limit(1), offset(nil)
34 - output([1]), filter(nil)
35 - output([1]), filter(nil), dop=1
36 - output([1]), filter(nil),
force partition granule, asc.
36 - output([1]), filter(nil), limit(1), offset(nil)
37 - output([1]), filter(nil),
force partition granule, asc.
38 - output([1]), filter(nil),
access([t2.c1]), partitions(p[0-2]),
limit(1), offset(nil),
is_index_back=false,

View File

@ -206,15 +206,16 @@ SQL: select * from t4 where c1 in (select max(c1) from t1);
---------------------------------------------------------------
|0 |NESTED-LOOP JOIN | |1 |74 |
|1 | SUBPLAN SCAN |VIEW1 |1 |38 |
|2 | SCALAR GROUP BY | |1 |37 |
|2 | SCALAR GROUP BY | |1 |38 |
|3 | SUBPLAN SCAN |VIEW2 |1 |37 |
|4 | LIMIT | |1 |37 |
|5 | PX COORDINATOR MERGE SORT | |1 |37 |
|6 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|7 | SORT | |1 |37 |
|8 | PX PARTITION ITERATOR | |1 |36 |
|9 | TABLE SCAN |t1(Reverse)|1 |36 |
|10| TABLE SCAN |t4 |1 |36 |
|7 | LIMIT | |1 |37 |
|8 | TOP-N SORT | |1 |37 |
|9 | PX PARTITION ITERATOR | |1 |36 |
|10| TABLE SCAN |t1(Reverse)|1 |36 |
|11| TABLE SCAN |t4 |1 |36 |
===============================================================
Outputs & filters:
@ -230,12 +231,13 @@ Outputs & filters:
4 - output([t1.c1]), filter(nil), limit(1), offset(nil)
5 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC])
6 - output([t1.c1]), filter(nil), dop=1
7 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), local merge sort
8 - output([t1.c1]), filter(nil)
9 - output([t1.c1]), filter(nil),
7 - output([t1.c1]), filter(nil), limit(1), offset(nil)
8 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), topn(1), local merge sort
9 - output([t1.c1]), filter(nil)
10 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil)
10 - output([t4.c1], [t4.c2], [t4.c3]), filter(nil),
11 - output([t4.c1], [t4.c2], [t4.c3]), filter(nil),
access([t4.c1], [t4.c2], [t4.c3]), partitions(p0)
Outline Data:
@ -971,25 +973,27 @@ SQL: select * from t8, (select count(*) from t1 where t1.c1 > any (select t2.c1
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN | |100 |498 |
|1 | SUBPLAN SCAN |tt |1 |343 |
|1 | SUBPLAN SCAN |tt |1 |344 |
|2 | SCALAR GROUP BY | |1 |343 |
|3 | PX COORDINATOR | |1 |311 |
|4 | EXCHANGE OUT DISTR |:EX10002|1 |311 |
|5 | MERGE GROUP BY | |1 |311 |
|6 | NESTED-LOOP JOIN | |167 |279 |
|7 | EXCHANGE IN DISTR | |1 |37 |
|8 | EXCHANGE OUT DISTR (BC2HOST) |:EX10001|1 |37 |
|9 | SUBPLAN SCAN |VIEW1 |1 |37 |
|10| SCALAR GROUP BY | |1 |37 |
|3 | PX COORDINATOR | |1 |312 |
|4 | EXCHANGE OUT DISTR |:EX10002|1 |312 |
|5 | MERGE GROUP BY | |1 |312 |
|6 | NESTED-LOOP JOIN | |167 |280 |
|7 | EXCHANGE IN DISTR | |1 |38 |
|8 | EXCHANGE OUT DISTR (BC2HOST) |:EX10001|1 |38 |
|9 | SUBPLAN SCAN |VIEW1 |1 |38 |
|10| SCALAR GROUP BY | |1 |38 |
|11| SUBPLAN SCAN |VIEW2 |1 |37 |
|12| LIMIT | |1 |37 |
|13| EXCHANGE IN MERGE SORT DISTR| |1 |36 |
|14| EXCHANGE OUT DISTR |:EX10000|1 |36 |
|15| PX PARTITION ITERATOR | |1 |36 |
|16| TABLE SCAN |t2 |1 |36 |
|17| PX PARTITION ITERATOR | |167 |134 |
|18| TABLE SCAN |t1 |167 |134 |
|19| TABLE SCAN |t8 |100 |90 |
|13| EXCHANGE IN MERGE SORT DISTR| |1 |37 |
|14| EXCHANGE OUT DISTR |:EX10000|1 |37 |
|15| LIMIT | |1 |37 |
|16| TOP-N SORT | |1 |37 |
|17| PX PARTITION ITERATOR | |1 |36 |
|18| TABLE SCAN |t2 |1 |36 |
|19| PX PARTITION ITERATOR | |167 |134 |
|20| TABLE SCAN |t1 |167 |134 |
|21| TABLE SCAN |t8 |100 |90 |
======================================================================
Outputs & filters:
@ -1015,16 +1019,18 @@ Outputs & filters:
11 - output([VIEW2.c1]), filter(nil),
access([VIEW2.c1])
12 - output([t2.c1]), filter(nil), limit(1), offset(nil)
13 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), Local Order
13 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC])
14 - output([t2.c1]), filter(nil), dop=1
15 - output([t2.c1]), filter(nil)
16 - output([t2.c1]), filter(nil),
15 - output([t2.c1]), filter(nil), limit(1), offset(nil)
16 - output([t2.c1]), filter(nil), sort_keys([t2.c1, ASC]), topn(1), local merge sort
17 - output([t2.c1]), filter(nil)
18 - output([t2.c1]), filter(nil),
access([t2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
17 - output([1]), filter(nil)
18 - output([1]), filter(nil),
19 - output([1]), filter(nil)
20 - output([1]), filter(nil),
access([t1.c1]), partitions(p[0-4])
19 - output([t8.c1], [t8.c2]), filter(nil),
21 - output([t8.c1], [t8.c2]), filter(nil),
access([t8.c1], [t8.c2]), partitions(p0)
Outline Data:

File diff suppressed because it is too large

View File

@ -2195,18 +2195,19 @@ SQL: select /*+leading(tt t5) use_nl(t5)*/* from (select max(c1) from t1) tt, t5
---------------------------------------------------------------
|0 |NESTED-LOOP JOIN CARTESIAN | |300 |479 |
|1 | SUBPLAN SCAN |tt |1 |38 |
|2 | SCALAR GROUP BY | |1 |37 |
|2 | SCALAR GROUP BY | |1 |38 |
|3 | SUBPLAN SCAN |VIEW1 |1 |37 |
|4 | LIMIT | |1 |37 |
|5 | PX COORDINATOR MERGE SORT | |1 |37 |
|6 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|7 | SORT | |1 |37 |
|8 | PX PARTITION ITERATOR | |1 |36 |
|9 | TABLE SCAN |t1(Reverse)|1 |36 |
|10| PX COORDINATOR | |300 |247 |
|11| EXCHANGE OUT DISTR |:EX20000 |300 |205 |
|12| PX PARTITION ITERATOR | |300 |205 |
|13| TABLE SCAN |t5 |300 |205 |
|7 | LIMIT | |1 |37 |
|8 | TOP-N SORT | |1 |37 |
|9 | PX PARTITION ITERATOR | |1 |36 |
|10| TABLE SCAN |t1(Reverse)|1 |36 |
|11| PX COORDINATOR | |300 |247 |
|12| EXCHANGE OUT DISTR |:EX20000 |300 |205 |
|13| PX PARTITION ITERATOR | |300 |205 |
|14| TABLE SCAN |t5 |300 |205 |
===============================================================
Outputs & filters:
@ -2222,19 +2223,20 @@ Outputs & filters:
4 - output([t1.c1]), filter(nil), limit(1), offset(nil)
5 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC])
6 - output([t1.c1]), filter(nil), dop=1
7 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), local merge sort
8 - output([t1.c1]), filter(nil),
force partition granule, asc.
7 - output([t1.c1]), filter(nil), limit(1), offset(nil)
8 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), topn(1), local merge sort
9 - output([t1.c1]), filter(nil),
force partition granule, asc.
10 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
range_key([t1.c1]), range(MIN ; MAX)always true
10 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil)
11 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil), dop=1
12 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil),
force partition granule, asc.
11 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil)
12 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil), dop=1
13 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil),
force partition granule, asc.
14 - output([t5.c2], [t5.c3], [t5.c1]), filter(nil),
access([t5.c2], [t5.c3], [t5.c1]), partitions(p[0-2]),
is_index_back=false,
range_key([t5.c2], [t5.c3]), range(MIN,MIN ; MAX,MAX)always true
@ -2274,6 +2276,7 @@ Optimization Info:
t1:table_rows:500, physical_range_rows:500, logical_range_rows:500, index_back_rows:0, output_rows:500, est_method:local_storage, optimization_method=cost_based, avaiable_index_name[t1], pruned_index_name[idx_t1_c2]
@ -2299,14 +2302,15 @@ SQL: select /*+leading(t5 tt) use_nl(tt)*/* from (select max(c1) from t1) tt, t5
|4 | TABLE SCAN |t5 |300 |205 |
|5 | MATERIAL | |1 |38 |
|6 | SUBPLAN SCAN |tt |1 |38 |
|7 | SCALAR GROUP BY | |1 |37 |
|7 | SCALAR GROUP BY | |1 |38 |
|8 | SUBPLAN SCAN |VIEW1 |1 |37 |
|9 | LIMIT | |1 |37 |
|10| PX COORDINATOR MERGE SORT | |1 |37 |
|11| EXCHANGE OUT DISTR |:EX20000 |1 |37 |
|12| SORT | |1 |37 |
|13| PX PARTITION ITERATOR | |1 |36 |
|14| TABLE SCAN |t1(Reverse)|1 |36 |
|12| LIMIT | |1 |37 |
|13| TOP-N SORT | |1 |37 |
|14| PX PARTITION ITERATOR | |1 |36 |
|15| TABLE SCAN |t1(Reverse)|1 |36 |
================================================================
Outputs & filters:
@ -2331,10 +2335,11 @@ Outputs & filters:
9 - output([t1.c1]), filter(nil), limit(1), offset(nil)
10 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC])
11 - output([t1.c1]), filter(nil), dop=1
12 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), local merge sort
13 - output([t1.c1]), filter(nil),
force partition granule, asc.
12 - output([t1.c1]), filter(nil), limit(1), offset(nil)
13 - output([t1.c1]), filter(nil), sort_keys([t1.c1, DESC]), topn(1), local merge sort
14 - output([t1.c1]), filter(nil),
force partition granule, asc.
15 - output([t1.c1]), filter(nil),
access([t1.c1]), partitions(p[0-4]),
limit(1), offset(nil),
is_index_back=false,
@ -2381,6 +2386,7 @@ t5:table_rows:300, physical_range_rows:300, logical_range_rows:300, index_back_r
t1:table_rows:500, physical_range_rows:500, logical_range_rows:500, index_back_rows:0, output_rows:500, est_method:local_storage, optimization_method=cost_based, avaiable_index_name[t1], pruned_index_name[idx_t1_c2]
Parameters
-------------------------------------

View File

@ -301,14 +301,15 @@ SQL: select max(c1) from pt2;
==============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2(Reverse)|1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2(Reverse)|1 |36 |
==============================================================
Outputs & filters:
@ -320,9 +321,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -331,14 +333,15 @@ SQL: select max(c1) as max from pt2;
==============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2(Reverse)|1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2(Reverse)|1 |36 |
==============================================================
Outputs & filters:
@ -350,9 +353,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -361,14 +365,15 @@ SQL: select min(c1) from pt2;
==========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2 |1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2 |1 |36 |
==========================================================
Outputs & filters:
@ -380,9 +385,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -391,14 +397,15 @@ SQL: select min(c1) as min from pt2;
==========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2 |1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2 |1 |36 |
==========================================================
Outputs & filters:
@ -410,9 +417,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -421,14 +429,15 @@ SQL: select * from (select max(c1) from pt2) as tmp_table;
==============================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
--------------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2(Reverse)|1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2(Reverse)|1 |36 |
==============================================================
Outputs & filters:
@ -440,9 +449,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -453,15 +463,16 @@ SQL: select * from t2 where t2.c1 > (select max(c1) from pt2);
----------------------------------------------------------------
|0 |NESTED-LOOP JOIN | |34 |102 |
|1 | SUBPLAN SCAN |VIEW1 |1 |38 |
|2 | SCALAR GROUP BY | |1 |37 |
|2 | SCALAR GROUP BY | |1 |38 |
|3 | SUBPLAN SCAN |VIEW2 |1 |37 |
|4 | LIMIT | |1 |37 |
|5 | PX COORDINATOR MERGE SORT | |1 |37 |
|6 | EXCHANGE OUT DISTR |:EX10000 |1 |37 |
|7 | SORT | |1 |37 |
|8 | PX PARTITION ITERATOR | |1 |36 |
|9 | TABLE SCAN |pt2(Reverse)|1 |36 |
|10| TABLE SCAN |t2 |34 |43 |
|7 | LIMIT | |1 |37 |
|8 | TOP-N SORT | |1 |37 |
|9 | PX PARTITION ITERATOR | |1 |36 |
|10| TABLE SCAN |pt2(Reverse)|1 |36 |
|11| TABLE SCAN |t2 |34 |43 |
================================================================
Outputs & filters:
@ -477,12 +488,13 @@ Outputs & filters:
4 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC])
6 - output([pt2.c1]), filter(nil), dop=1
7 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), local merge sort
8 - output([pt2.c1]), filter(nil)
9 - output([pt2.c1]), filter(nil),
7 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
8 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, DESC]), topn(1), local merge sort
9 - output([pt2.c1]), filter(nil)
10 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
10 - output([t2.c1], [t2.c2], [t2.c3]), filter(nil),
11 - output([t2.c1], [t2.c2], [t2.c3]), filter(nil),
access([t2.c1], [t2.c2], [t2.c3]), partitions(p0)
*************** Case 20 ***************
@ -490,14 +502,15 @@ SQL: select * from (select min(c1) as min from pt2) as tmp_table;
==========================================================
|ID|OPERATOR |NAME |EST. ROWS|COST|
----------------------------------------------------------
|0 |SCALAR GROUP BY | |1 |37 |
|0 |SCALAR GROUP BY | |1 |38 |
|1 | SUBPLAN SCAN |VIEW1 |1 |37 |
|2 | LIMIT | |1 |37 |
|3 | PX COORDINATOR MERGE SORT | |1 |37 |
|4 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|5 | SORT | |1 |37 |
|6 | PX PARTITION ITERATOR | |1 |36 |
|7 | TABLE SCAN |pt2 |1 |36 |
|5 | LIMIT | |1 |37 |
|6 | TOP-N SORT | |1 |37 |
|7 | PX PARTITION ITERATOR | |1 |36 |
|8 | TABLE SCAN |pt2 |1 |36 |
==========================================================
Outputs & filters:
@ -509,9 +522,10 @@ Outputs & filters:
2 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
3 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC])
4 - output([pt2.c1]), filter(nil), dop=1
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), local merge sort
6 - output([pt2.c1]), filter(nil)
7 - output([pt2.c1]), filter(nil),
5 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
6 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), topn(1), local merge sort
7 - output([pt2.c1]), filter(nil)
8 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
@ -522,15 +536,16 @@ SQL: select * from t2 where t2.c1 > (select min(c1) as min from pt2);
------------------------------------------------------------
|0 |NESTED-LOOP JOIN | |34 |102 |
|1 | SUBPLAN SCAN |VIEW1 |1 |38 |
|2 | SCALAR GROUP BY | |1 |37 |
|2 | SCALAR GROUP BY | |1 |38 |
|3 | SUBPLAN SCAN |VIEW2 |1 |37 |
|4 | LIMIT | |1 |37 |
|5 | PX COORDINATOR MERGE SORT | |1 |37 |
|6 | EXCHANGE OUT DISTR |:EX10000|1 |37 |
|7 | SORT | |1 |37 |
|8 | PX PARTITION ITERATOR | |1 |36 |
|9 | TABLE SCAN |pt2 |1 |36 |
|10| TABLE SCAN |t2 |34 |43 |
|7 | LIMIT | |1 |37 |
|8 | TOP-N SORT | |1 |37 |
|9 | PX PARTITION ITERATOR | |1 |36 |
|10| TABLE SCAN |pt2 |1 |36 |
|11| TABLE SCAN |t2 |34 |43 |
============================================================
Outputs & filters:
@ -546,12 +561,13 @@ Outputs & filters:
4 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
5 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC])
6 - output([pt2.c1]), filter(nil), dop=1
7 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), local merge sort
8 - output([pt2.c1]), filter(nil)
9 - output([pt2.c1]), filter(nil),
7 - output([pt2.c1]), filter(nil), limit(1), offset(nil)
8 - output([pt2.c1]), filter(nil), sort_keys([pt2.c1, ASC]), topn(1), local merge sort
9 - output([pt2.c1]), filter(nil)
10 - output([pt2.c1]), filter(nil),
access([pt2.c1]), partitions(p[0-2]),
limit(1), offset(nil)
10 - output([t2.c1], [t2.c2], [t2.c3]), filter(nil),
11 - output([t2.c1], [t2.c2], [t2.c3]), filter(nil),
access([t2.c1], [t2.c2], [t2.c3]), partitions(p0)
*************** Case 22 ***************