diff --git a/cmake/RPM.cmake b/cmake/RPM.cmake index d84be2668..4b11c9c44 100644 --- a/cmake/RPM.cmake +++ b/cmake/RPM.cmake @@ -114,6 +114,8 @@ endif() install(FILES src/sql/fill_help_tables-ob.sql + src/share/parameter/default_parameter.json + src/share/system_variable/default_system_variable.json tools/timezone_V1.log tools/default_srs_data_mysql.sql tools/upgrade/upgrade_pre.py diff --git a/src/share/parameter/default_parameter.json b/src/share/parameter/default_parameter.json new file mode 100644 index 000000000..da41ad10c --- /dev/null +++ b/src/share/parameter/default_parameter.json @@ -0,0 +1,108 @@ +[ + { + "scenario": "express_oltp", + "comment" : "for workloads like trade, payment core system, internet high throughput application, etc. no restrictions like foreign key, no stored procedure, no long transaction, no large transaction, no complex join, no complex subquery", + "parameters": { + "cluster": [ + + ], + "tenant": [ + { + "name":"_rowsets_max_rows", + "value": 1, + "comment":"for simple OLTP workloads, rowset = 1 is most effective." + } + ] + } + }, + { + "scenario": "complex_oltp", + "comment" : "for workloads like bank, insurance system. they often have complex join, complex correlated subquery, batch jobs written in PL, have both long and large transactions. Sometimes use parallel execution for short running queries", + "parameters": { + "cluster": [ + { + "name":"large_query_threshold", + "value":"600s", + "comment":"for complex OLTP scenario, some queries will run for a very long time." + } + ], + "tenant": [ + { + "name":"_rowsets_max_rows", + "value": 4, + "comment":"for complex OLTP workloads, rowset = 4 is most effective." 
+ } + ] + } + }, + { + "scenario": "olap", + "comment": "for real-time data warehouse analytics scenarios.", + "parameters": { + "cluster": [ + { + "name": "enable_record_trace_log", + "value": false, + "comment": "disable trace log for better AP performance" + }, + { + "name":"trace_log_slow_query_watermark", + "value":"7d", + "comment":"7 days. no 'slow query' concept for AP query" + }, + { + "name":"large_query_threshold", + "value":"0ms", + "comment":"disable large query detection for AP query" + } + ], + "tenant": [ + { + "name":"default_table_store_format", + "value":"column", + "comment":"default to column format for AP" + }, + { + "name":"_rowsets_max_rows", + "value": 256, + "comment":"for classic OLAP workloads, rowset 256 is adequate" + } + ] + } + }, + { + "scenario": "kv", + "comment": "for key-value workloads and hbase-like wide-column workloads, which commonly experience very high throughput and are sensitive to latency", + "parameters": { + "cluster": [ + { + "name":"large_query_threshold", + "value":"0ms", + "comment":"disable large query detection for KV mode" + } + ], + "tenant": [ + ] + } + }, + { + "scenario": "htap", + "comment": "for mixed OLAP and OLTP workload. Typically utilized for obtaining instant insights from active operational data, fraud detection, and personalized recommendations", + "parameters": { + "cluster": [ + { + "name":"large_query_threshold", + "value":"600s", + "comment":"AP queries exist in HTAP workloads; we need them to run fast too." 
+ } + ], + "tenant": [ + { + "name":"_rowsets_max_rows", + "value": 32, + "comment":"for classic HTAP workloads, rowset = 32 is a good tradeoff" + } + ] + } + } +] diff --git a/src/share/system_variable/default_system_variable.json b/src/share/system_variable/default_system_variable.json new file mode 100644 index 000000000..cd7ac2d61 --- /dev/null +++ b/src/share/system_variable/default_system_variable.json @@ -0,0 +1,62 @@ +[ + { + "scenario": "express_oltp", + "comment" : "for workloads like trade, payment core system, internet high throughput application, etc. no restrictions like foreign key, no stored procedure, no long transaction, no large transaction, no complex join, no complex subquery", + "variables": { + "tenant": [ + ] + } + }, + { + "scenario": "complex_oltp", + "comment" : "for workloads like bank, insurance system. they often have complex join, complex correlated subquery, batch jobs written in PL, have both long and large transactions. Sometimes use parallel execution for short running queries", + "variables": { + "tenant": [ + ] + } + }, + { + "scenario": "olap", + "comment": "for real-time data warehouse analytics scenarios.", + "variables": { + "tenant": [ + { + "name": "ob_query_timeout", + "value": 604800000000, + "comment":"query timeout for AP is 7 days" + }, + { + "name": "ob_trx_timeout", + "value": 604800000000, + "comment":"transaction timeout for AP is 7 days" + }, + { + "name": "parallel_min_scan_time_threshold", + "value": 10, + "comment":"10ms. enable best parallel performance only for queries which require 100ms+" + }, + { + "name": "ob_sql_work_area_percentage", + "value": 30, + "comment":"larger sql work area can save spill cost" + } + ] + } + }, + { + "scenario": "kv", + "comment": "for key-value workloads and hbase-like wide-column workloads, which commonly experience very high throughput and are sensitive to latency", + "variables": { + "tenant": [ + ] + } + }, + { + "scenario": "htap", + "comment": "for mixed OLAP and OLTP workload. 
Typically utilized for obtaining instant insights from active operational data, fraud detection, and personalized recommendations", + "variables": { + "tenant": [ + ] + } + } +]