From 65fe7f65c191edaef9be601d0ac356e121a3bcfd Mon Sep 17 00:00:00 2001
From: morningman
Date: Mon, 1 Oct 2018 19:58:41 +0800
Subject: [PATCH] Fixed: privilege logic errors:

1. No one can set the root password except for the root user itself.
2. NODE_PRIV cannot be granted.
3. ADMIN_PRIV and GRANT_PRIV can only be granted or revoked on *.*.
4. No one can modify the privileges of the default roles 'operator' and 'admin'.
5. No user can be granted the role 'operator'.

Fixed: the running load limit should not be applied to replay logic; otherwise, replaying edit logs or loading the image may fail.
Changed: reduce the number of directories created under the mini load directory.
Fixed: missing password and auth check when handling mini load requests in the Frontend.
Fixed: DomainResolver should start after the Frontend transfers to a certain ROLE, not in the Catalog constructor.
Fixed: a bug where no one could set a password for the root user; now only the root user can set the root password.
Fixed: read null data twice. When reading data containing null values, in some cases the same data is read twice by the storage engine, producing wrong results. The cause: during splitting, when the start key is the minimum value, rows with null values are read again.
Fixed: add a flag to prevent the DomainResolver thread from starting twice.
Fixed: a memory leak of ByteBuf when parsing auth info from HTTP requests.
Fixed: add a new config 'disable_hadoop_load' (default false); set it to true to disable Hadoop load.
Changed: add detailed error messages for Hadoop load job submission to the SHOW LOAD result.
Fixed: the Backend process should crash if it fails to save the header.
Added: expose Backend info to the user when an error occurs on a Backend, to make debugging more convenient.
Fixed: remove the fd from the map when an input stream or output stream is closed in the Broker process.
Fixed: convert all files' line endings to Unix (LF) format.
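As a rough sketch of the first privilege fix (the "only root can set the root password" rule), the check below is illustrative only: the class and method names are hypothetical and do not reproduce the actual change in fe/src/com/baidu/palo/analysis/SetPassVar.java.

    // Hypothetical sketch: reject SET PASSWORD targeting 'root' from any non-root user.
    public class SetRootPasswordCheck {
        private static final String ROOT_USER = "root";

        /**
         * @param currentUser the user issuing SET PASSWORD
         * @param targetUser  the user whose password is being changed
         * @throws IllegalStateException if a non-root user targets 'root'
         */
        public static void check(String currentUser, String targetUser) {
            if (ROOT_USER.equals(targetUser) && !ROOT_USER.equals(currentUser)) {
                throw new IllegalStateException(
                        "Only the root user can set the password for 'root'");
            }
        }
    }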
Internal commit id: merge from dfcd0aca18eed9ff99d188eb3d01c60d419be1b8 --- be/CMakeLists.txt | 2 +- be/src/exec/hash_table.cpp | 1566 ++++++++--------- be/src/exec/hash_table.h | 836 ++++----- be/src/exec/hash_table.hpp | 312 ++-- be/src/exec/olap_common.h | 58 +- be/src/exec/olap_meta_reader.cpp | 102 +- be/src/exec/olap_meta_reader.h | 32 +- be/src/exec/olap_scan_node.cpp | 17 +- be/src/exec/olap_scanner.cpp | 21 +- be/src/exec/olap_utils.h | 9 +- be/src/http/action/mini_load.cpp | 6 + be/src/http/http_request.cpp | 5 + be/src/http/http_request.h | 2 + be/src/olap/CMakeLists.txt | 1 - be/src/olap/olap_reader.cpp | 103 -- be/src/olap/olap_reader.h | 42 - be/src/olap/olap_table.cpp | 78 +- be/src/olap/olap_table.h | 15 +- be/src/olap/push_handler.cpp | 16 +- be/src/olap/reader.cpp | 24 +- be/src/olap/reader.h | 12 +- be/src/olap/row_cursor.cpp | 40 +- be/src/olap/row_cursor.h | 5 +- be/src/olap/tuple.h | 79 + be/src/runtime/data_spliter.cpp | 4 +- be/src/runtime/export_sink.cpp | 125 +- be/src/runtime/load_path_mgr.cpp | 56 +- be/src/runtime/load_path_mgr.h | 2 + be/test/exec/hash_table_test.cpp | 678 +++---- be/test/exec/mysql_scan_node_test.cpp | 554 +++--- be/test/exec/mysql_scanner_test.cpp | 220 +-- be/test/exec/schema_scan_node_test.cpp | 450 ++--- .../schema_authors_scanner_test.cpp | 182 +- .../schema_charsets_scanner_test.cpp | 182 +- .../schema_collations_scanner_test.cpp | 182 +- .../schema_columns_scanner_test.cpp | 396 ++--- .../schema_create_table_scanner_test.cpp | 396 ++--- .../schema_engines_scanner_test.cpp | 182 +- .../schema_open_tables_scanner_test.cpp | 396 ++--- .../schema_schemata_scanner_test.cpp | 328 ++-- .../schema_table_names_scanner_test.cpp | 362 ++-- .../schema_tables_scanner_test.cpp | 396 ++--- .../schema_variables_scanner_test.cpp | 184 +- be/test/exec/schema_scanner_test.cpp | 204 +-- be/test/exec/set_executor_test.cpp | 220 +-- build.sh | 2 +- docs/help/Contents/Account Management/help.md | 8 +- .../Data Manipulation/manipulation_stmt.md | 10 + docs/help/Contents/Utility/util_stmt.md | 24 +- .../palo/analysis/AddFollowerClause.java | 34 +- .../palo/analysis/AddObserverClause.java | 34 +- .../palo/analysis/AlterDatabaseQuotaStmt.java | 78 +- .../palo/analysis/AlterDatabaseRename.java | 88 +- .../baidu/palo/analysis/AlterUserClause.java | 208 +-- .../baidu/palo/analysis/AlterUserType.java | 22 +- .../baidu/palo/analysis/BackendClause.java | 94 +- .../analysis/BuiltinAggregateFunction.java | 264 +-- .../palo/analysis/CancelAlterTableStmt.java | 104 +- .../baidu/palo/analysis/CreateRoleStmt.java | 2 +- .../baidu/palo/analysis/CreateUserStmt.java | 2 +- fe/src/com/baidu/palo/analysis/DdlStmt.java | 16 +- .../palo/analysis/DropBackendClause.java | 46 +- .../palo/analysis/DropFollowerClause.java | 34 +- .../palo/analysis/DropObserverClause.java | 34 +- .../com/baidu/palo/analysis/DropRoleStmt.java | 2 +- .../baidu/palo/analysis/ExistsPredicate.java | 112 +- .../baidu/palo/analysis/FrontendClause.java | 88 +- .../com/baidu/palo/analysis/FunctionArgs.java | 62 +- fe/src/com/baidu/palo/analysis/GrantStmt.java | 21 +- fe/src/com/baidu/palo/analysis/HdfsURI.java | 132 +- .../baidu/palo/analysis/RedirectStatus.java | 72 +- .../com/baidu/palo/analysis/RevokeStmt.java | 20 +- .../com/baidu/palo/analysis/SetPassVar.java | 9 +- .../palo/analysis/ShowPartitionsStmt.java | 192 +- .../palo/analysis/ShowWhiteListStmt.java | 44 +- .../analysis/SingleRangePartitionDesc.java | 402 ++--- fe/src/com/baidu/palo/analysis/Subquery.java | 334 ++-- 
fe/src/com/baidu/palo/analysis/SyncStmt.java | 34 +- .../baidu/palo/catalog/AccessPrivilege.java | 15 +- fe/src/com/baidu/palo/catalog/Catalog.java | 5 +- fe/src/com/baidu/palo/catalog/ColumnType.java | 666 +++---- .../baidu/palo/catalog/DistributionInfo.java | 188 +- .../baidu/palo/catalog/DomainResolver.java | 12 +- fe/src/com/baidu/palo/catalog/Function.java | 828 ++++----- .../palo/catalog/HashDistributionInfo.java | 250 +-- fe/src/com/baidu/palo/catalog/KeysType.java | 82 +- .../baidu/palo/catalog/MaterializedIndex.java | 404 ++--- fe/src/com/baidu/palo/catalog/MysqlTable.java | 420 ++--- fe/src/com/baidu/palo/catalog/Partition.java | 474 ++--- .../com/baidu/palo/catalog/PartitionInfo.java | 248 +-- .../com/baidu/palo/catalog/PartitionKey.java | 602 +++---- .../palo/catalog/RandomDistributionInfo.java | 150 +- fe/src/com/baidu/palo/catalog/Tablet.java | 496 +++--- fe/src/com/baidu/palo/catalog/Uda.java | 210 +-- fe/src/com/baidu/palo/catalog/Udf.java | 96 +- .../com/baidu/palo/common/AliasGenerator.java | 70 +- .../palo/common/AuthorizationException.java | 28 +- .../palo/common/ColumnAliasGenerator.java | 32 +- fe/src/com/baidu/palo/common/Config.java | 20 + .../com/baidu/palo/common/ConfigWatcher.java | 196 +-- .../com/baidu/palo/common/FeNameFormat.java | 8 +- fe/src/com/baidu/palo/common/GenericPool.java | 266 +-- fe/src/com/baidu/palo/common/Status.java | 2 +- .../palo/common/TableAliasGenerator.java | 44 +- .../palo/common/proc/BackendProcNode.java | 132 +- .../baidu/palo/common/proc/CloneProcNode.java | 96 +- .../proc/CurrentQueryFragmentProcNode.java | 6 +- .../palo/common/proc/IndexInfoProcDir.java | 254 +-- .../palo/common/proc/IndexSchemaProcNode.java | 128 +- .../palo/common/proc/IndicesProcDir.java | 220 +-- .../baidu/palo/common/proc/JobsDbProcDir.java | 150 +- .../baidu/palo/common/proc/LoadProcDir.java | 156 +- .../palo/common/proc/PartitionsProcDir.java | 412 ++--- .../baidu/palo/common/proc/RollupProcDir.java | 156 +- .../common/proc/SchemaChangeProcNode.java | 78 +- .../palo/common/proc/StatisticProcDir.java | 360 ++-- .../baidu/palo/common/proc/TableProcDir.java | 152 +- .../common/proc/UserPropertyProcNode.java | 48 +- .../baidu/palo/common/util/CommandResult.java | 118 +- .../com/baidu/palo/common/util/Counter.java | 60 +- .../palo/common/util/ListComparator.java | 122 +- .../palo/common/util/ProfileManager.java | 298 ++-- .../palo/common/util/RuntimeProfile.java | 654 +++---- .../com/baidu/palo/common/util/TimeUtils.java | 302 ++-- fe/src/com/baidu/palo/common/util/Util.java | 530 +++--- fe/src/com/baidu/palo/ha/BDBHA.java | 340 ++-- .../baidu/palo/ha/BDBStateChangeListener.java | 176 +- .../com/baidu/palo/ha/FrontendNodeType.java | 18 +- fe/src/com/baidu/palo/ha/HAProtocol.java | 72 +- fe/src/com/baidu/palo/ha/MasterInfo.java | 106 +- fe/src/com/baidu/palo/http/BaseAction.java | 10 +- .../com/baidu/palo/http/rest/LoadAction.java | 9 +- fe/src/com/baidu/palo/journal/Journal.java | 86 +- .../com/baidu/palo/journal/JournalCursor.java | 22 +- .../palo/journal/bdbje/BDBEnvironment.java | 616 +++---- .../palo/journal/bdbje/BDBJournalCursor.java | 226 +-- .../baidu/palo/journal/bdbje/Timestamp.java | 68 +- .../palo/journal/local/LocalJournal.java | 346 ++-- .../journal/local/LocalJournalCursor.java | 672 +++---- fe/src/com/baidu/palo/load/DppScheduler.java | 43 +- fe/src/com/baidu/palo/load/EtlStatus.java | 340 ++-- fe/src/com/baidu/palo/load/FailMsg.java | 160 +- fe/src/com/baidu/palo/load/Load.java | 10 +- fe/src/com/baidu/palo/load/LoadChecker.java | 2 +- 
.../baidu/palo/load/PartitionLoadInfo.java | 266 +-- fe/src/com/baidu/palo/load/Source.java | 550 +++--- .../com/baidu/palo/load/TabletLoadInfo.java | 212 +-- fe/src/com/baidu/palo/master/Checkpoint.java | 1 + fe/src/com/baidu/palo/master/MetaHelper.java | 164 +- .../palo/mysql/privilege/PrivBitSet.java | 8 +- fe/src/com/baidu/palo/persist/CloneInfo.java | 290 +-- .../baidu/palo/persist/CreateTableInfo.java | 150 +- .../com/baidu/palo/persist/DatabaseInfo.java | 188 +- fe/src/com/baidu/palo/persist/DropInfo.java | 160 +- .../palo/persist/EditLogInputStream.java | 52 +- .../palo/persist/EditLogOutputStream.java | 172 +- .../palo/persist/ReplicaPersistInfo.java | 494 +++--- .../com/baidu/palo/persist/StorageInfo.java | 90 +- fe/src/com/baidu/palo/qe/DdlExecutor.java | 5 + .../com/baidu/palo/qe/JournalObservable.java | 190 +- fe/src/com/baidu/palo/qe/JournalObserver.java | 140 +- .../com/baidu/palo/qe/MasterOpExecutor.java | 202 +-- fe/src/com/baidu/palo/qe/SimpleScheduler.java | 408 ++--- .../palo/service/FrontendServiceImpl.java | 33 +- .../com/baidu/palo/system/BackendEvent.java | 86 +- .../baidu/palo/system/SystemInfoObserver.java | 64 +- .../com/baidu/palo/task/AgentBatchTask.java | 406 ++--- fe/src/com/baidu/palo/task/AgentTask.java | 170 +- .../baidu/palo/task/AgentTaskExecutor.java | 32 +- .../com/baidu/palo/task/AgentTaskQueue.java | 390 ++-- .../com/baidu/palo/task/CancelDeleteTask.java | 70 +- fe/src/com/baidu/palo/task/CloneTask.java | 52 +- .../baidu/palo/task/CreateReplicaTask.java | 190 +- .../com/baidu/palo/task/CreateRollupTask.java | 236 +-- .../com/baidu/palo/task/DropReplicaTask.java | 48 +- .../baidu/palo/task/HadoopLoadEtlTask.java | 194 +- fe/src/com/baidu/palo/task/LoadEtlTask.java | 672 +++---- .../com/baidu/palo/task/LoadPendingTask.java | 184 +- fe/src/com/baidu/palo/task/MasterTask.java | 58 +- .../baidu/palo/task/MasterTaskExecutor.java | 158 +- fe/src/com/baidu/palo/task/PushTask.java | 314 ++-- .../com/baidu/palo/task/SchemaChangeTask.java | 206 +-- .../baidu/palo/analysis/BackendStmtTest.java | 148 +- .../palo/analysis/CancelAlterStmtTest.java | 82 +- .../baidu/palo/analysis/DeleteStmtTest.java | 254 +-- .../palo/analysis/LiteralExprCompareTest.java | 600 +++---- .../baidu/palo/analysis/ShowDataStmtTest.java | 48 +- .../com/baidu/palo/catalog/BackendTest.java | 290 +-- .../com/baidu/palo/catalog/CatalogTest.java | 24 +- .../baidu/palo/catalog/ColumnStatsTest.java | 140 +- .../com/baidu/palo/catalog/ColumnTest.java | 222 +-- .../com/baidu/palo/catalog/DatabaseTest.java | 314 ++-- .../palo/catalog/MaterializedIndexTest.java | 158 +- .../baidu/palo/catalog/PartitionKeyTest.java | 346 ++-- .../palo/catalog/RangePartitionInfoTest.java | 246 +-- .../com/baidu/palo/catalog/ReplicaTest.java | 270 +-- fe/test/com/baidu/palo/catalog/TableTest.java | 162 +- .../com/baidu/palo/catalog/TabletTest.java | 272 +-- .../palo/cluster/SystemInfoServiceTest.java | 440 ++--- .../palo/common/proc/BackendProcNodeTest.java | 116 +- .../palo/common/proc/BackendsProcDirTest.java | 262 +-- .../baidu/palo/common/util/DebugUtilTest.java | 136 +- .../palo/common/util/ListComparatorTest.java | 338 ++-- .../palo/common/util/RuntimeProfileTest.java | 278 +-- .../baidu/palo/common/util/TimeUtilsTest.java | 246 +-- .../palo/persist/CreateTableInfoTest.java | 200 +-- .../com/baidu/palo/persist/DropInfoTest.java | 102 +- .../com/baidu/palo/persist/EditLogTest.java | 176 +- .../palo/persist/ReplicaPersistInfoTest.java | 122 +- .../baidu/palo/persist/StorageInfoTest.java | 54 +- 
.../baidu/palo/planner/OlapScanNodeTest.java | 290 +-- .../baidu/palo/qe/JournalObservableTest.java | 264 +-- .../com/baidu/palo/qe/ShowExecutorTest.java | 4 +- .../baidu/palo/qe/SimpleSchedulerTest.java | 326 ++-- .../com/baidu/palo/task/AgentTaskTest.java | 468 ++--- fs_brokers/apache_hdfs_broker/build.sh | 4 +- fs_brokers/apache_hdfs_broker/deps/build.sh | 2 +- .../broker/hdfs/ClientContextManager.java | 4 +- gensrc/script/gen_build_version.sh | 2 +- gensrc/thrift/PaloInternalService.thrift | 35 - 220 files changed, 20004 insertions(+), 20022 deletions(-) delete mode 100644 be/src/olap/olap_reader.cpp delete mode 100644 be/src/olap/olap_reader.h create mode 100644 be/src/olap/tuple.h diff --git a/be/CMakeLists.txt b/be/CMakeLists.txt index d7e8092a47..768ddd1f6a 100644 --- a/be/CMakeLists.txt +++ b/be/CMakeLists.txt @@ -507,7 +507,7 @@ else() endif() set(PALO_LINK_LIBS ${PALO_LINK_LIBS} - -lrt -lbfd -liberty -lc -lm -ldl -pthread + -lrt -lbfd -liberty -lc -lm -ldl -pthread -lz ) # Set libraries for test diff --git a/be/src/exec/hash_table.cpp b/be/src/exec/hash_table.cpp index 2686962fcb..3bc63be2b9 100644 --- a/be/src/exec/hash_table.cpp +++ b/be/src/exec/hash_table.cpp @@ -18,253 +18,253 @@ // specific language governing permissions and limitations // under the License. -#include "exec/hash_table.hpp" - -#include "codegen/codegen_anyval.h" -#include "codegen/llvm_codegen.h" - -#include "exprs/expr.h" -#include "runtime/raw_value.h" -#include "runtime/string_value.hpp" -#include "runtime/mem_tracker.h" -#include "runtime/runtime_state.h" -#include "util/debug_util.h" -#include "util/palo_metrics.h" - -using llvm::BasicBlock; -using llvm::Value; -using llvm::Function; -using llvm::Type; -using llvm::PointerType; -using llvm::LLVMContext; -using llvm::PHINode; - -namespace palo { - -const float HashTable::MAX_BUCKET_OCCUPANCY_FRACTION = 0.75f; -const char* HashTable::_s_llvm_class_name = "class.palo::HashTable"; - -HashTable::HashTable(const vector& build_expr_ctxs, - const vector& probe_expr_ctxs, - int num_build_tuples, bool stores_nulls, int32_t initial_seed, - MemTracker* mem_tracker, int64_t num_buckets) : - _build_expr_ctxs(build_expr_ctxs), - _probe_expr_ctxs(probe_expr_ctxs), - _num_build_tuples(num_build_tuples), - _stores_nulls(stores_nulls), - _initial_seed(initial_seed), - _node_byte_size(sizeof(Node) + sizeof(Tuple*) * _num_build_tuples), - _num_filled_buckets(0), - _nodes(NULL), - _num_nodes(0), - _exceeded_limit(false), - _mem_tracker(mem_tracker), - _mem_limit_exceeded(false) { - DCHECK(mem_tracker != NULL); - DCHECK_EQ(_build_expr_ctxs.size(), _probe_expr_ctxs.size()); - - DCHECK_EQ((num_buckets & (num_buckets - 1)), 0) << "num_buckets must be a power of 2"; - _buckets.resize(num_buckets); - _num_buckets = num_buckets; - _num_buckets_till_resize = MAX_BUCKET_OCCUPANCY_FRACTION * _num_buckets; - _mem_tracker->consume(_buckets.capacity() * sizeof(Bucket)); - - // Compute the layout and buffer size to store the evaluated expr results - _results_buffer_size = Expr::compute_results_layout(_build_expr_ctxs, - &_expr_values_buffer_offsets, &_var_result_begin); - _expr_values_buffer = new uint8_t[_results_buffer_size]; - memset(_expr_values_buffer, 0, sizeof(uint8_t) * _results_buffer_size); - _expr_value_null_bits = new uint8_t[_build_expr_ctxs.size()]; - - _nodes_capacity = 1024; +#include "exec/hash_table.hpp" + +#include "codegen/codegen_anyval.h" +#include "codegen/llvm_codegen.h" + +#include "exprs/expr.h" +#include "runtime/raw_value.h" +#include 
"runtime/string_value.hpp" +#include "runtime/mem_tracker.h" +#include "runtime/runtime_state.h" +#include "util/debug_util.h" +#include "util/palo_metrics.h" + +using llvm::BasicBlock; +using llvm::Value; +using llvm::Function; +using llvm::Type; +using llvm::PointerType; +using llvm::LLVMContext; +using llvm::PHINode; + +namespace palo { + +const float HashTable::MAX_BUCKET_OCCUPANCY_FRACTION = 0.75f; +const char* HashTable::_s_llvm_class_name = "class.palo::HashTable"; + +HashTable::HashTable(const vector& build_expr_ctxs, + const vector& probe_expr_ctxs, + int num_build_tuples, bool stores_nulls, int32_t initial_seed, + MemTracker* mem_tracker, int64_t num_buckets) : + _build_expr_ctxs(build_expr_ctxs), + _probe_expr_ctxs(probe_expr_ctxs), + _num_build_tuples(num_build_tuples), + _stores_nulls(stores_nulls), + _initial_seed(initial_seed), + _node_byte_size(sizeof(Node) + sizeof(Tuple*) * _num_build_tuples), + _num_filled_buckets(0), + _nodes(NULL), + _num_nodes(0), + _exceeded_limit(false), + _mem_tracker(mem_tracker), + _mem_limit_exceeded(false) { + DCHECK(mem_tracker != NULL); + DCHECK_EQ(_build_expr_ctxs.size(), _probe_expr_ctxs.size()); + + DCHECK_EQ((num_buckets & (num_buckets - 1)), 0) << "num_buckets must be a power of 2"; + _buckets.resize(num_buckets); + _num_buckets = num_buckets; + _num_buckets_till_resize = MAX_BUCKET_OCCUPANCY_FRACTION * _num_buckets; + _mem_tracker->consume(_buckets.capacity() * sizeof(Bucket)); + + // Compute the layout and buffer size to store the evaluated expr results + _results_buffer_size = Expr::compute_results_layout(_build_expr_ctxs, + &_expr_values_buffer_offsets, &_var_result_begin); + _expr_values_buffer = new uint8_t[_results_buffer_size]; + memset(_expr_values_buffer, 0, sizeof(uint8_t) * _results_buffer_size); + _expr_value_null_bits = new uint8_t[_build_expr_ctxs.size()]; + + _nodes_capacity = 1024; _nodes = reinterpret_cast(malloc(_nodes_capacity * _node_byte_size)); memset(_nodes, 0, _nodes_capacity * _node_byte_size); #if 0 - if (PaloMetrics::hash_table_total_bytes() != NULL) { - PaloMetrics::hash_table_total_bytes()->increment(_nodes_capacity * _node_byte_size); - } + if (PaloMetrics::hash_table_total_bytes() != NULL) { + PaloMetrics::hash_table_total_bytes()->increment(_nodes_capacity * _node_byte_size); + } #endif - _mem_tracker->consume(_nodes_capacity * _node_byte_size); - if (_mem_tracker->limit_exceeded()) { - mem_limit_exceeded(_nodes_capacity * _node_byte_size); - } -} - -HashTable::~HashTable() { -} - -void HashTable::close() { - // TODO: use tr1::array? - delete[] _expr_values_buffer; - delete[] _expr_value_null_bits; - free(_nodes); + _mem_tracker->consume(_nodes_capacity * _node_byte_size); + if (_mem_tracker->limit_exceeded()) { + mem_limit_exceeded(_nodes_capacity * _node_byte_size); + } +} + +HashTable::~HashTable() { +} + +void HashTable::close() { + // TODO: use tr1::array? + delete[] _expr_values_buffer; + delete[] _expr_value_null_bits; + free(_nodes); #if 0 - if (PaloMetrics::hash_table_total_bytes() != NULL) { - PaloMetrics::hash_table_total_bytes()->increment(-_nodes_capacity * _node_byte_size); - } + if (PaloMetrics::hash_table_total_bytes() != NULL) { + PaloMetrics::hash_table_total_bytes()->increment(-_nodes_capacity * _node_byte_size); + } #endif - _mem_tracker->release(_nodes_capacity * _node_byte_size); - _mem_tracker->release(_buckets.size() * sizeof(Bucket)); -} - -bool HashTable::eval_row(TupleRow* row, const vector& ctxs) { - // Put a non-zero constant in the result location for NULL. 
- // We don't want(NULL, 1) to hash to the same as (0, 1). - // This needs to be as big as the biggest primitive type since the bytes - // get copied directly. - - // the 10 is experience value which need bigger than sizeof(Decimal)/sizeof(int64). - // for if slot is null, we need copy the null value to all type. - static int64_t null_value[10] = {HashUtil::FNV_SEED, HashUtil::FNV_SEED, 0}; - bool has_null = false; - - for (int i = 0; i < ctxs.size(); ++i) { - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - void* val = ctxs[i]->get_value(row); - - if (val == NULL) { - // If the table doesn't store nulls, no reason to keep evaluating - if (!_stores_nulls) { - return true; - } - - _expr_value_null_bits[i] = true; - val = &null_value; - has_null = true; - } else { - _expr_value_null_bits[i] = false; - } - - RawValue::write(val, loc, _build_expr_ctxs[i]->root()->type(), NULL); - } - - return has_null; -} - -uint32_t HashTable::hash_variable_len_row() { - uint32_t hash = _initial_seed; - // Hash the non-var length portions (if there are any) - if (_var_result_begin != 0) { - hash = HashUtil::hash(_expr_values_buffer, _var_result_begin, hash); - } - - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - // non-string and null slots are already part of expr_values_buffer - if (_build_expr_ctxs[i]->root()->type().is_string_type()) { - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - - if (_expr_value_null_bits[i]) { - // Hash the null random seed values at 'loc' - hash = HashUtil::hash(loc, sizeof(StringValue), hash); - } else { - // Hash the string - StringValue* str = reinterpret_cast(loc); - hash = HashUtil::hash(str->ptr, str->len, hash); - } - } else if (_build_expr_ctxs[i]->root()->type().is_decimal_type()) { - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - if (_expr_value_null_bits[i]) { - // Hash the null random seed values at 'loc' - hash = HashUtil::hash(loc, sizeof(StringValue), hash); - } else { - DecimalValue* decimal = reinterpret_cast(loc); - hash = decimal->hash(hash); - } - } - - } - - return hash; -} - -bool HashTable::equals(TupleRow* build_row) { - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - void* val = _build_expr_ctxs[i]->get_value(build_row); - - if (val == NULL) { - if (!_stores_nulls) { - return false; - } - - if (!_expr_value_null_bits[i]) { - return false; - } - - continue; - } - - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - - if (!RawValue::eq(loc, val, _build_expr_ctxs[i]->root()->type())) { - return false; - - } - } - - return true; -} - -void HashTable::resize_buckets(int64_t num_buckets) { - DCHECK_EQ((num_buckets & (num_buckets - 1)), 0) << "num_buckets must be a power of 2"; - - int64_t old_num_buckets = _num_buckets; - int64_t delta_bytes = (num_buckets - old_num_buckets) * sizeof(Bucket); - if (!_mem_tracker->try_consume(delta_bytes)) { - mem_limit_exceeded(delta_bytes); - return; - } - - _buckets.resize(num_buckets); - - // If we're doubling the number of buckets, all nodes in a particular bucket - // either remain there, or move down to an analogous bucket in the other half. - // In order to efficiently check which of the two buckets a node belongs in, the number - // of buckets must be a power of 2. 
- bool doubled_buckets = (num_buckets == old_num_buckets * 2); - - for (int i = 0; i < _num_buckets; ++i) { - Bucket* bucket = &_buckets[i]; - Bucket* sister_bucket = &_buckets[i + old_num_buckets]; - Node* last_node = NULL; - int node_idx = bucket->_node_idx; - - while (node_idx != -1) { - Node* node = get_node(node_idx); - int64_t next_idx = node->_next_idx; - uint32_t hash = node->_hash; - - bool node_must_move = true; - Bucket* move_to = NULL; - - if (doubled_buckets) { - node_must_move = ((hash & old_num_buckets) != 0); - move_to = sister_bucket; - } else { - int64_t bucket_idx = hash & (num_buckets - 1); - node_must_move = (bucket_idx != i); - move_to = &_buckets[bucket_idx]; - } - - if (node_must_move) { - move_node(bucket, move_to, node_idx, node, last_node); - } else { - last_node = node; - } - - node_idx = next_idx; - } - } - - _num_buckets = num_buckets; - _num_buckets_till_resize = MAX_BUCKET_OCCUPANCY_FRACTION * _num_buckets; -} - -void HashTable::grow_node_array() { - int64_t old_size = _nodes_capacity * _node_byte_size; - _nodes_capacity = _nodes_capacity + _nodes_capacity / 2; - int64_t new_size = _nodes_capacity * _node_byte_size; + _mem_tracker->release(_nodes_capacity * _node_byte_size); + _mem_tracker->release(_buckets.size() * sizeof(Bucket)); +} + +bool HashTable::eval_row(TupleRow* row, const vector& ctxs) { + // Put a non-zero constant in the result location for NULL. + // We don't want(NULL, 1) to hash to the same as (0, 1). + // This needs to be as big as the biggest primitive type since the bytes + // get copied directly. + + // the 10 is experience value which need bigger than sizeof(Decimal)/sizeof(int64). + // for if slot is null, we need copy the null value to all type. + static int64_t null_value[10] = {HashUtil::FNV_SEED, HashUtil::FNV_SEED, 0}; + bool has_null = false; + + for (int i = 0; i < ctxs.size(); ++i) { + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + void* val = ctxs[i]->get_value(row); + + if (val == NULL) { + // If the table doesn't store nulls, no reason to keep evaluating + if (!_stores_nulls) { + return true; + } + + _expr_value_null_bits[i] = true; + val = &null_value; + has_null = true; + } else { + _expr_value_null_bits[i] = false; + } + + RawValue::write(val, loc, _build_expr_ctxs[i]->root()->type(), NULL); + } + + return has_null; +} + +uint32_t HashTable::hash_variable_len_row() { + uint32_t hash = _initial_seed; + // Hash the non-var length portions (if there are any) + if (_var_result_begin != 0) { + hash = HashUtil::hash(_expr_values_buffer, _var_result_begin, hash); + } + + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + // non-string and null slots are already part of expr_values_buffer + if (_build_expr_ctxs[i]->root()->type().is_string_type()) { + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + + if (_expr_value_null_bits[i]) { + // Hash the null random seed values at 'loc' + hash = HashUtil::hash(loc, sizeof(StringValue), hash); + } else { + // Hash the string + StringValue* str = reinterpret_cast(loc); + hash = HashUtil::hash(str->ptr, str->len, hash); + } + } else if (_build_expr_ctxs[i]->root()->type().is_decimal_type()) { + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + if (_expr_value_null_bits[i]) { + // Hash the null random seed values at 'loc' + hash = HashUtil::hash(loc, sizeof(StringValue), hash); + } else { + DecimalValue* decimal = reinterpret_cast(loc); + hash = decimal->hash(hash); + } + } + + } + + return hash; +} + +bool 
HashTable::equals(TupleRow* build_row) { + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + void* val = _build_expr_ctxs[i]->get_value(build_row); + + if (val == NULL) { + if (!_stores_nulls) { + return false; + } + + if (!_expr_value_null_bits[i]) { + return false; + } + + continue; + } + + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + + if (!RawValue::eq(loc, val, _build_expr_ctxs[i]->root()->type())) { + return false; + + } + } + + return true; +} + +void HashTable::resize_buckets(int64_t num_buckets) { + DCHECK_EQ((num_buckets & (num_buckets - 1)), 0) << "num_buckets must be a power of 2"; + + int64_t old_num_buckets = _num_buckets; + int64_t delta_bytes = (num_buckets - old_num_buckets) * sizeof(Bucket); + if (!_mem_tracker->try_consume(delta_bytes)) { + mem_limit_exceeded(delta_bytes); + return; + } + + _buckets.resize(num_buckets); + + // If we're doubling the number of buckets, all nodes in a particular bucket + // either remain there, or move down to an analogous bucket in the other half. + // In order to efficiently check which of the two buckets a node belongs in, the number + // of buckets must be a power of 2. + bool doubled_buckets = (num_buckets == old_num_buckets * 2); + + for (int i = 0; i < _num_buckets; ++i) { + Bucket* bucket = &_buckets[i]; + Bucket* sister_bucket = &_buckets[i + old_num_buckets]; + Node* last_node = NULL; + int node_idx = bucket->_node_idx; + + while (node_idx != -1) { + Node* node = get_node(node_idx); + int64_t next_idx = node->_next_idx; + uint32_t hash = node->_hash; + + bool node_must_move = true; + Bucket* move_to = NULL; + + if (doubled_buckets) { + node_must_move = ((hash & old_num_buckets) != 0); + move_to = sister_bucket; + } else { + int64_t bucket_idx = hash & (num_buckets - 1); + node_must_move = (bucket_idx != i); + move_to = &_buckets[bucket_idx]; + } + + if (node_must_move) { + move_node(bucket, move_to, node_idx, node, last_node); + } else { + last_node = node; + } + + node_idx = next_idx; + } + } + + _num_buckets = num_buckets; + _num_buckets_till_resize = MAX_BUCKET_OCCUPANCY_FRACTION * _num_buckets; +} + +void HashTable::grow_node_array() { + int64_t old_size = _nodes_capacity * _node_byte_size; + _nodes_capacity = _nodes_capacity + _nodes_capacity / 2; + int64_t new_size = _nodes_capacity * _node_byte_size; uint8_t* new_nodes = reinterpret_cast(malloc(new_size)); memset(new_nodes, 0, new_size); @@ -273,549 +273,549 @@ void HashTable::grow_node_array() { _nodes = new_nodes; #if 0 - if (PaloMetrics::hash_table_total_bytes() != NULL) { - PaloMetrics::hash_table_total_bytes()->increment(new_size - old_size); - } + if (PaloMetrics::hash_table_total_bytes() != NULL) { + PaloMetrics::hash_table_total_bytes()->increment(new_size - old_size); + } #endif - _mem_tracker->consume(new_size - old_size); - if (_mem_tracker->limit_exceeded()) { - mem_limit_exceeded(new_size - old_size); - } -} - -void HashTable::mem_limit_exceeded(int64_t allocation_size) { - _mem_limit_exceeded = true; - _exceeded_limit = true; - // if (_state != NULL) { - // _state->set_mem_limit_exceeded(_mem_tracker, allocation_size); - // } -} - -std::string HashTable::debug_string(bool skip_empty, const RowDescriptor* desc) { - std::stringstream ss; - ss << std::endl; - - for (int i = 0; i < _buckets.size(); ++i) { - int64_t node_idx = _buckets[i]._node_idx; - bool first = true; - - if (skip_empty && node_idx == -1) { - continue; - } - - ss << i << ": "; - - while (node_idx != -1) { - Node* node = get_node(node_idx); - - if (!first) { - ss << ","; 
- } - - if (desc == NULL) { - ss << node_idx << "(" << (void*)node->data() << ")"; - } else { - ss << (void*)node->data() << " " << print_row(node->data(), *desc); - } - - node_idx = node->_next_idx; - first = false; - } - - ss << std::endl; - } - - return ss.str(); -} - -// Helper function to store a value into the results buffer if the expr -// evaluated to NULL. We don't want (NULL, 1) to hash to the same as (0,1) so -// we'll pick a more random value. -static void codegen_assign_null_value( - LlvmCodeGen* codegen, LlvmCodeGen::LlvmBuilder* builder, - Value* dst, const TypeDescriptor& type) { - int64_t fvn_seed = HashUtil::FNV_SEED; - - if (type.type == TYPE_CHAR || type.type == TYPE_VARCHAR) { - Value* dst_ptr = builder->CreateStructGEP(dst, 0, "string_ptr"); - Value* dst_len = builder->CreateStructGEP(dst, 1, "string_len"); - Value* null_len = codegen->get_int_constant(TYPE_INT, fvn_seed); - Value* null_ptr = builder->CreateIntToPtr(null_len, codegen->ptr_type()); - builder->CreateStore(null_ptr, dst_ptr); - builder->CreateStore(null_len, dst_len); - return; - } else { - Value* null_value = NULL; - // Get a type specific representation of fvn_seed - switch (type.type) { - case TYPE_BOOLEAN: - // In results, booleans are stored as 1 byte - dst = builder->CreateBitCast(dst, codegen->ptr_type()); - null_value = codegen->get_int_constant(TYPE_TINYINT, fvn_seed); - break; - case TYPE_TINYINT: - case TYPE_SMALLINT: - case TYPE_INT: - case TYPE_BIGINT: - null_value = codegen->get_int_constant(type.type, fvn_seed); - break; - case TYPE_FLOAT: { - // Don't care about the value, just the bit pattern - float fvn_seed_float = *reinterpret_cast(&fvn_seed); - null_value = llvm::ConstantFP::get( - codegen->context(), llvm::APFloat(fvn_seed_float)); - break; - } - case TYPE_DOUBLE: { - // Don't care about the value, just the bit pattern - double fvn_seed_double = *reinterpret_cast(&fvn_seed); - null_value = llvm::ConstantFP::get( - codegen->context(), llvm::APFloat(fvn_seed_double)); - break; - } - default: - DCHECK(false); - } - builder->CreateStore(null_value, dst); - } -} - -// Codegen for evaluating a tuple row over either _build_expr_ctxs or _probe_expr_ctxs. -// For the case where we are joining on a single int, the IR looks like -// define i1 @EvaBuildRow(%"class.impala::HashTable"* %this_ptr, -// %"class.impala::TupleRow"* %row) { -// entry: -// %null_ptr = alloca i1 -// %0 = bitcast %"class.palo::TupleRow"* %row to i8** -// %eval = call i32 @SlotRef(i8** %0, i8* null, i1* %null_ptr) -// %1 = load i1* %null_ptr -// br i1 %1, label %null, label %not_null -// -// null: ; preds = %entry -// ret i1 true -// -// not_null: ; preds = %entry -// store i32 %eval, i32* inttoptr (i64 46146336 to i32*) -// br label %continue -// -// continue: ; preds = %not_null -// %2 = zext i1 %1 to i8 -// store i8 %2, i8* inttoptr (i64 46146248 to i8*) -// ret i1 false -// } -// For each expr, we create 3 code blocks. The null, not null and continue blocks. -// Both the null and not null branch into the continue block. The continue block -// becomes the start of the next block for codegen (either the next expr or just the -// end of the function). -Function* HashTable::codegen_eval_tuple_row(RuntimeState* state, bool build) { - // TODO: codegen_assign_null_value() can't handle TYPE_TIMESTAMP or TYPE_DECIMAL yet - const std::vector& ctxs = build ? 
_build_expr_ctxs : _probe_expr_ctxs; - for (int i = 0; i < ctxs.size(); ++i) { - PrimitiveType type = ctxs[i]->root()->type().type; - if (type == TYPE_DATE || type == TYPE_DATETIME - || type == TYPE_DECIMAL || type == TYPE_CHAR) { - return NULL; - } - } - - LlvmCodeGen* codegen = NULL; - if (!state->get_codegen(&codegen).ok()) { - return NULL; - } - - // Get types to generate function prototype - Type* tuple_row_type = codegen->get_type(TupleRow::_s_llvm_class_name); - DCHECK(tuple_row_type != NULL); - PointerType* tuple_row_ptr_type = PointerType::get(tuple_row_type, 0); - - Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); - DCHECK(this_type != NULL); - PointerType* this_ptr_type = PointerType::get(this_type, 0); - - LlvmCodeGen::FnPrototype prototype( - codegen, build ? "eval_build_row" : "eval_probe_row", codegen->get_type(TYPE_BOOLEAN)); - prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); - prototype.add_argument(LlvmCodeGen::NamedVariable("row", tuple_row_ptr_type)); - - LLVMContext& context = codegen->context(); - LlvmCodeGen::LlvmBuilder builder(context); - Value* args[2]; - Function* fn = prototype.generate_prototype(&builder, args); - - Value* row = args[1]; - Value* has_null = codegen->false_value(); - - // Aggregation with no grouping exprs also use the hash table interface for - // code simplicity. In that case, there are no build exprs. - if (!_build_expr_ctxs.empty()) { - const std::vector& ctxs = build ? _build_expr_ctxs : _probe_expr_ctxs; - for (int i = 0; i < ctxs.size(); ++i) { - // TODO: refactor this to somewhere else? This is not hash table specific - // except for the null handling bit and would be used for anyone that needs - // to materialize a vector of exprs - // Convert result buffer to llvm ptr type - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - Value* llvm_loc = codegen->cast_ptr_to_llvm_ptr( - codegen->get_ptr_type(ctxs[i]->root()->type()), loc); - - BasicBlock* null_block = BasicBlock::Create(context, "null", fn); - BasicBlock* not_null_block = BasicBlock::Create(context, "not_null", fn); - BasicBlock* continue_block = BasicBlock::Create(context, "continue", fn); - - // Call expr - Function* expr_fn = NULL; - Status status = ctxs[i]->root()->get_codegend_compute_fn(state, &expr_fn); - if (!status.ok()) { - std::stringstream ss; - ss << "Problem with codegen: " << status.get_error_msg(); - // TODO(zc ) - // state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str())); - fn->eraseFromParent(); // deletes function - return NULL; - } - - Value* ctx_arg = codegen->cast_ptr_to_llvm_ptr( - codegen->get_ptr_type(ExprContext::_s_llvm_class_name), ctxs[i]); - Value* expr_fn_args[] = { ctx_arg, row }; - CodegenAnyVal result = CodegenAnyVal::create_call_wrapped( - codegen, &builder, ctxs[i]->root()->type(), - expr_fn, expr_fn_args, "result", NULL); - Value* is_null = result.get_is_null(); - - // Set null-byte result - Value* null_byte = builder.CreateZExt(is_null, codegen->get_type(TYPE_TINYINT)); - uint8_t* null_byte_loc = &_expr_value_null_bits[i]; - Value* llvm_null_byte_loc = - codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); - builder.CreateStore(null_byte, llvm_null_byte_loc); - - builder.CreateCondBr(is_null, null_block, not_null_block); - - // Null block - builder.SetInsertPoint(null_block); - if (!_stores_nulls) { - // hash table doesn't store nulls, no reason to keep evaluating exprs - builder.CreateRet(codegen->true_value()); - } else { - codegen_assign_null_value(codegen, 
&builder, llvm_loc, ctxs[i]->root()->type()); - has_null = codegen->true_value(); - builder.CreateBr(continue_block); - } - - // Not null block - builder.SetInsertPoint(not_null_block); - result.to_native_ptr(llvm_loc); - builder.CreateBr(continue_block); - - builder.SetInsertPoint(continue_block); - } - } - builder.CreateRet(has_null); - - return codegen->finalize_function(fn); -} - -// Codegen for hashing the current row. In the case with both string and non-string data -// (group by int_col, string_col), the IR looks like: -// define i32 @hash_current_row(%"class.impala::HashTable"* %this_ptr) { -// entry: -// %0 = call i32 @IrCrcHash(i8* inttoptr (i64 51107808 to i8*), i32 16, i32 0) -// %1 = load i8* inttoptr (i64 29500112 to i8*) -// %2 = icmp ne i8 %1, 0 -// br i1 %2, label %null, label %not_null -// -// null: ; preds = %entry -// %3 = call i32 @IrCrcHash(i8* inttoptr (i64 51107824 to i8*), i32 16, i32 %0) -// br label %continue -// -// not_null: ; preds = %entry -// %4 = load i8** getelementptr inbounds ( -// %"struct.impala::StringValue"* inttoptr -// (i64 51107824 to %"struct.impala::StringValue"*), i32 0, i32 0) -// %5 = load i32* getelementptr inbounds ( -// %"struct.impala::StringValue"* inttoptr -// (i64 51107824 to %"struct.impala::StringValue"*), i32 0, i32 1) -// %6 = call i32 @IrCrcHash(i8* %4, i32 %5, i32 %0) -// br label %continue -// -// continue: ; preds = %not_null, %null -// %7 = phi i32 [ %6, %not_null ], [ %3, %null ] -// ret i32 %7 -// } -// TODO: can this be cross-compiled? -Function* HashTable::codegen_hash_current_row(RuntimeState* state) { - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - // Disable codegen for CHAR - if (_build_expr_ctxs[i]->root()->type().type == TYPE_CHAR) { - return NULL; - } - } - - LlvmCodeGen* codegen = NULL; - if (!state->get_codegen(&codegen).ok()) { - return NULL; - } - - // Get types to generate function prototype - Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); - DCHECK(this_type != NULL); - PointerType* this_ptr_type = PointerType::get(this_type, 0); - - LlvmCodeGen::FnPrototype prototype(codegen, "hash_current_row", codegen->get_type(TYPE_INT)); - prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); - - LLVMContext& context = codegen->context(); - LlvmCodeGen::LlvmBuilder builder(context); - Value* this_arg = NULL; - Function* fn = prototype.generate_prototype(&builder, &this_arg); - - Value* hash_result = codegen->get_int_constant(TYPE_INT, _initial_seed); - Value* data = codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), _expr_values_buffer); - if (_var_result_begin == -1) { - // No variable length slots, just hash what is in '_expr_values_buffer' - if (_results_buffer_size > 0) { - Function* hash_fn = codegen->get_hash_function(_results_buffer_size); - Value* len = codegen->get_int_constant(TYPE_INT, _results_buffer_size); - hash_result = builder.CreateCall3(hash_fn, data, len, hash_result); - } - } else { - if (_var_result_begin > 0) { - Function* hash_fn = codegen->get_hash_function(_var_result_begin); - Value* len = codegen->get_int_constant(TYPE_INT, _var_result_begin); - hash_result = builder.CreateCall3(hash_fn, data, len, hash_result); - } - - // Hash string slots - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - if (_build_expr_ctxs[i]->root()->type().type != TYPE_CHAR - && _build_expr_ctxs[i]->root()->type().type != TYPE_VARCHAR) { - continue; - } - - BasicBlock* null_block = NULL; - BasicBlock* not_null_block = NULL; - BasicBlock* continue_block = NULL; - 
Value* str_null_result = NULL; - - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - - // If the hash table stores nulls, we need to check if the stringval - // evaluated to NULL - if (_stores_nulls) { - null_block = BasicBlock::Create(context, "null", fn); - not_null_block = BasicBlock::Create(context, "not_null", fn); - continue_block = BasicBlock::Create(context, "continue", fn); - - uint8_t* null_byte_loc = &_expr_value_null_bits[i]; - Value* llvm_null_byte_loc = - codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); - Value* null_byte = builder.CreateLoad(llvm_null_byte_loc); - Value* is_null = builder.CreateICmpNE( - null_byte, codegen->get_int_constant(TYPE_TINYINT, 0)); - builder.CreateCondBr(is_null, null_block, not_null_block); - - // For null, we just want to call the hash function on the portion of - // the data - builder.SetInsertPoint(null_block); - Function* null_hash_fn = codegen->get_hash_function(sizeof(StringValue)); - Value* llvm_loc = codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), loc); - Value* len = codegen->get_int_constant(TYPE_INT, sizeof(StringValue)); - str_null_result = builder.CreateCall3(null_hash_fn, llvm_loc, len, hash_result); - builder.CreateBr(continue_block); - - builder.SetInsertPoint(not_null_block); - } - - // Convert _expr_values_buffer loc to llvm value - Value* str_val = codegen->cast_ptr_to_llvm_ptr( - codegen->get_ptr_type(TYPE_VARCHAR), loc); - - Value* ptr = builder.CreateStructGEP(str_val, 0, "ptr"); - Value* len = builder.CreateStructGEP(str_val, 1, "len"); - ptr = builder.CreateLoad(ptr); - len = builder.CreateLoad(len); - - // Call hash(ptr, len, hash_result); - Function* general_hash_fn = codegen->get_hash_function(); - Value* string_hash_result = - builder.CreateCall3(general_hash_fn, ptr, len, hash_result); - - if (_stores_nulls) { - builder.CreateBr(continue_block); - builder.SetInsertPoint(continue_block); - // Use phi node to reconcile that we could have come from the string-null - // path and string not null paths. - PHINode* phi_node = builder.CreatePHI(codegen->get_type(TYPE_INT), 2); - phi_node->addIncoming(string_hash_result, not_null_block); - phi_node->addIncoming(str_null_result, null_block); - hash_result = phi_node; - } else { - hash_result = string_hash_result; - } - } - } - - builder.CreateRet(hash_result); - return codegen->finalize_function(fn); -} - -// Codegen for HashTable::Equals. 
For a hash table with two exprs (string,int), the -// IR looks like: -// -// define i1 @Equals(%"class.impala::OldHashTable"* %this_ptr, -// %"class.impala::TupleRow"* %row) { -// entry: -// %result = call i64 @get_slot_ref(%"class.impala::ExprContext"* inttoptr -// (i64 146381856 to %"class.impala::ExprContext"*), -// %"class.impala::TupleRow"* %row) -// %0 = trunc i64 %result to i1 -// br i1 %0, label %null, label %not_null -// -// false_block: ; preds = %not_null2, %null1, %not_null, %null -// ret i1 false -// -// null: ; preds = %entry -// br i1 false, label %continue, label %false_block -// -// not_null: ; preds = %entry -// %1 = load i32* inttoptr (i64 104774368 to i32*) -// %2 = ashr i64 %result, 32 -// %3 = trunc i64 %2 to i32 -// %cmp_raw = icmp eq i32 %3, %1 -// br i1 %cmp_raw, label %continue, label %false_block -// -// continue: ; preds = %not_null, %null -// %result4 = call { i64, i8* } @get_slot_ref( -// %"class.impala::ExprContext"* inttoptr -// (i64 146381696 to %"class.impala::ExprContext"*), -// %"class.impala::TupleRow"* %row) -// %4 = extractvalue { i64, i8* } %result4, 0 -// %5 = trunc i64 %4 to i1 -// br i1 %5, label %null1, label %not_null2 -// -// null1: ; preds = %continue -// br i1 false, label %continue3, label %false_block -// -// not_null2: ; preds = %continue -// %6 = extractvalue { i64, i8* } %result4, 0 -// %7 = ashr i64 %6, 32 -// %8 = trunc i64 %7 to i32 -// %result5 = extractvalue { i64, i8* } %result4, 1 -// %cmp_raw6 = call i1 @_Z11StringValEQPciPKN6impala11StringValueE( -// i8* %result5, i32 %8, %"struct.impala::StringValue"* inttoptr -// (i64 104774384 to %"struct.impala::StringValue"*)) -// br i1 %cmp_raw6, label %continue3, label %false_block -// -// continue3: ; preds = %not_null2, %null1 -// ret i1 true -// } -Function* HashTable::codegen_equals(RuntimeState* state) { - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - // Disable codegen for CHAR - if (_build_expr_ctxs[i]->root()->type().type == TYPE_CHAR) { - return NULL; - } - } - - LlvmCodeGen* codegen = NULL; - if (!state->get_codegen(&codegen).ok()) { - return NULL; - } - // Get types to generate function prototype - Type* tuple_row_type = codegen->get_type(TupleRow::_s_llvm_class_name); - DCHECK(tuple_row_type != NULL); - PointerType* tuple_row_ptr_type = PointerType::get(tuple_row_type, 0); - - Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); - DCHECK(this_type != NULL); - PointerType* this_ptr_type = PointerType::get(this_type, 0); - - LlvmCodeGen::FnPrototype prototype(codegen, "equals", codegen->get_type(TYPE_BOOLEAN)); - prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); - prototype.add_argument(LlvmCodeGen::NamedVariable("row", tuple_row_ptr_type)); - - LLVMContext& context = codegen->context(); - LlvmCodeGen::LlvmBuilder builder(context); - Value* args[2]; - Function* fn = prototype.generate_prototype(&builder, args); - Value* row = args[1]; - - if (!_build_expr_ctxs.empty()) { - BasicBlock* false_block = BasicBlock::Create(context, "false_block", fn); - - for (int i = 0; i < _build_expr_ctxs.size(); ++i) { - BasicBlock* null_block = BasicBlock::Create(context, "null", fn); - BasicBlock* not_null_block = BasicBlock::Create(context, "not_null", fn); - BasicBlock* continue_block = BasicBlock::Create(context, "continue", fn); - - // call GetValue on build_exprs[i] - Function* expr_fn = NULL; - Status status = _build_expr_ctxs[i]->root()->get_codegend_compute_fn(state, &expr_fn); - if (!status.ok()) { - std::stringstream ss; - ss << 
"Problem with codegen: " << status.get_error_msg(); - // TODO(zc) - // state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str())); - fn->eraseFromParent(); // deletes function - return NULL; - } - - Value* ctx_arg = codegen->cast_ptr_to_llvm_ptr( - codegen->get_ptr_type(ExprContext::_s_llvm_class_name), _build_expr_ctxs[i]); - Value* expr_fn_args[] = { ctx_arg, row }; - CodegenAnyVal result = CodegenAnyVal::create_call_wrapped( - codegen, &builder, _build_expr_ctxs[i]->root()->type(), - expr_fn, expr_fn_args, "result", NULL); - Value* is_null = result.get_is_null(); - - // Determine if probe is null (i.e. _expr_value_null_bits[i] == true). In - // the case where the hash table does not store nulls, this is always false. - Value* probe_is_null = codegen->false_value(); - uint8_t* null_byte_loc = &_expr_value_null_bits[i]; - if (_stores_nulls) { - Value* llvm_null_byte_loc = - codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); - Value* null_byte = builder.CreateLoad(llvm_null_byte_loc); - probe_is_null = builder.CreateICmpNE( - null_byte, codegen->get_int_constant(TYPE_TINYINT, 0)); - } - - // Get llvm value for probe_val from '_expr_values_buffer' - void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; - Value* probe_val = codegen->cast_ptr_to_llvm_ptr( - codegen->get_ptr_type(_build_expr_ctxs[i]->root()->type()), loc); - - // Branch for GetValue() returning NULL - builder.CreateCondBr(is_null, null_block, not_null_block); - - // Null block - builder.SetInsertPoint(null_block); - builder.CreateCondBr(probe_is_null, continue_block, false_block); - - // Not-null block - builder.SetInsertPoint(not_null_block); - if (_stores_nulls) { - BasicBlock* cmp_block = BasicBlock::Create(context, "cmp", fn); - // First need to compare that probe expr[i] is not null - builder.CreateCondBr(probe_is_null, false_block, cmp_block); - builder.SetInsertPoint(cmp_block); - } - // Check result == probe_val - Value* is_equal = result.eq_to_native_ptr(probe_val); - builder.CreateCondBr(is_equal, continue_block, false_block); - - builder.SetInsertPoint(continue_block); - } - builder.CreateRet(codegen->true_value()); - - builder.SetInsertPoint(false_block); - builder.CreateRet(codegen->false_value()); - } else { - builder.CreateRet(codegen->true_value()); - } - - return codegen->finalize_function(fn); -} - -} + _mem_tracker->consume(new_size - old_size); + if (_mem_tracker->limit_exceeded()) { + mem_limit_exceeded(new_size - old_size); + } +} + +void HashTable::mem_limit_exceeded(int64_t allocation_size) { + _mem_limit_exceeded = true; + _exceeded_limit = true; + // if (_state != NULL) { + // _state->set_mem_limit_exceeded(_mem_tracker, allocation_size); + // } +} + +std::string HashTable::debug_string(bool skip_empty, const RowDescriptor* desc) { + std::stringstream ss; + ss << std::endl; + + for (int i = 0; i < _buckets.size(); ++i) { + int64_t node_idx = _buckets[i]._node_idx; + bool first = true; + + if (skip_empty && node_idx == -1) { + continue; + } + + ss << i << ": "; + + while (node_idx != -1) { + Node* node = get_node(node_idx); + + if (!first) { + ss << ","; + } + + if (desc == NULL) { + ss << node_idx << "(" << (void*)node->data() << ")"; + } else { + ss << (void*)node->data() << " " << print_row(node->data(), *desc); + } + + node_idx = node->_next_idx; + first = false; + } + + ss << std::endl; + } + + return ss.str(); +} + +// Helper function to store a value into the results buffer if the expr +// evaluated to NULL. 
We don't want (NULL, 1) to hash to the same as (0,1) so +// we'll pick a more random value. +static void codegen_assign_null_value( + LlvmCodeGen* codegen, LlvmCodeGen::LlvmBuilder* builder, + Value* dst, const TypeDescriptor& type) { + int64_t fvn_seed = HashUtil::FNV_SEED; + + if (type.type == TYPE_CHAR || type.type == TYPE_VARCHAR) { + Value* dst_ptr = builder->CreateStructGEP(dst, 0, "string_ptr"); + Value* dst_len = builder->CreateStructGEP(dst, 1, "string_len"); + Value* null_len = codegen->get_int_constant(TYPE_INT, fvn_seed); + Value* null_ptr = builder->CreateIntToPtr(null_len, codegen->ptr_type()); + builder->CreateStore(null_ptr, dst_ptr); + builder->CreateStore(null_len, dst_len); + return; + } else { + Value* null_value = NULL; + // Get a type specific representation of fvn_seed + switch (type.type) { + case TYPE_BOOLEAN: + // In results, booleans are stored as 1 byte + dst = builder->CreateBitCast(dst, codegen->ptr_type()); + null_value = codegen->get_int_constant(TYPE_TINYINT, fvn_seed); + break; + case TYPE_TINYINT: + case TYPE_SMALLINT: + case TYPE_INT: + case TYPE_BIGINT: + null_value = codegen->get_int_constant(type.type, fvn_seed); + break; + case TYPE_FLOAT: { + // Don't care about the value, just the bit pattern + float fvn_seed_float = *reinterpret_cast(&fvn_seed); + null_value = llvm::ConstantFP::get( + codegen->context(), llvm::APFloat(fvn_seed_float)); + break; + } + case TYPE_DOUBLE: { + // Don't care about the value, just the bit pattern + double fvn_seed_double = *reinterpret_cast(&fvn_seed); + null_value = llvm::ConstantFP::get( + codegen->context(), llvm::APFloat(fvn_seed_double)); + break; + } + default: + DCHECK(false); + } + builder->CreateStore(null_value, dst); + } +} + +// Codegen for evaluating a tuple row over either _build_expr_ctxs or _probe_expr_ctxs. +// For the case where we are joining on a single int, the IR looks like +// define i1 @EvaBuildRow(%"class.impala::HashTable"* %this_ptr, +// %"class.impala::TupleRow"* %row) { +// entry: +// %null_ptr = alloca i1 +// %0 = bitcast %"class.palo::TupleRow"* %row to i8** +// %eval = call i32 @SlotRef(i8** %0, i8* null, i1* %null_ptr) +// %1 = load i1* %null_ptr +// br i1 %1, label %null, label %not_null +// +// null: ; preds = %entry +// ret i1 true +// +// not_null: ; preds = %entry +// store i32 %eval, i32* inttoptr (i64 46146336 to i32*) +// br label %continue +// +// continue: ; preds = %not_null +// %2 = zext i1 %1 to i8 +// store i8 %2, i8* inttoptr (i64 46146248 to i8*) +// ret i1 false +// } +// For each expr, we create 3 code blocks. The null, not null and continue blocks. +// Both the null and not null branch into the continue block. The continue block +// becomes the start of the next block for codegen (either the next expr or just the +// end of the function). +Function* HashTable::codegen_eval_tuple_row(RuntimeState* state, bool build) { + // TODO: codegen_assign_null_value() can't handle TYPE_TIMESTAMP or TYPE_DECIMAL yet + const std::vector& ctxs = build ? 
_build_expr_ctxs : _probe_expr_ctxs; + for (int i = 0; i < ctxs.size(); ++i) { + PrimitiveType type = ctxs[i]->root()->type().type; + if (type == TYPE_DATE || type == TYPE_DATETIME + || type == TYPE_DECIMAL || type == TYPE_CHAR) { + return NULL; + } + } + + LlvmCodeGen* codegen = NULL; + if (!state->get_codegen(&codegen).ok()) { + return NULL; + } + + // Get types to generate function prototype + Type* tuple_row_type = codegen->get_type(TupleRow::_s_llvm_class_name); + DCHECK(tuple_row_type != NULL); + PointerType* tuple_row_ptr_type = PointerType::get(tuple_row_type, 0); + + Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); + DCHECK(this_type != NULL); + PointerType* this_ptr_type = PointerType::get(this_type, 0); + + LlvmCodeGen::FnPrototype prototype( + codegen, build ? "eval_build_row" : "eval_probe_row", codegen->get_type(TYPE_BOOLEAN)); + prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); + prototype.add_argument(LlvmCodeGen::NamedVariable("row", tuple_row_ptr_type)); + + LLVMContext& context = codegen->context(); + LlvmCodeGen::LlvmBuilder builder(context); + Value* args[2]; + Function* fn = prototype.generate_prototype(&builder, args); + + Value* row = args[1]; + Value* has_null = codegen->false_value(); + + // Aggregation with no grouping exprs also use the hash table interface for + // code simplicity. In that case, there are no build exprs. + if (!_build_expr_ctxs.empty()) { + const std::vector& ctxs = build ? _build_expr_ctxs : _probe_expr_ctxs; + for (int i = 0; i < ctxs.size(); ++i) { + // TODO: refactor this to somewhere else? This is not hash table specific + // except for the null handling bit and would be used for anyone that needs + // to materialize a vector of exprs + // Convert result buffer to llvm ptr type + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + Value* llvm_loc = codegen->cast_ptr_to_llvm_ptr( + codegen->get_ptr_type(ctxs[i]->root()->type()), loc); + + BasicBlock* null_block = BasicBlock::Create(context, "null", fn); + BasicBlock* not_null_block = BasicBlock::Create(context, "not_null", fn); + BasicBlock* continue_block = BasicBlock::Create(context, "continue", fn); + + // Call expr + Function* expr_fn = NULL; + Status status = ctxs[i]->root()->get_codegend_compute_fn(state, &expr_fn); + if (!status.ok()) { + std::stringstream ss; + ss << "Problem with codegen: " << status.get_error_msg(); + // TODO(zc ) + // state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str())); + fn->eraseFromParent(); // deletes function + return NULL; + } + + Value* ctx_arg = codegen->cast_ptr_to_llvm_ptr( + codegen->get_ptr_type(ExprContext::_s_llvm_class_name), ctxs[i]); + Value* expr_fn_args[] = { ctx_arg, row }; + CodegenAnyVal result = CodegenAnyVal::create_call_wrapped( + codegen, &builder, ctxs[i]->root()->type(), + expr_fn, expr_fn_args, "result", NULL); + Value* is_null = result.get_is_null(); + + // Set null-byte result + Value* null_byte = builder.CreateZExt(is_null, codegen->get_type(TYPE_TINYINT)); + uint8_t* null_byte_loc = &_expr_value_null_bits[i]; + Value* llvm_null_byte_loc = + codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); + builder.CreateStore(null_byte, llvm_null_byte_loc); + + builder.CreateCondBr(is_null, null_block, not_null_block); + + // Null block + builder.SetInsertPoint(null_block); + if (!_stores_nulls) { + // hash table doesn't store nulls, no reason to keep evaluating exprs + builder.CreateRet(codegen->true_value()); + } else { + codegen_assign_null_value(codegen, 
&builder, llvm_loc, ctxs[i]->root()->type()); + has_null = codegen->true_value(); + builder.CreateBr(continue_block); + } + + // Not null block + builder.SetInsertPoint(not_null_block); + result.to_native_ptr(llvm_loc); + builder.CreateBr(continue_block); + + builder.SetInsertPoint(continue_block); + } + } + builder.CreateRet(has_null); + + return codegen->finalize_function(fn); +} + +// Codegen for hashing the current row. In the case with both string and non-string data +// (group by int_col, string_col), the IR looks like: +// define i32 @hash_current_row(%"class.impala::HashTable"* %this_ptr) { +// entry: +// %0 = call i32 @IrCrcHash(i8* inttoptr (i64 51107808 to i8*), i32 16, i32 0) +// %1 = load i8* inttoptr (i64 29500112 to i8*) +// %2 = icmp ne i8 %1, 0 +// br i1 %2, label %null, label %not_null +// +// null: ; preds = %entry +// %3 = call i32 @IrCrcHash(i8* inttoptr (i64 51107824 to i8*), i32 16, i32 %0) +// br label %continue +// +// not_null: ; preds = %entry +// %4 = load i8** getelementptr inbounds ( +// %"struct.impala::StringValue"* inttoptr +// (i64 51107824 to %"struct.impala::StringValue"*), i32 0, i32 0) +// %5 = load i32* getelementptr inbounds ( +// %"struct.impala::StringValue"* inttoptr +// (i64 51107824 to %"struct.impala::StringValue"*), i32 0, i32 1) +// %6 = call i32 @IrCrcHash(i8* %4, i32 %5, i32 %0) +// br label %continue +// +// continue: ; preds = %not_null, %null +// %7 = phi i32 [ %6, %not_null ], [ %3, %null ] +// ret i32 %7 +// } +// TODO: can this be cross-compiled? +Function* HashTable::codegen_hash_current_row(RuntimeState* state) { + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + // Disable codegen for CHAR + if (_build_expr_ctxs[i]->root()->type().type == TYPE_CHAR) { + return NULL; + } + } + + LlvmCodeGen* codegen = NULL; + if (!state->get_codegen(&codegen).ok()) { + return NULL; + } + + // Get types to generate function prototype + Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); + DCHECK(this_type != NULL); + PointerType* this_ptr_type = PointerType::get(this_type, 0); + + LlvmCodeGen::FnPrototype prototype(codegen, "hash_current_row", codegen->get_type(TYPE_INT)); + prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); + + LLVMContext& context = codegen->context(); + LlvmCodeGen::LlvmBuilder builder(context); + Value* this_arg = NULL; + Function* fn = prototype.generate_prototype(&builder, &this_arg); + + Value* hash_result = codegen->get_int_constant(TYPE_INT, _initial_seed); + Value* data = codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), _expr_values_buffer); + if (_var_result_begin == -1) { + // No variable length slots, just hash what is in '_expr_values_buffer' + if (_results_buffer_size > 0) { + Function* hash_fn = codegen->get_hash_function(_results_buffer_size); + Value* len = codegen->get_int_constant(TYPE_INT, _results_buffer_size); + hash_result = builder.CreateCall3(hash_fn, data, len, hash_result); + } + } else { + if (_var_result_begin > 0) { + Function* hash_fn = codegen->get_hash_function(_var_result_begin); + Value* len = codegen->get_int_constant(TYPE_INT, _var_result_begin); + hash_result = builder.CreateCall3(hash_fn, data, len, hash_result); + } + + // Hash string slots + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + if (_build_expr_ctxs[i]->root()->type().type != TYPE_CHAR + && _build_expr_ctxs[i]->root()->type().type != TYPE_VARCHAR) { + continue; + } + + BasicBlock* null_block = NULL; + BasicBlock* not_null_block = NULL; + BasicBlock* continue_block = NULL; + 
Value* str_null_result = NULL; + + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + + // If the hash table stores nulls, we need to check if the stringval + // evaluated to NULL + if (_stores_nulls) { + null_block = BasicBlock::Create(context, "null", fn); + not_null_block = BasicBlock::Create(context, "not_null", fn); + continue_block = BasicBlock::Create(context, "continue", fn); + + uint8_t* null_byte_loc = &_expr_value_null_bits[i]; + Value* llvm_null_byte_loc = + codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); + Value* null_byte = builder.CreateLoad(llvm_null_byte_loc); + Value* is_null = builder.CreateICmpNE( + null_byte, codegen->get_int_constant(TYPE_TINYINT, 0)); + builder.CreateCondBr(is_null, null_block, not_null_block); + + // For null, we just want to call the hash function on the portion of + // the data + builder.SetInsertPoint(null_block); + Function* null_hash_fn = codegen->get_hash_function(sizeof(StringValue)); + Value* llvm_loc = codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), loc); + Value* len = codegen->get_int_constant(TYPE_INT, sizeof(StringValue)); + str_null_result = builder.CreateCall3(null_hash_fn, llvm_loc, len, hash_result); + builder.CreateBr(continue_block); + + builder.SetInsertPoint(not_null_block); + } + + // Convert _expr_values_buffer loc to llvm value + Value* str_val = codegen->cast_ptr_to_llvm_ptr( + codegen->get_ptr_type(TYPE_VARCHAR), loc); + + Value* ptr = builder.CreateStructGEP(str_val, 0, "ptr"); + Value* len = builder.CreateStructGEP(str_val, 1, "len"); + ptr = builder.CreateLoad(ptr); + len = builder.CreateLoad(len); + + // Call hash(ptr, len, hash_result); + Function* general_hash_fn = codegen->get_hash_function(); + Value* string_hash_result = + builder.CreateCall3(general_hash_fn, ptr, len, hash_result); + + if (_stores_nulls) { + builder.CreateBr(continue_block); + builder.SetInsertPoint(continue_block); + // Use phi node to reconcile that we could have come from the string-null + // path and string not null paths. + PHINode* phi_node = builder.CreatePHI(codegen->get_type(TYPE_INT), 2); + phi_node->addIncoming(string_hash_result, not_null_block); + phi_node->addIncoming(str_null_result, null_block); + hash_result = phi_node; + } else { + hash_result = string_hash_result; + } + } + } + + builder.CreateRet(hash_result); + return codegen->finalize_function(fn); +} + +// Codegen for HashTable::Equals. 
For a hash table with two exprs (string,int), the +// IR looks like: +// +// define i1 @Equals(%"class.impala::OldHashTable"* %this_ptr, +// %"class.impala::TupleRow"* %row) { +// entry: +// %result = call i64 @get_slot_ref(%"class.impala::ExprContext"* inttoptr +// (i64 146381856 to %"class.impala::ExprContext"*), +// %"class.impala::TupleRow"* %row) +// %0 = trunc i64 %result to i1 +// br i1 %0, label %null, label %not_null +// +// false_block: ; preds = %not_null2, %null1, %not_null, %null +// ret i1 false +// +// null: ; preds = %entry +// br i1 false, label %continue, label %false_block +// +// not_null: ; preds = %entry +// %1 = load i32* inttoptr (i64 104774368 to i32*) +// %2 = ashr i64 %result, 32 +// %3 = trunc i64 %2 to i32 +// %cmp_raw = icmp eq i32 %3, %1 +// br i1 %cmp_raw, label %continue, label %false_block +// +// continue: ; preds = %not_null, %null +// %result4 = call { i64, i8* } @get_slot_ref( +// %"class.impala::ExprContext"* inttoptr +// (i64 146381696 to %"class.impala::ExprContext"*), +// %"class.impala::TupleRow"* %row) +// %4 = extractvalue { i64, i8* } %result4, 0 +// %5 = trunc i64 %4 to i1 +// br i1 %5, label %null1, label %not_null2 +// +// null1: ; preds = %continue +// br i1 false, label %continue3, label %false_block +// +// not_null2: ; preds = %continue +// %6 = extractvalue { i64, i8* } %result4, 0 +// %7 = ashr i64 %6, 32 +// %8 = trunc i64 %7 to i32 +// %result5 = extractvalue { i64, i8* } %result4, 1 +// %cmp_raw6 = call i1 @_Z11StringValEQPciPKN6impala11StringValueE( +// i8* %result5, i32 %8, %"struct.impala::StringValue"* inttoptr +// (i64 104774384 to %"struct.impala::StringValue"*)) +// br i1 %cmp_raw6, label %continue3, label %false_block +// +// continue3: ; preds = %not_null2, %null1 +// ret i1 true +// } +Function* HashTable::codegen_equals(RuntimeState* state) { + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + // Disable codegen for CHAR + if (_build_expr_ctxs[i]->root()->type().type == TYPE_CHAR) { + return NULL; + } + } + + LlvmCodeGen* codegen = NULL; + if (!state->get_codegen(&codegen).ok()) { + return NULL; + } + // Get types to generate function prototype + Type* tuple_row_type = codegen->get_type(TupleRow::_s_llvm_class_name); + DCHECK(tuple_row_type != NULL); + PointerType* tuple_row_ptr_type = PointerType::get(tuple_row_type, 0); + + Type* this_type = codegen->get_type(HashTable::_s_llvm_class_name); + DCHECK(this_type != NULL); + PointerType* this_ptr_type = PointerType::get(this_type, 0); + + LlvmCodeGen::FnPrototype prototype(codegen, "equals", codegen->get_type(TYPE_BOOLEAN)); + prototype.add_argument(LlvmCodeGen::NamedVariable("this_ptr", this_ptr_type)); + prototype.add_argument(LlvmCodeGen::NamedVariable("row", tuple_row_ptr_type)); + + LLVMContext& context = codegen->context(); + LlvmCodeGen::LlvmBuilder builder(context); + Value* args[2]; + Function* fn = prototype.generate_prototype(&builder, args); + Value* row = args[1]; + + if (!_build_expr_ctxs.empty()) { + BasicBlock* false_block = BasicBlock::Create(context, "false_block", fn); + + for (int i = 0; i < _build_expr_ctxs.size(); ++i) { + BasicBlock* null_block = BasicBlock::Create(context, "null", fn); + BasicBlock* not_null_block = BasicBlock::Create(context, "not_null", fn); + BasicBlock* continue_block = BasicBlock::Create(context, "continue", fn); + + // call GetValue on build_exprs[i] + Function* expr_fn = NULL; + Status status = _build_expr_ctxs[i]->root()->get_codegend_compute_fn(state, &expr_fn); + if (!status.ok()) { + std::stringstream ss; + ss << 
"Problem with codegen: " << status.get_error_msg(); + // TODO(zc) + // state->LogError(ErrorMsg(TErrorCode::GENERAL, ss.str())); + fn->eraseFromParent(); // deletes function + return NULL; + } + + Value* ctx_arg = codegen->cast_ptr_to_llvm_ptr( + codegen->get_ptr_type(ExprContext::_s_llvm_class_name), _build_expr_ctxs[i]); + Value* expr_fn_args[] = { ctx_arg, row }; + CodegenAnyVal result = CodegenAnyVal::create_call_wrapped( + codegen, &builder, _build_expr_ctxs[i]->root()->type(), + expr_fn, expr_fn_args, "result", NULL); + Value* is_null = result.get_is_null(); + + // Determine if probe is null (i.e. _expr_value_null_bits[i] == true). In + // the case where the hash table does not store nulls, this is always false. + Value* probe_is_null = codegen->false_value(); + uint8_t* null_byte_loc = &_expr_value_null_bits[i]; + if (_stores_nulls) { + Value* llvm_null_byte_loc = + codegen->cast_ptr_to_llvm_ptr(codegen->ptr_type(), null_byte_loc); + Value* null_byte = builder.CreateLoad(llvm_null_byte_loc); + probe_is_null = builder.CreateICmpNE( + null_byte, codegen->get_int_constant(TYPE_TINYINT, 0)); + } + + // Get llvm value for probe_val from '_expr_values_buffer' + void* loc = _expr_values_buffer + _expr_values_buffer_offsets[i]; + Value* probe_val = codegen->cast_ptr_to_llvm_ptr( + codegen->get_ptr_type(_build_expr_ctxs[i]->root()->type()), loc); + + // Branch for GetValue() returning NULL + builder.CreateCondBr(is_null, null_block, not_null_block); + + // Null block + builder.SetInsertPoint(null_block); + builder.CreateCondBr(probe_is_null, continue_block, false_block); + + // Not-null block + builder.SetInsertPoint(not_null_block); + if (_stores_nulls) { + BasicBlock* cmp_block = BasicBlock::Create(context, "cmp", fn); + // First need to compare that probe expr[i] is not null + builder.CreateCondBr(probe_is_null, false_block, cmp_block); + builder.SetInsertPoint(cmp_block); + } + // Check result == probe_val + Value* is_equal = result.eq_to_native_ptr(probe_val); + builder.CreateCondBr(is_equal, continue_block, false_block); + + builder.SetInsertPoint(continue_block); + } + builder.CreateRet(codegen->true_value()); + + builder.SetInsertPoint(false_block); + builder.CreateRet(codegen->false_value()); + } else { + builder.CreateRet(codegen->true_value()); + } + + return codegen->finalize_function(fn); +} + +} diff --git a/be/src/exec/hash_table.h b/be/src/exec/hash_table.h index 9c707f2ba7..1e71424ee4 100644 --- a/be/src/exec/hash_table.h +++ b/be/src/exec/hash_table.h @@ -18,427 +18,427 @@ // specific language governing permissions and limitations // under the License. -#ifndef BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_H -#define BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_H - -#include -#include - -#include "codegen/palo_ir.h" -#include "common/logging.h" -#include "util/hash_util.hpp" - -namespace llvm { - -class Function; - -} - -namespace palo { - -class Expr; -class ExprContext; -class LlvmCodeGen; -class RowDescriptor; -class Tuple; -class TupleRow; -class MemTracker; -class RuntimeState; - -using std::vector; - -// Hash table implementation designed for hash aggregation and hash joins. This is not -// templatized and is tailored to the usage pattern for aggregation and joins. The -// hash table store TupleRows and allows for different exprs for insertions and finds. -// This is the pattern we use for joins and aggregation where the input/build tuple -// row descriptor is different from the find/probe descriptor. 
-// The table is optimized for the query engine's use case as much as possible and is not -// intended to be a generic hash table implementation. The API loosely mimics the -// std::hashset API. -// -// The hash table stores evaluated expr results for the current row being processed -// when possible into a contiguous memory buffer. This allows for very efficient -// computation for hashing. The implementation is also designed to allow codegen -// for some paths. -// -// The hash table does not support removes. The hash table is not thread safe. -// -// The implementation is based on the boost multiset. The hashtable is implemented by -// two data structures: a vector of buckets and a vector of nodes. Inserted values -// are stored as nodes (in the order they are inserted). The buckets (indexed by the -// mod of the hash) contain pointers to the node vector. Nodes that fall in the same -// bucket are linked together (the bucket pointer gets you the head of that linked list). -// When growing the hash table, the number of buckets is doubled, and nodes from a -// particular bucket either stay in place or move to an analogous bucket in the second -// half of buckets. This behavior allows us to avoid moving about half the nodes each -// time, and maintains good cache properties by only accessing 2 buckets at a time. -// The node vector is modified in place. -// Due to the doubling nature of the buckets, we require that the number of buckets is a -// power of 2. This allows us to determine if a node needs to move by simply checking a -// single bit, and further allows us to initially hash nodes using a bitmask. -// -// TODO: this is not a fancy hash table in terms of memory access patterns (cuckoo-hashing -// or something that spills to disk). We will likely want to invest more time into this. -// TODO: hash-join and aggregation have very different access patterns. Joins insert -// all the rows and then calls scan to find them. Aggregation interleaves find() and -// inserts(). We can want to optimize joins more heavily for inserts() (in particular -// growing). -class HashTable { -private: - struct Node; -public: - class Iterator; - - // Create a hash table. - // - build_exprs are the exprs that should be used to evaluate rows during insert(). - // - probe_exprs are used during find() - // - num_build_tuples: number of Tuples in the build tuple row - // - stores_nulls: if false, TupleRows with nulls are ignored during Insert - // - num_buckets: number of buckets that the hash table should be initialized to - // - mem_limits: if non-empty, all memory allocation for nodes and for buckets is - // tracked against those limits; the limits must be valid until the d'tor is called - // - initial_seed: Initial seed value to use when computing hashes for rows - HashTable( - const std::vector& build_exprs, - const std::vector& probe_exprs, - int num_build_tuples, bool stores_nulls, int32_t initial_seed, - MemTracker* mem_tracker, - int64_t num_buckets); - - ~HashTable(); - - // Call to cleanup any resources. Must be called once. - void close(); - - // Insert row into the hash table. Row will be evaluated over _build_expr_ctxs - // This will grow the hash table if necessary - void IR_ALWAYS_INLINE insert(TupleRow* row) { - if (_num_filled_buckets > _num_buckets_till_resize) { - // TODO: next prime instead of double? - resize_buckets(_num_buckets * 2); - } - - insert_impl(row); - } - - // Returns the start iterator for all rows that match 'probe_row'. 'probe_row' is - // evaluated with _probe_expr_ctxs. 
The iterator can be iterated until HashTable::end() - // to find all the matching rows. - // Only one scan be in progress at any time (i.e. it is not legal to call - // find(), begin iterating through all the matches, call another find(), - // and continuing iterator from the first scan iterator). - // Advancing the returned iterator will go to the next matching row. The matching - // rows are evaluated lazily (i.e. computed as the Iterator is moved). - // Returns HashTable::end() if there is no match. - Iterator IR_ALWAYS_INLINE find(TupleRow* probe_row); - - // Returns number of elements in the hash table - int64_t size() { - return _num_nodes; - } - - // Returns the number of buckets - int64_t num_buckets() { - return _buckets.size(); - } - - // true if any of the MemTrackers was exceeded - bool exceeded_limit() const { - return _exceeded_limit; - } - - // Returns the load factor (the number of non-empty buckets) - float load_factor() { - return _num_filled_buckets / static_cast(_buckets.size()); - } - - // Returns the number of bytes allocated to the hash table - int64_t byte_size() const { - return _node_byte_size * _nodes_capacity + sizeof(Bucket) * _buckets.size(); - } - - // Returns the results of the exprs at 'expr_idx' evaluated over the last row - // processed by the HashTable. - // This value is invalid if the expr evaluated to NULL. - // TODO: this is an awkward abstraction but aggregation node can take advantage of - // it and save some expr evaluation calls. - void* last_expr_value(int expr_idx) const { - return _expr_values_buffer + _expr_values_buffer_offsets[expr_idx]; - } - - // Returns if the expr at 'expr_idx' evaluated to NULL for the last row. - bool last_expr_value_null(int expr_idx) const { - return _expr_value_null_bits[expr_idx]; - } - - // Return beginning of hash table. Advancing this iterator will traverse all - // elements. - Iterator begin(); - - // Returns end marker - Iterator end() { - return Iterator(); - } - - /// Codegen for evaluating a tuple row. Codegen'd function matches the signature - /// for EvalBuildRow and EvalTupleRow. - /// if build_row is true, the codegen uses the build_exprs, otherwise the probe_exprs - llvm::Function* codegen_eval_tuple_row(RuntimeState* state, bool build_row); - - /// Codegen for hashing the expr values in '_expr_values_buffer'. Function - /// prototype matches hash_current_row identically. - llvm::Function* codegen_hash_current_row(RuntimeState* state); - - /// Codegen for evaluating a TupleRow and comparing equality against - /// '_expr_values_buffer'. Function signature matches HashTable::Equals() - llvm::Function* codegen_equals(RuntimeState* state); - - static const char* _s_llvm_class_name; - - // Dump out the entire hash table to string. If skip_empty, empty buckets are - // skipped. If build_desc is non-null, the build rows will be output. Otherwise - // just the build row addresses. - std::string debug_string(bool skip_empty, const RowDescriptor* build_desc); - - // stl-like iterator interface. - class Iterator { - public: - Iterator() : _table(NULL), _bucket_idx(-1), _node_idx(-1) { - } - - // Iterates to the next element. In the case where the iterator was - // from a Find, this will lazily evaluate that bucket, only returning - // TupleRows that match the current scan row. - template - void IR_ALWAYS_INLINE next(); - - // Returns the current row or NULL if at end. 
- TupleRow* get_row() { - if (_node_idx == -1) { - return NULL; - } - return _table->get_node(_node_idx)->data(); - } - - // Returns if the iterator is at the end - bool has_next() { - return _node_idx != -1; - } - - // Returns true if this iterator is at the end, i.e. get_row() cannot be called. - bool at_end() { - return _node_idx == -1; - } - - // Sets as matched the node currently pointed by the iterator. The iterator - // cannot be AtEnd(). - void set_matched() { - DCHECK(!at_end()); - Node *node = _table->get_node(_node_idx); - node->matched = true; - } - - bool matched() { - DCHECK(!at_end()); - Node *node = _table->get_node(_node_idx); - return node->matched; - } - - bool operator==(const Iterator& rhs) { - return _bucket_idx == rhs._bucket_idx && _node_idx == rhs._node_idx; - } - - bool operator!=(const Iterator& rhs) { - return _bucket_idx != rhs._bucket_idx || _node_idx != rhs._node_idx; - } - - private: - friend class HashTable; - - Iterator(HashTable* table, int bucket_idx, int64_t node, uint32_t hash) : - _table(table), - _bucket_idx(bucket_idx), - _node_idx(node), - _scan_hash(hash) { - } - - HashTable* _table; - // Current bucket idx - int64_t _bucket_idx; - // Current node idx (within current bucket) - int64_t _node_idx; - // cached hash value for the row passed to find()() - uint32_t _scan_hash; - }; - -private: - friend class Iterator; - friend class HashTableTest; - - // Header portion of a Node. The node data (TupleRow) is right after the - // node memory to maximize cache hits. - struct Node { - int64_t _next_idx; // chain to next node for collisions - uint32_t _hash; // Cache of the hash for _data - bool matched; +#ifndef BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_H +#define BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_H + +#include +#include + +#include "codegen/palo_ir.h" +#include "common/logging.h" +#include "util/hash_util.hpp" + +namespace llvm { + +class Function; + +} + +namespace palo { + +class Expr; +class ExprContext; +class LlvmCodeGen; +class RowDescriptor; +class Tuple; +class TupleRow; +class MemTracker; +class RuntimeState; + +using std::vector; + +// Hash table implementation designed for hash aggregation and hash joins. This is not +// templatized and is tailored to the usage pattern for aggregation and joins. The +// hash table store TupleRows and allows for different exprs for insertions and finds. +// This is the pattern we use for joins and aggregation where the input/build tuple +// row descriptor is different from the find/probe descriptor. +// The table is optimized for the query engine's use case as much as possible and is not +// intended to be a generic hash table implementation. The API loosely mimics the +// std::hashset API. +// +// The hash table stores evaluated expr results for the current row being processed +// when possible into a contiguous memory buffer. This allows for very efficient +// computation for hashing. The implementation is also designed to allow codegen +// for some paths. +// +// The hash table does not support removes. The hash table is not thread safe. +// +// The implementation is based on the boost multiset. The hashtable is implemented by +// two data structures: a vector of buckets and a vector of nodes. Inserted values +// are stored as nodes (in the order they are inserted). The buckets (indexed by the +// mod of the hash) contain pointers to the node vector. Nodes that fall in the same +// bucket are linked together (the bucket pointer gets you the head of that linked list). 
+// When growing the hash table, the number of buckets is doubled, and nodes from a
+// particular bucket either stay in place or move to an analogous bucket in the second
+// half of buckets. This behavior allows us to avoid moving about half the nodes each
+// time, and maintains good cache properties by only accessing 2 buckets at a time.
+// The node vector is modified in place.
+// Due to the doubling nature of the buckets, we require that the number of buckets is a
+// power of 2. This allows us to determine if a node needs to move by simply checking a
+// single bit, and further allows us to initially hash nodes using a bitmask.
+//
+// TODO: this is not a fancy hash table in terms of memory access patterns (cuckoo-hashing
+// or something that spills to disk). We will likely want to invest more time into this.
+// TODO: hash-join and aggregation have very different access patterns. Joins insert
+// all the rows and then call scan to find them. Aggregation interleaves find() and
+// inserts(). We may want to optimize joins more heavily for inserts() (in particular
+// growing).
+class HashTable {
+private:
+    struct Node;
+public:
+    class Iterator;
+
+    // Create a hash table.
+    // - build_exprs are the exprs that should be used to evaluate rows during insert().
+    // - probe_exprs are used during find()
+    // - num_build_tuples: number of Tuples in the build tuple row
+    // - stores_nulls: if false, TupleRows with nulls are ignored during Insert
+    // - num_buckets: number of buckets that the hash table should be initialized to
+    // - mem_tracker: if non-NULL, all memory allocation for nodes and for buckets is
+    //   tracked against it; the tracker must be valid until the d'tor is called
+    // - initial_seed: Initial seed value to use when computing hashes for rows
+    HashTable(
+        const std::vector<ExprContext*>& build_exprs,
+        const std::vector<ExprContext*>& probe_exprs,
+        int num_build_tuples, bool stores_nulls, int32_t initial_seed,
+        MemTracker* mem_tracker,
+        int64_t num_buckets);
+
+    ~HashTable();
+
+    // Call to cleanup any resources. Must be called once.
+    void close();
+
+    // Insert row into the hash table. Row will be evaluated over _build_expr_ctxs.
+    // This will grow the hash table if necessary.
+    void IR_ALWAYS_INLINE insert(TupleRow* row) {
+        if (_num_filled_buckets > _num_buckets_till_resize) {
+            // TODO: next prime instead of double?
+            resize_buckets(_num_buckets * 2);
+        }
+
+        insert_impl(row);
+    }
+
+    // Returns the start iterator for all rows that match 'probe_row'. 'probe_row' is
+    // evaluated with _probe_expr_ctxs. The iterator can be iterated until HashTable::end()
+    // to find all the matching rows.
+    // Only one scan can be in progress at any time (i.e. it is not legal to call
+    // find(), begin iterating through all the matches, call another find(),
+    // and continue iterating from the first scan iterator).
+    // Advancing the returned iterator will go to the next matching row. The matching
+    // rows are evaluated lazily (i.e. computed as the Iterator is moved).
+    // Returns HashTable::end() if there is no match.
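+    //
+    // Rough usage sketch of the pattern described above ('ht', 'build_row' and
+    // 'probe_row' are placeholder names, not identifiers from this patch):
+    //
+    //   ht->insert(build_row);                       // build phase
+    //   for (HashTable::Iterator it = ht->find(probe_row);
+    //           it != ht->end(); it.next<true>()) {
+    //       TupleRow* matched_build_row = it.get_row();
+    //   }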
+ Iterator IR_ALWAYS_INLINE find(TupleRow* probe_row); + + // Returns number of elements in the hash table + int64_t size() { + return _num_nodes; + } + + // Returns the number of buckets + int64_t num_buckets() { + return _buckets.size(); + } + + // true if any of the MemTrackers was exceeded + bool exceeded_limit() const { + return _exceeded_limit; + } + + // Returns the load factor (the number of non-empty buckets) + float load_factor() { + return _num_filled_buckets / static_cast(_buckets.size()); + } + + // Returns the number of bytes allocated to the hash table + int64_t byte_size() const { + return _node_byte_size * _nodes_capacity + sizeof(Bucket) * _buckets.size(); + } + + // Returns the results of the exprs at 'expr_idx' evaluated over the last row + // processed by the HashTable. + // This value is invalid if the expr evaluated to NULL. + // TODO: this is an awkward abstraction but aggregation node can take advantage of + // it and save some expr evaluation calls. + void* last_expr_value(int expr_idx) const { + return _expr_values_buffer + _expr_values_buffer_offsets[expr_idx]; + } + + // Returns if the expr at 'expr_idx' evaluated to NULL for the last row. + bool last_expr_value_null(int expr_idx) const { + return _expr_value_null_bits[expr_idx]; + } + + // Return beginning of hash table. Advancing this iterator will traverse all + // elements. + Iterator begin(); + + // Returns end marker + Iterator end() { + return Iterator(); + } + + /// Codegen for evaluating a tuple row. Codegen'd function matches the signature + /// for EvalBuildRow and EvalTupleRow. + /// if build_row is true, the codegen uses the build_exprs, otherwise the probe_exprs + llvm::Function* codegen_eval_tuple_row(RuntimeState* state, bool build_row); + + /// Codegen for hashing the expr values in '_expr_values_buffer'. Function + /// prototype matches hash_current_row identically. + llvm::Function* codegen_hash_current_row(RuntimeState* state); + + /// Codegen for evaluating a TupleRow and comparing equality against + /// '_expr_values_buffer'. Function signature matches HashTable::Equals() + llvm::Function* codegen_equals(RuntimeState* state); + + static const char* _s_llvm_class_name; + + // Dump out the entire hash table to string. If skip_empty, empty buckets are + // skipped. If build_desc is non-null, the build rows will be output. Otherwise + // just the build row addresses. + std::string debug_string(bool skip_empty, const RowDescriptor* build_desc); + + // stl-like iterator interface. + class Iterator { + public: + Iterator() : _table(NULL), _bucket_idx(-1), _node_idx(-1) { + } + + // Iterates to the next element. In the case where the iterator was + // from a Find, this will lazily evaluate that bucket, only returning + // TupleRows that match the current scan row. + template + void IR_ALWAYS_INLINE next(); + + // Returns the current row or NULL if at end. + TupleRow* get_row() { + if (_node_idx == -1) { + return NULL; + } + return _table->get_node(_node_idx)->data(); + } + + // Returns if the iterator is at the end + bool has_next() { + return _node_idx != -1; + } + + // Returns true if this iterator is at the end, i.e. get_row() cannot be called. + bool at_end() { + return _node_idx == -1; + } + + // Sets as matched the node currently pointed by the iterator. The iterator + // cannot be AtEnd(). 
+ void set_matched() { + DCHECK(!at_end()); + Node *node = _table->get_node(_node_idx); + node->matched = true; + } + + bool matched() { + DCHECK(!at_end()); + Node *node = _table->get_node(_node_idx); + return node->matched; + } + + bool operator==(const Iterator& rhs) { + return _bucket_idx == rhs._bucket_idx && _node_idx == rhs._node_idx; + } + + bool operator!=(const Iterator& rhs) { + return _bucket_idx != rhs._bucket_idx || _node_idx != rhs._node_idx; + } + + private: + friend class HashTable; + + Iterator(HashTable* table, int bucket_idx, int64_t node, uint32_t hash) : + _table(table), + _bucket_idx(bucket_idx), + _node_idx(node), + _scan_hash(hash) { + } + + HashTable* _table; + // Current bucket idx + int64_t _bucket_idx; + // Current node idx (within current bucket) + int64_t _node_idx; + // cached hash value for the row passed to find()() + uint32_t _scan_hash; + }; + +private: + friend class Iterator; + friend class HashTableTest; + + // Header portion of a Node. The node data (TupleRow) is right after the + // node memory to maximize cache hits. + struct Node { + int64_t _next_idx; // chain to next node for collisions + uint32_t _hash; // Cache of the hash for _data + bool matched; Node():_next_idx(-1), _hash(-1), matched(false) { } - TupleRow* data() { - uint8_t* mem = reinterpret_cast(this); - DCHECK_EQ(reinterpret_cast(mem) % 8, 0); - return reinterpret_cast(mem + sizeof(Node)); - } - }; - - struct Bucket { - int64_t _node_idx; - - Bucket() { - _node_idx = -1; - } - }; - - // Returns the next non-empty bucket and updates idx to be the index of that bucket. - // If there are no more buckets, returns NULL and sets idx to -1 - Bucket* next_bucket(int64_t* bucket_idx); - - // Returns node at idx. Tracking structures do not use pointers since they will - // change as the HashTable grows. - Node* get_node(int64_t idx) { - DCHECK_NE(idx, -1); - return reinterpret_cast(_nodes + _node_byte_size * idx); - } - - // Resize the hash table to 'num_buckets' - void resize_buckets(int64_t num_buckets); - - // Insert row into the hash table - void IR_ALWAYS_INLINE insert_impl(TupleRow* row); - - // Chains the node at 'node_idx' to 'bucket'. Nodes in a bucket are chained - // as a linked list; this places the new node at the beginning of the list. - void add_to_bucket(Bucket* bucket, int64_t node_idx, Node* node); - - // Moves a node from one bucket to another. 'previous_node' refers to the - // node (if any) that's chained before this node in from_bucket's linked list. - void move_node(Bucket* from_bucket, Bucket* to_bucket, int64_t node_idx, Node* node, - Node* previous_node); - - // Evaluate the exprs over row and cache the results in '_expr_values_buffer'. - // Returns whether any expr evaluated to NULL - // This will be replaced by codegen - bool eval_row(TupleRow* row, const std::vector& exprs); - - // Evaluate 'row' over _build_expr_ctxs caching the results in '_expr_values_buffer' - // This will be replaced by codegen. We do not want this function inlined when - // cross compiled because we need to be able to differentiate between EvalBuildRow - // and EvalProbeRow by name and the _build_expr_ctxs/_probe_expr_ctxs are baked into - // the codegen'd function. - bool IR_NO_INLINE eval_build_row(TupleRow* row) { - return eval_row(row, _build_expr_ctxs); - } - - // Evaluate 'row' over _probe_expr_ctxs caching the results in '_expr_values_buffer' - // This will be replaced by codegen. 
- bool IR_NO_INLINE eval_probe_row(TupleRow* row) { - return eval_row(row, _probe_expr_ctxs); - } - - // Compute the hash of the values in _expr_values_buffer. - // This will be replaced by codegen. We don't want this inlined for replacing - // with codegen'd functions so the function name does not change. - uint32_t IR_NO_INLINE hash_current_row() { - if (_var_result_begin == -1) { - // This handles NULLs implicitly since a constant seed value was put - // into results buffer for nulls. - return HashUtil::hash(_expr_values_buffer, _results_buffer_size, _initial_seed); - } else { - return hash_variable_len_row(); - } - } - - // Compute the hash of the values in _expr_values_buffer for rows with variable length - // fields (e.g. strings) - uint32_t hash_variable_len_row(); - - // Returns true if the values of build_exprs evaluated over 'build_row' equal - // the values cached in _expr_values_buffer - // This will be replaced by codegen. - bool equals(TupleRow* build_row); - - // Grow the node array. - void grow_node_array(); - - // Sets _mem_tracker_exceeded to true and MEM_LIMIT_EXCEEDED for the query. - // allocation_size is the attempted size of the allocation that would have - // brought us over the mem limit. - void mem_limit_exceeded(int64_t allocation_size); - - // Load factor that will trigger growing the hash table on insert. This is - // defined as the number of non-empty buckets / total_buckets - static const float MAX_BUCKET_OCCUPANCY_FRACTION; - - const std::vector& _build_expr_ctxs; - const std::vector& _probe_expr_ctxs; - - // Number of Tuple* in the build tuple row - const int _num_build_tuples; - const bool _stores_nulls; - - const int32_t _initial_seed; - - // Size of hash table nodes. This includes a fixed size header and the Tuple*'s that - // follow. - const int _node_byte_size; - // Number of non-empty buckets. Used to determine when to grow and rehash - int64_t _num_filled_buckets; - // Memory to store node data. This is not allocated from a pool to take advantage - // of realloc. - // TODO: integrate with mem pools - uint8_t* _nodes; - // number of nodes stored (i.e. size of hash table) - int64_t _num_nodes; - // max number of nodes that can be stored in '_nodes' before realloc - int64_t _nodes_capacity; - - bool _exceeded_limit; // true if any of _mem_trackers[].limit_exceeded() - - MemTracker* _mem_tracker; - // Set to true if the hash table exceeds the memory limit. If this is set, - // subsequent calls to Insert() will be ignored. - bool _mem_limit_exceeded; - - std::vector _buckets; - - // equal to _buckets.size() but more efficient than the size function - int64_t _num_buckets; - - // The number of filled buckets to trigger a resize. This is cached for efficiency - int64_t _num_buckets_till_resize; - - // Cache of exprs values for the current row being evaluated. This can either - // be a build row (during insert()) or probe row (during find()). - std::vector _expr_values_buffer_offsets; - - // byte offset into _expr_values_buffer that begins the variable length results - int _var_result_begin; - - // byte size of '_expr_values_buffer' - int _results_buffer_size; - - // buffer to store evaluated expr results. This address must not change once - // allocated since the address is baked into the codegen - uint8_t* _expr_values_buffer; - - // Use bytes instead of bools to be compatible with llvm. This address must - // not change once allocated. 
- uint8_t* _expr_value_null_bits; -}; - -} - -#endif + TupleRow* data() { + uint8_t* mem = reinterpret_cast(this); + DCHECK_EQ(reinterpret_cast(mem) % 8, 0); + return reinterpret_cast(mem + sizeof(Node)); + } + }; + + struct Bucket { + int64_t _node_idx; + + Bucket() { + _node_idx = -1; + } + }; + + // Returns the next non-empty bucket and updates idx to be the index of that bucket. + // If there are no more buckets, returns NULL and sets idx to -1 + Bucket* next_bucket(int64_t* bucket_idx); + + // Returns node at idx. Tracking structures do not use pointers since they will + // change as the HashTable grows. + Node* get_node(int64_t idx) { + DCHECK_NE(idx, -1); + return reinterpret_cast(_nodes + _node_byte_size * idx); + } + + // Resize the hash table to 'num_buckets' + void resize_buckets(int64_t num_buckets); + + // Insert row into the hash table + void IR_ALWAYS_INLINE insert_impl(TupleRow* row); + + // Chains the node at 'node_idx' to 'bucket'. Nodes in a bucket are chained + // as a linked list; this places the new node at the beginning of the list. + void add_to_bucket(Bucket* bucket, int64_t node_idx, Node* node); + + // Moves a node from one bucket to another. 'previous_node' refers to the + // node (if any) that's chained before this node in from_bucket's linked list. + void move_node(Bucket* from_bucket, Bucket* to_bucket, int64_t node_idx, Node* node, + Node* previous_node); + + // Evaluate the exprs over row and cache the results in '_expr_values_buffer'. + // Returns whether any expr evaluated to NULL + // This will be replaced by codegen + bool eval_row(TupleRow* row, const std::vector& exprs); + + // Evaluate 'row' over _build_expr_ctxs caching the results in '_expr_values_buffer' + // This will be replaced by codegen. We do not want this function inlined when + // cross compiled because we need to be able to differentiate between EvalBuildRow + // and EvalProbeRow by name and the _build_expr_ctxs/_probe_expr_ctxs are baked into + // the codegen'd function. + bool IR_NO_INLINE eval_build_row(TupleRow* row) { + return eval_row(row, _build_expr_ctxs); + } + + // Evaluate 'row' over _probe_expr_ctxs caching the results in '_expr_values_buffer' + // This will be replaced by codegen. + bool IR_NO_INLINE eval_probe_row(TupleRow* row) { + return eval_row(row, _probe_expr_ctxs); + } + + // Compute the hash of the values in _expr_values_buffer. + // This will be replaced by codegen. We don't want this inlined for replacing + // with codegen'd functions so the function name does not change. + uint32_t IR_NO_INLINE hash_current_row() { + if (_var_result_begin == -1) { + // This handles NULLs implicitly since a constant seed value was put + // into results buffer for nulls. + return HashUtil::hash(_expr_values_buffer, _results_buffer_size, _initial_seed); + } else { + return hash_variable_len_row(); + } + } + + // Compute the hash of the values in _expr_values_buffer for rows with variable length + // fields (e.g. strings) + uint32_t hash_variable_len_row(); + + // Returns true if the values of build_exprs evaluated over 'build_row' equal + // the values cached in _expr_values_buffer + // This will be replaced by codegen. + bool equals(TupleRow* build_row); + + // Grow the node array. + void grow_node_array(); + + // Sets _mem_tracker_exceeded to true and MEM_LIMIT_EXCEEDED for the query. + // allocation_size is the attempted size of the allocation that would have + // brought us over the mem limit. 
+ void mem_limit_exceeded(int64_t allocation_size); + + // Load factor that will trigger growing the hash table on insert. This is + // defined as the number of non-empty buckets / total_buckets + static const float MAX_BUCKET_OCCUPANCY_FRACTION; + + const std::vector& _build_expr_ctxs; + const std::vector& _probe_expr_ctxs; + + // Number of Tuple* in the build tuple row + const int _num_build_tuples; + const bool _stores_nulls; + + const int32_t _initial_seed; + + // Size of hash table nodes. This includes a fixed size header and the Tuple*'s that + // follow. + const int _node_byte_size; + // Number of non-empty buckets. Used to determine when to grow and rehash + int64_t _num_filled_buckets; + // Memory to store node data. This is not allocated from a pool to take advantage + // of realloc. + // TODO: integrate with mem pools + uint8_t* _nodes; + // number of nodes stored (i.e. size of hash table) + int64_t _num_nodes; + // max number of nodes that can be stored in '_nodes' before realloc + int64_t _nodes_capacity; + + bool _exceeded_limit; // true if any of _mem_trackers[].limit_exceeded() + + MemTracker* _mem_tracker; + // Set to true if the hash table exceeds the memory limit. If this is set, + // subsequent calls to Insert() will be ignored. + bool _mem_limit_exceeded; + + std::vector _buckets; + + // equal to _buckets.size() but more efficient than the size function + int64_t _num_buckets; + + // The number of filled buckets to trigger a resize. This is cached for efficiency + int64_t _num_buckets_till_resize; + + // Cache of exprs values for the current row being evaluated. This can either + // be a build row (during insert()) or probe row (during find()). + std::vector _expr_values_buffer_offsets; + + // byte offset into _expr_values_buffer that begins the variable length results + int _var_result_begin; + + // byte size of '_expr_values_buffer' + int _results_buffer_size; + + // buffer to store evaluated expr results. This address must not change once + // allocated since the address is baked into the codegen + uint8_t* _expr_values_buffer; + + // Use bytes instead of bools to be compatible with llvm. This address must + // not change once allocated. + uint8_t* _expr_value_null_bits; +}; + +} + +#endif diff --git a/be/src/exec/hash_table.hpp b/be/src/exec/hash_table.hpp index d4236fdb10..638a304d16 100644 --- a/be/src/exec/hash_table.hpp +++ b/be/src/exec/hash_table.hpp @@ -18,159 +18,159 @@ // specific language governing permissions and limitations // under the License. 
-#ifndef BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_HPP -#define BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_HPP - -#include "exec/hash_table.h" - -namespace palo { - -inline HashTable::Iterator HashTable::find(TupleRow* probe_row) { - bool has_nulls = eval_probe_row(probe_row); - - if (!_stores_nulls && has_nulls) { - return end(); - } - - uint32_t hash = hash_current_row(); - int64_t bucket_idx = hash & (_num_buckets - 1); - - Bucket* bucket = &_buckets[bucket_idx]; - int64_t node_idx = bucket->_node_idx; - - while (node_idx != -1) { - Node* node = get_node(node_idx); - - if (node->_hash == hash && equals(node->data())) { - return Iterator(this, bucket_idx, node_idx, hash); - } - - node_idx = node->_next_idx; - } - - return end(); -} - -inline HashTable::Iterator HashTable::begin() { - int64_t bucket_idx = -1; - Bucket* bucket = next_bucket(&bucket_idx); - - if (bucket != NULL) { - return Iterator(this, bucket_idx, bucket->_node_idx, 0); - } - - return end(); -} - -inline HashTable::Bucket* HashTable::next_bucket(int64_t* bucket_idx) { - ++*bucket_idx; - - for (; *bucket_idx < _num_buckets; ++*bucket_idx) { - if (_buckets[*bucket_idx]._node_idx != -1) { - return &_buckets[*bucket_idx]; - } - } - - *bucket_idx = -1; - return NULL; -} - -inline void HashTable::insert_impl(TupleRow* row) { - bool has_null = eval_build_row(row); - - if (!_stores_nulls && has_null) { - return; - } - - uint32_t hash = hash_current_row(); - int64_t bucket_idx = hash & (_num_buckets - 1); - - if (_num_nodes == _nodes_capacity) { - grow_node_array(); - } - - Node* node = get_node(_num_nodes); - TupleRow* data = node->data(); - node->_hash = hash; - memcpy(data, row, sizeof(Tuple*) * _num_build_tuples); - add_to_bucket(&_buckets[bucket_idx], _num_nodes, node); - ++_num_nodes; -} - -inline void HashTable::add_to_bucket(Bucket* bucket, int64_t node_idx, Node* node) { - if (bucket->_node_idx == -1) { - ++_num_filled_buckets; - } - - node->_next_idx = bucket->_node_idx; - bucket->_node_idx = node_idx; -} - -inline void HashTable::move_node(Bucket* from_bucket, Bucket* to_bucket, - int64_t node_idx, Node* node, Node* previous_node) { - int64_t next_idx = node->_next_idx; - - if (previous_node != NULL) { - previous_node->_next_idx = next_idx; - } else { - // Update bucket directly - from_bucket->_node_idx = next_idx; - - if (next_idx == -1) { - --_num_filled_buckets; - } - } - - add_to_bucket(to_bucket, node_idx, node); -} - -template -inline void HashTable::Iterator::next() { - if (_bucket_idx == -1) { - return; - } - - // TODO: this should prefetch the next tuplerow - Node* node = _table->get_node(_node_idx); - - // Iterator is not from a full table scan, evaluate equality now. Only the current - // bucket needs to be scanned. '_expr_values_buffer' contains the results - // for the current probe row. 
- if (check_match) { - // TODO: this should prefetch the next node - int64_t next_idx = node->_next_idx; - - while (next_idx != -1) { - node = _table->get_node(next_idx); - - if (node->_hash == _scan_hash && _table->equals(node->data())) { - _node_idx = next_idx; - return; - } - - next_idx = node->_next_idx; - } - - *this = _table->end(); - } else { - // Move onto the next chained node - if (node->_next_idx != -1) { - _node_idx = node->_next_idx; - return; - } - - // Move onto the next bucket - Bucket* bucket = _table->next_bucket(&_bucket_idx); - - if (bucket == NULL) { - _bucket_idx = -1; - _node_idx = -1; - } else { - _node_idx = bucket->_node_idx; - } - } -} - -} - -#endif +#ifndef BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_HPP +#define BDG_PALO_BE_SRC_QUERY_EXEC_HASH_TABLE_HPP + +#include "exec/hash_table.h" + +namespace palo { + +inline HashTable::Iterator HashTable::find(TupleRow* probe_row) { + bool has_nulls = eval_probe_row(probe_row); + + if (!_stores_nulls && has_nulls) { + return end(); + } + + uint32_t hash = hash_current_row(); + int64_t bucket_idx = hash & (_num_buckets - 1); + + Bucket* bucket = &_buckets[bucket_idx]; + int64_t node_idx = bucket->_node_idx; + + while (node_idx != -1) { + Node* node = get_node(node_idx); + + if (node->_hash == hash && equals(node->data())) { + return Iterator(this, bucket_idx, node_idx, hash); + } + + node_idx = node->_next_idx; + } + + return end(); +} + +inline HashTable::Iterator HashTable::begin() { + int64_t bucket_idx = -1; + Bucket* bucket = next_bucket(&bucket_idx); + + if (bucket != NULL) { + return Iterator(this, bucket_idx, bucket->_node_idx, 0); + } + + return end(); +} + +inline HashTable::Bucket* HashTable::next_bucket(int64_t* bucket_idx) { + ++*bucket_idx; + + for (; *bucket_idx < _num_buckets; ++*bucket_idx) { + if (_buckets[*bucket_idx]._node_idx != -1) { + return &_buckets[*bucket_idx]; + } + } + + *bucket_idx = -1; + return NULL; +} + +inline void HashTable::insert_impl(TupleRow* row) { + bool has_null = eval_build_row(row); + + if (!_stores_nulls && has_null) { + return; + } + + uint32_t hash = hash_current_row(); + int64_t bucket_idx = hash & (_num_buckets - 1); + + if (_num_nodes == _nodes_capacity) { + grow_node_array(); + } + + Node* node = get_node(_num_nodes); + TupleRow* data = node->data(); + node->_hash = hash; + memcpy(data, row, sizeof(Tuple*) * _num_build_tuples); + add_to_bucket(&_buckets[bucket_idx], _num_nodes, node); + ++_num_nodes; +} + +inline void HashTable::add_to_bucket(Bucket* bucket, int64_t node_idx, Node* node) { + if (bucket->_node_idx == -1) { + ++_num_filled_buckets; + } + + node->_next_idx = bucket->_node_idx; + bucket->_node_idx = node_idx; +} + +inline void HashTable::move_node(Bucket* from_bucket, Bucket* to_bucket, + int64_t node_idx, Node* node, Node* previous_node) { + int64_t next_idx = node->_next_idx; + + if (previous_node != NULL) { + previous_node->_next_idx = next_idx; + } else { + // Update bucket directly + from_bucket->_node_idx = next_idx; + + if (next_idx == -1) { + --_num_filled_buckets; + } + } + + add_to_bucket(to_bucket, node_idx, node); +} + +template +inline void HashTable::Iterator::next() { + if (_bucket_idx == -1) { + return; + } + + // TODO: this should prefetch the next tuplerow + Node* node = _table->get_node(_node_idx); + + // Iterator is not from a full table scan, evaluate equality now. Only the current + // bucket needs to be scanned. '_expr_values_buffer' contains the results + // for the current probe row. 
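+    // A sketch of the two instantiations (the template argument name is inferred
+    // from the 'check_match' flag used below):
+    //   next<true>()  - used after find(); walks the rest of the current bucket's
+    //                   chain and stops at the next node whose hash and build values
+    //                   match the probe row, or at end() if there is none.
+    //   next<false>() - used after begin(); walks every node, moving on to the next
+    //                   non-empty bucket once the current chain is exhausted.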
+ if (check_match) { + // TODO: this should prefetch the next node + int64_t next_idx = node->_next_idx; + + while (next_idx != -1) { + node = _table->get_node(next_idx); + + if (node->_hash == _scan_hash && _table->equals(node->data())) { + _node_idx = next_idx; + return; + } + + next_idx = node->_next_idx; + } + + *this = _table->end(); + } else { + // Move onto the next chained node + if (node->_next_idx != -1) { + _node_idx = node->_next_idx; + return; + } + + // Move onto the next bucket + Bucket* bucket = _table->next_bucket(&_bucket_idx); + + if (bucket == NULL) { + _bucket_idx = -1; + _node_idx = -1; + } else { + _node_idx = bucket->_node_idx; + } + } +} + +} + +#endif diff --git a/be/src/exec/olap_common.h b/be/src/exec/olap_common.h index 9a4091d9d0..c5f9e12bcb 100644 --- a/be/src/exec/olap_common.h +++ b/be/src/exec/olap_common.h @@ -30,6 +30,8 @@ #include "runtime/string_value.hpp" #include "runtime/datetime_value.h" +#include "olap/tuple.h" + namespace palo { template @@ -86,6 +88,10 @@ public: return _low_value; } + bool is_low_value_mininum() const { + return _low_value == _type_min; + } + bool is_begin_include() const { return _low_op == FILTER_LARGER_OR_EQUAL; } @@ -193,8 +199,8 @@ public: for (int i = 0; i < _begin_scan_keys.size(); ++i) { VLOG(1) << "ScanKey=" << (_begin_include ? "[" : "(") - << to_print_key(_begin_scan_keys[i]) << " : " - << to_print_key(_end_scan_keys[i]) + << _begin_scan_keys[i] << " : " + << _end_scan_keys[i] << (_end_include ? "]" : ")"); } } @@ -224,23 +230,9 @@ public: _is_convertible = is_convertible; } - static std::string to_print_key(const std::vector& key_vec) { - std::string print_key; - - for (std::string key : key_vec) { - print_key += key; - print_key += ","; - } - - if (!print_key.empty()) { - print_key.pop_back(); - } - - return print_key; - } private: - std::vector> _begin_scan_keys; - std::vector> _end_scan_keys; + std::vector _begin_scan_keys; + std::vector _end_scan_keys; bool _has_range_value; bool _begin_include; bool _end_include; @@ -701,9 +693,9 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { for (; iter != fixed_value_set.end(); ++iter) { _begin_scan_keys.emplace_back(); - _begin_scan_keys.back().push_back(cast_to_string(*iter)); + _begin_scan_keys.back().add_value(cast_to_string(*iter)); _end_scan_keys.emplace_back(); - _end_scan_keys.back().push_back(cast_to_string(*iter)); + _end_scan_keys.back().add_value(cast_to_string(*iter)); } } // 3.1.2 produces the Cartesian product of ScanKey and fixed_value else { @@ -711,22 +703,22 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { int original_key_range_size = _begin_scan_keys.size(); for (int i = 0; i < original_key_range_size; ++i) { - vector start_base_key_range = _begin_scan_keys[i]; - vector end_base_key_range = _end_scan_keys[i]; + OlapTuple start_base_key_range = _begin_scan_keys[i]; + OlapTuple end_base_key_range = _end_scan_keys[i]; const_iterator_type iter = fixed_value_set.begin(); for (; iter != fixed_value_set.end(); ++iter) { // alter the first ScanKey in original place if (iter == fixed_value_set.begin()) { - _begin_scan_keys[i].push_back(cast_to_string(*iter)); - _end_scan_keys[i].push_back(cast_to_string(*iter)); + _begin_scan_keys[i].add_value(cast_to_string(*iter)); + _end_scan_keys[i].add_value(cast_to_string(*iter)); } // append follow ScanKey else { _begin_scan_keys.push_back(start_base_key_range); - _begin_scan_keys.back().push_back(cast_to_string(*iter)); + _begin_scan_keys.back().add_value(cast_to_string(*iter)); 
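+                    // Worked example (sketch) of the Cartesian product built by these
+                    // loops: with existing scan keys [("a"), ("b")] and fixed values
+                    // {1, 2}, the result is [("a",1), ("b",1), ("a",2), ("b",2)]:
+                    // the first fixed value extends each key in place, and every
+                    // further value appends a copy of the original key with that
+                    // value added.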
_end_scan_keys.push_back(end_base_key_range); - _end_scan_keys.back().push_back(cast_to_string(*iter)); + _end_scan_keys.back().add_value(cast_to_string(*iter)); } } } @@ -740,19 +732,21 @@ Status OlapScanKeys::extend_scan_key(ColumnValueRange& range) { if (_begin_scan_keys.empty()) { _begin_scan_keys.emplace_back(); - _begin_scan_keys.back().push_back( - cast_to_string(range.get_range_min_value())); + _begin_scan_keys.back().add_value( + cast_to_string(range.get_range_min_value()), + range.is_low_value_mininum()); _end_scan_keys.emplace_back(); - _end_scan_keys.back().push_back( + _end_scan_keys.back().add_value( cast_to_string(range.get_range_max_value())); } else { for (int i = 0; i < _begin_scan_keys.size(); ++i) { - _begin_scan_keys[i].push_back( - cast_to_string(range.get_range_min_value())); + _begin_scan_keys[i].add_value( + cast_to_string(range.get_range_min_value()), + range.is_low_value_mininum()); } for (int i = 0; i < _end_scan_keys.size(); ++i) { - _end_scan_keys[i].push_back( + _end_scan_keys[i].add_value( cast_to_string(range.get_range_max_value())); } } diff --git a/be/src/exec/olap_meta_reader.cpp b/be/src/exec/olap_meta_reader.cpp index 88106b138d..75ade22ed4 100644 --- a/be/src/exec/olap_meta_reader.cpp +++ b/be/src/exec/olap_meta_reader.cpp @@ -25,75 +25,71 @@ #include "olap_scanner.h" #include "olap_scan_node.h" #include "olap_utils.h" -#include "olap/olap_reader.h" +#include "olap/olap_table.h" #include "runtime/descriptors.h" #include "runtime/runtime_state.h" #include "runtime/mem_pool.h" namespace palo { -EngineMetaReader::EngineMetaReader( - boost::shared_ptr scan_range) : - _scan_range(scan_range) { -} - -EngineMetaReader::~EngineMetaReader() { -} - -Status EngineMetaReader::close() { - return Status::OK; -} - -Status EngineMetaReader::open() { - return Status::OK; -} - Status EngineMetaReader::get_hints( - int block_row_count, - bool is_begin_include, - bool is_end_include, - std::vector& scan_key_range, - std::vector* sub_scan_range, - RuntimeProfile* profile) { - TShowHintsRequest show_hints_request; - show_hints_request.__set_tablet_id(_scan_range->scan_range().tablet_id); - show_hints_request.__set_schema_hash( - strtoul(_scan_range->scan_range().schema_hash.c_str(), NULL, 10)); - show_hints_request.__set_block_row_count(block_row_count); - show_hints_request.__set_end_range("lt"); - - for (auto key_range : scan_key_range) { - if (key_range.begin_scan_range.size() == 1 - && key_range.begin_scan_range[0] == NEGATIVE_INFINITY) { - continue; - } - TFetchEndKey end_key; - TFetchStartKey start_key; - - for (auto key : key_range.begin_scan_range) { - start_key.key.push_back(key); - } - for (auto key : key_range.end_scan_range) { - end_key.key.push_back(key); - } - - show_hints_request.start_key.push_back(start_key); - show_hints_request.end_key.push_back(end_key); + boost::shared_ptr scan_range, + int block_row_count, + bool is_begin_include, + bool is_end_include, + std::vector& scan_key_range, + std::vector* sub_scan_range, + RuntimeProfile* profile) { + auto tablet_id = scan_range->scan_range().tablet_id; + int32_t schema_hash = strtoul(scan_range->scan_range().schema_hash.c_str(), NULL, 10); + auto table = OLAPEngine::get_instance()->get_table(tablet_id, schema_hash); + if (table.get() == NULL) { + LOG(WARNING) << "tablet does not exist. 
tablet_id=" << tablet_id << ", schema_hash=" + << schema_hash; + std::stringstream ss; + ss << "tablet does not exist: " << tablet_id; + return Status(ss.str()); } - std::vector>> ranges; + RuntimeProfile::Counter* show_hints_timer = profile->get_counter("ShowHintsTime"); + std::vector> ranges; + bool have_valid_range = false; + for (auto& key_range : scan_key_range) { + if (key_range.begin_scan_range.size() == 1 + && key_range.begin_scan_range.get_value(0) == NEGATIVE_INFINITY) { + continue; + } + SCOPED_TIMER(show_hints_timer); + + OLAPStatus res = OLAP_SUCCESS; + std::vector range; + res = table->split_range(key_range.begin_scan_range, + key_range.end_scan_range, + block_row_count, &range); + if (res != OLAP_SUCCESS) { + OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", res); + return Status("fail to show hints"); + } + ranges.emplace_back(std::move(range)); + have_valid_range = true; + } - if (!OLAPShowHints::show_hints(show_hints_request, &ranges, profile).ok()) { - LOG(WARNING) << "Failed to show_hints."; - return Status("Show hints execute fail."); + if (!have_valid_range) { + std::vector range; + auto res = table->split_range({}, {}, block_row_count, &range); + if (res != OLAP_SUCCESS) { + OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", res); + return Status("fail to show hints"); + } + ranges.emplace_back(std::move(range)); } for (int i = 0; i < ranges.size(); ++i) { for (int j = 0; j < ranges[i].size(); j += 2) { OlapScanRange range; - range.begin_scan_range.clear(); + range.begin_scan_range.reset(); range.begin_scan_range = ranges[i][j]; - range.end_scan_range.clear(); + range.end_scan_range.reset(); range.end_scan_range = ranges[i][j + 1]; if (0 == j) { diff --git a/be/src/exec/olap_meta_reader.h b/be/src/exec/olap_meta_reader.h index 207edf1493..988c3d82e2 100644 --- a/be/src/exec/olap_meta_reader.h +++ b/be/src/exec/olap_meta_reader.h @@ -38,42 +38,14 @@ class RuntimeProfile; */ class EngineMetaReader { public: - EngineMetaReader( - boost::shared_ptr scan_range); - - ~EngineMetaReader(); - - /** - * @brief ´ò¿ªreader£¬readerµÄʵÏÖ¿ÉÒÔÔÚÕâ¸ö½Ó¿ÚÖÐÍê³É³õʼ»¯¡¢ - * ½¨Á¢Á´½Ó¡¢·¢ËÍÇëÇóµÈ²Ù×÷. - * - * @author Hu Jie - * @date 2013/8/30 - * - * @return ³É¹¦·µ»ØE_OK£¬´íÎ󷵻ظºÖµ. - */ - Status open(); - - /** - * @brief - * - * @author Hu Jie - * @date 2013/8/30 - * - * @return ³É¹¦·µ»ØE_OK£¬´íÎ󷵻ظºÖµ. - */ - Status get_hints( + static Status get_hints( + boost::shared_ptr scan_range, int block_row_count, bool is_begin_include, bool is_end_include, std::vector& scan_key_range, std::vector* sub_scan_range, RuntimeProfile* profile); - - Status close(); - -private: - boost::shared_ptr _scan_range; }; } // namespace palo diff --git a/be/src/exec/olap_scan_node.cpp b/be/src/exec/olap_scan_node.cpp index 155f9a5b5a..9f0b72159d 100644 --- a/be/src/exec/olap_scan_node.cpp +++ b/be/src/exec/olap_scan_node.cpp @@ -38,7 +38,6 @@ #include "util/debug_util.h" #include "agent/cgroups_mgr.h" #include "common/resource_tls.h" -#include "olap/olap_reader.h" #include using llvm::Function; @@ -573,8 +572,8 @@ Status OlapScanNode::split_scan_range() { for (auto sub_range : sub_ranges) { VLOG(1) << "SubScanKey=" << (sub_range.begin_include ? "[" : "(") - << OlapScanKeys::to_print_key(sub_range.begin_scan_range) - << " : " << OlapScanKeys::to_print_key(sub_range.end_scan_range) << + << sub_range.begin_scan_range + << " : " << sub_range.end_scan_range << (sub_range.end_include ? 
"]" : ")"); _query_key_ranges.push_back(sub_range); _query_scan_ranges.push_back(scan_range); @@ -957,8 +956,8 @@ bool OlapScanNode::select_scan_range(boost::shared_ptr scan_range } Status OlapScanNode::get_sub_scan_range( - boost::shared_ptr scan_range, - std::vector* sub_range) { + boost::shared_ptr scan_range, + std::vector* sub_range) { std::vector scan_key_range; RETURN_IF_ERROR(_scan_keys.get_key_range(&scan_key_range)); @@ -970,10 +969,8 @@ Status OlapScanNode::get_sub_scan_range( sub_range->resize(1); } } else { - EngineMetaReader olap_meta_reader(scan_range); - RETURN_IF_ERROR(olap_meta_reader.open()); - - if (!olap_meta_reader.get_hints( + if (!EngineMetaReader::get_hints( + scan_range, config::palo_scan_range_row_count, _scan_keys.begin_include(), _scan_keys.end_include(), @@ -986,8 +983,6 @@ Status OlapScanNode::get_sub_scan_range( sub_range->resize(1); } } - - RETURN_IF_ERROR(olap_meta_reader.close()); } return Status::OK; diff --git a/be/src/exec/olap_scanner.cpp b/be/src/exec/olap_scanner.cpp index a42b43f261..54035b45be 100644 --- a/be/src/exec/olap_scanner.cpp +++ b/be/src/exec/olap_scanner.cpp @@ -20,7 +20,6 @@ #include "olap_scanner.h" #include "olap_scan_node.h" #include "olap_utils.h" -#include "olap/olap_reader.h" #include "olap/field.h" #include "service/backend_options.h" #include "runtime/descriptors.h" @@ -120,7 +119,10 @@ Status OlapScanner::open() { auto res = _reader->init(_params); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init reader.[res=%d]", res); - return Status("failed to initialize storage reader"); + std::stringstream ss; + ss << "failed to initialize storage reader. tablet=" << _params.olap_table->full_name() + << ", res=" << res << ", backend=" << BackendOptions::get_localhost(); + return Status(ss.str().c_str()); } return Status::OK; } @@ -146,24 +148,15 @@ Status OlapScanner::_init_params( // Range for (auto& key_range : key_ranges) { if (key_range.begin_scan_range.size() == 1 && - key_range.begin_scan_range[0] == NEGATIVE_INFINITY) { + key_range.begin_scan_range.get_value(0) == NEGATIVE_INFINITY) { continue; } _params.range = (key_range.begin_include ? "ge" : "gt"); _params.end_range = (key_range.end_include ? 
"le" : "lt"); - TFetchStartKey start_key; - for (auto key : key_range.begin_scan_range) { - start_key.key.push_back(key); - } - _params.start_key.push_back(start_key); - - TFetchEndKey end_key; - for (auto key : key_range.end_scan_range) { - end_key.key.push_back(key); - } - _params.end_key.push_back(end_key); + _params.start_key.push_back(key_range.begin_scan_range); + _params.end_key.push_back(key_range.end_scan_range); } // TODO(zc) diff --git a/be/src/exec/olap_utils.h b/be/src/exec/olap_utils.h index 4c419a7a1e..ec8cad741e 100644 --- a/be/src/exec/olap_utils.h +++ b/be/src/exec/olap_utils.h @@ -22,6 +22,7 @@ #include "gen_cpp/Opcodes_types.h" #include "runtime/primitive_type.h" #include "runtime/datetime_value.h" +#include "olap/tuple.h" namespace palo { @@ -80,8 +81,8 @@ static const char* POSITIVE_INFINITY = "+oo"; typedef struct OlapScanRange { public: OlapScanRange() : begin_include(true), end_include(true) { - begin_scan_range.push_back(NEGATIVE_INFINITY); - end_scan_range.push_back(POSITIVE_INFINITY); + begin_scan_range.add_value(NEGATIVE_INFINITY); + end_scan_range.add_value(POSITIVE_INFINITY); } OlapScanRange( bool begin, @@ -93,8 +94,8 @@ public: bool begin_include; bool end_include; - std::vector begin_scan_range; - std::vector end_scan_range; + OlapTuple begin_scan_range; + OlapTuple end_scan_range; } OlapScanRange; static char encoding_table[] = { diff --git a/be/src/http/action/mini_load.cpp b/be/src/http/action/mini_load.cpp index 52fce714a7..a309011745 100644 --- a/be/src/http/action/mini_load.cpp +++ b/be/src/http/action/mini_load.cpp @@ -453,6 +453,12 @@ Status MiniLoadAction::generate_check_load_req( gettimeofday(&tv, NULL); check_load_req->__set_timestamp(tv.tv_sec * 1000 + tv.tv_usec / 1000); } + + if (http_req->remote_host() != nullptr) { + std::string user_ip(http_req->remote_host()); + check_load_req->__set_user_ip(user_ip); + } + return Status::OK; } diff --git a/be/src/http/http_request.cpp b/be/src/http/http_request.cpp index feb6c69267..ea03c06ee4 100644 --- a/be/src/http/http_request.cpp +++ b/be/src/http/http_request.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include "http/http_handler.h" @@ -138,4 +139,8 @@ std::string HttpRequest::get_request_body() { return _request_body; } +const char* HttpRequest::remote_host() const { + return _ev_req->remote_host; +} + } diff --git a/be/src/http/http_request.h b/be/src/http/http_request.h index 6832bd1caf..ccd196409e 100644 --- a/be/src/http/http_request.h +++ b/be/src/http/http_request.h @@ -82,6 +82,8 @@ public: void* handler_ctx() const { return _handler_ctx; } void set_handler_ctx(void* ctx) { _handler_ctx = ctx; } + const char* remote_host() const; + private: HttpMethod _method; std::string _uri; diff --git a/be/src/olap/CMakeLists.txt b/be/src/olap/CMakeLists.txt index 2a8b9117cf..3543d1dcaa 100644 --- a/be/src/olap/CMakeLists.txt +++ b/be/src/olap/CMakeLists.txt @@ -23,7 +23,6 @@ add_library(Olap STATIC comparison_predicate.cpp in_list_predicate.cpp null_predicate.cpp - olap_reader.cpp base_compaction.cpp command_executor.cpp cumulative_compaction.cpp diff --git a/be/src/olap/olap_reader.cpp b/be/src/olap/olap_reader.cpp deleted file mode 100644 index c6e55afcae..0000000000 --- a/be/src/olap/olap_reader.cpp +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -#include "olap/olap_reader.h" - -#include - -#include "runtime/datetime_value.h" -#include "util/palo_metrics.h" - -using std::exception; -using std::string; -using std::stringstream; -using std::vector; -using std::map; -using std::nothrow; - -namespace palo { - -Status OLAPShowHints::show_hints( - TShowHintsRequest& fetch_request, - std::vector>>* ranges, - RuntimeProfile* profile) { - OLAP_LOG_DEBUG("Show hints:%s", apache::thrift::ThriftDebugString(fetch_request).c_str()); - { - RuntimeProfile::Counter* show_hints_timer = profile->get_counter("ShowHintsTime"); - SCOPED_TIMER(show_hints_timer); - - OLAPStatus res = OLAP_SUCCESS; - ranges->clear(); - - SmartOLAPTable table = OLAPEngine::get_instance()->get_table( - fetch_request.tablet_id, fetch_request.schema_hash); - if (table.get() == NULL) { - OLAP_LOG_WARNING("table does not exists. [tablet_id=%ld schema_hash=%d]", - fetch_request.tablet_id, fetch_request.schema_hash); - return Status("table does not exists"); - } - - vector start_key_strings; - vector end_key_strings; - vector> range; - if (fetch_request.start_key.size() == 0) { - res = table->split_range(start_key_strings, - end_key_strings, - fetch_request.block_row_count, - &range); - - if (res != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", res); - return Status("fail to show hints"); - } - - ranges->push_back(range); - } else { - for (int key_pair_index = 0; - key_pair_index < fetch_request.start_key.size(); ++key_pair_index) { - start_key_strings.clear(); - end_key_strings.clear(); - range.clear(); - - TFetchStartKey& start_key_field = fetch_request.start_key[key_pair_index]; - for (vector::const_iterator it = start_key_field.key.begin(); - it != start_key_field.key.end(); ++it) { - start_key_strings.push_back(*it); - } - - TFetchEndKey& end_key_field = fetch_request.end_key[key_pair_index]; - for (vector::const_iterator it = end_key_field.key.begin(); - it != end_key_field.key.end(); ++it) { - end_key_strings.push_back(*it); - } - - res = table->split_range(start_key_strings, - end_key_strings, - fetch_request.block_row_count, - &range); - if (res != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to show hints by split range. [res=%d]", res); - return Status("fail to show hints"); - } - - ranges->push_back(range); - } - } - } - - return Status::OK; -} - -} // namespace palo diff --git a/be/src/olap/olap_reader.h b/be/src/olap/olap_reader.h deleted file mode 100644 index 2979c84ad7..0000000000 --- a/be/src/olap/olap_reader.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -#ifndef BDG_PALO_BE_SRC_OLAP_OLAP_READER_H -#define BDG_PALO_BE_SRC_OLAP_OLAP_READER_H - -#include -#include - -#include "common/object_pool.h" -#include "common/status.h" -#include "olap/delete_handler.h" -#include "olap/i_data.h" -#include "olap/olap_cond.h" -#include "olap/olap_engine.h" -#include "olap/reader.h" - -namespace palo { - -class OLAPShowHints { -public: - static Status show_hints( - TShowHintsRequest& fetch_request, - std::vector>>* ranges, - RuntimeProfile* profile); -}; - -} // namespace palo - -#endif // BDG_PALO_BE_SRC_OLAP_OLAP_READER_H diff --git a/be/src/olap/olap_table.cpp b/be/src/olap/olap_table.cpp index 51aeec71b0..7b2e78466a 100644 --- a/be/src/olap/olap_table.cpp +++ b/be/src/olap/olap_table.cpp @@ -780,10 +780,10 @@ OLAPIndex* OLAPTable::_get_largest_index() { } OLAPStatus OLAPTable::split_range( - const vector& start_key_strings, - const vector& end_key_strings, + const OlapTuple& start_key_strings, + const OlapTuple& end_key_strings, uint64_t request_block_row_count, - std::vector>* ranges) { + std::vector* ranges) { if (ranges == NULL) { OLAP_LOG_WARNING("parameter end_row is null."); return OLAP_ERR_INPUT_PARAMETER_ERROR; @@ -805,12 +805,12 @@ OLAPStatus OLAPTable::split_range( // If a start key is given, initialize with it; otherwise initialize with the min key if (start_key_strings.size() > 0) { - if (start_key.init_scan_key(_tablet_schema, start_key_strings) != OLAP_SUCCESS) { + if (start_key.init_scan_key(_tablet_schema, start_key_strings.values()) != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to initial key strings with RowCursor type."); return OLAP_ERR_INIT_FAILED; } - if (start_key.from_string(start_key_strings) != OLAP_SUCCESS) { + if (start_key.from_tuple(start_key_strings) != OLAP_SUCCESS) { OLAP_LOG_WARNING("init end key failed"); return OLAP_ERR_INVALID_SCHEMA; } @@ -826,12 +826,12 @@ OLAPStatus OLAPTable::split_range( // Handled the same way as the start key; if absent, initialize with the max key if (end_key_strings.size() > 0) { - if (OLAP_SUCCESS != end_key.init_scan_key(_tablet_schema, end_key_strings)) { + if (OLAP_SUCCESS != end_key.init_scan_key(_tablet_schema, end_key_strings.values())) { OLAP_LOG_WARNING("fail to parse strings to key with RowCursor type."); return OLAP_ERR_INVALID_SCHEMA; } - if (end_key.from_string(end_key_strings) != OLAP_SUCCESS) { + if (end_key.from_tuple(end_key_strings) != OLAP_SUCCESS) { OLAP_LOG_WARNING("init end key failed"); return OLAP_ERR_INVALID_SCHEMA; } @@ -852,8 +852,8 @@ OLAPStatus OLAPTable::split_range( if (base_index == NULL) { OLAP_LOG_DEBUG("there is no base file now, may be tablet is empty."); // it may be right if the table is empty, so we return success. 
- ranges->push_back(start_key.to_string_vector()); - ranges->push_back(end_key.to_string_vector()); + ranges->emplace_back(start_key.to_tuple()); + ranges->emplace_back(end_key.to_tuple()); return OLAP_SUCCESS; } @@ -906,7 +906,7 @@ OLAPStatus OLAPTable::split_range( last_start_key.allocate_memory_for_string_type(_tablet_schema); last_start_key.copy_without_pool(cur_start_key); // start_key是last start_key, 但返回的实际上是查询层给出的key - ranges->push_back(start_key.to_string_vector()); + ranges->emplace_back(start_key.to_tuple()); while (end_pos > step_pos) { res = base_index->advance_row_block(expected_rows, &step_pos); @@ -924,65 +924,13 @@ OLAPStatus OLAPTable::split_range( cur_start_key.attach(entry.data); if (cur_start_key.cmp(last_start_key) != 0) { - ranges->push_back(cur_start_key.to_string_vector()); // end of last section - ranges->push_back(cur_start_key.to_string_vector()); // start a new section + ranges->emplace_back(cur_start_key.to_tuple()); // end of last section + ranges->emplace_back(cur_start_key.to_tuple()); // start a new section last_start_key.copy_without_pool(cur_start_key); } } - ranges->push_back(end_key.to_string_vector()); - - return OLAP_SUCCESS; -} - -OLAPStatus OLAPTable::_get_block_pos(const vector& key_strings, - bool is_start_key, - OLAPIndex* base_index, - bool find_last, - RowBlockPosition* pos) { - if (key_strings.size() == 0) { - if (is_start_key) { - if (base_index->find_first_row_block(pos) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to find first row block."); - return OLAP_ERR_TABLE_INDEX_FIND_ERROR; - } - - OLAP_LOG_DEBUG("get first row block. [pos='%s']", pos->to_string().c_str()); - } else { - if (base_index->find_last_row_block(pos) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to find last row block."); - return OLAP_ERR_TABLE_INDEX_FIND_ERROR; - } - - OLAP_LOG_DEBUG("get last row block. [pos='%s']", pos->to_string().c_str()); - } - - return OLAP_SUCCESS; - } - - RowCursor key; - if (key.init(_tablet_schema, key_strings.size()) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to initial key strings with RowCursor type."); - return OLAP_ERR_INIT_FAILED; - } - if (key.from_string(key_strings) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to parse strings to key with RowCursor type."); - return OLAP_ERR_INVALID_SCHEMA; - } - - RowCursor helper_cursor; - if (helper_cursor.init(_tablet_schema, num_short_key_fields()) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to init helper cursor."); - return OLAP_ERR_INIT_FAILED; - } - - OLAP_LOG_DEBUG("show num_short_key_field. [num=%lu]", num_short_key_fields()); - - // get the row block position. - if (base_index->find_row_block(key, &helper_cursor, find_last, pos) != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to find row block. 
[key='%s']", key.to_string().c_str()); - return OLAP_ERR_TABLE_INDEX_FIND_ERROR; - } + ranges->emplace_back(end_key.to_tuple()); return OLAP_SUCCESS; } diff --git a/be/src/olap/olap_table.h b/be/src/olap/olap_table.h index c047956dc2..d36ee76bc8 100644 --- a/be/src/olap/olap_table.h +++ b/be/src/olap/olap_table.h @@ -28,6 +28,7 @@ #include "olap/field.h" #include "olap/olap_define.h" #include "olap/olap_header.h" +#include "olap/tuple.h" #include "olap/row_cursor.h" #include "olap/utils.h" @@ -567,10 +568,10 @@ public: } OLAPStatus split_range( - const std::vector& start_key_strings, - const std::vector& end_key_strings, + const OlapTuple& start_key_strings, + const OlapTuple& end_key_strings, uint64_t request_block_row_count, - std::vector>* ranges); + std::vector* ranges); uint32_t segment_size() const { return _header->segment_size(); @@ -663,14 +664,6 @@ private: explicit OLAPTable(OLAPHeader* header); - // Get block pos in base file according to key_strings. - // pos is returned when succeed. - OLAPStatus _get_block_pos(const std::vector& key_strings, - bool is_start_key, - OLAPIndex* base_index, - bool find_last, - RowBlockPosition* pos); - // List files with suffix "idx" or "dat". void _list_files_with_suffix(const std::string& file_suffix, std::set* file_names) const; diff --git a/be/src/olap/push_handler.cpp b/be/src/olap/push_handler.cpp index 4e2d3db009..b6a0a4668d 100644 --- a/be/src/olap/push_handler.cpp +++ b/be/src/olap/push_handler.cpp @@ -211,8 +211,8 @@ OLAPStatus PushHandler::process( res = table_var.olap_table->save_header(); if (res != OLAP_SUCCESS) { - OLAP_LOG_WARNING("fail to save header. [res=%d table='%s']", - res, table_var.olap_table->full_name().c_str()); + LOG(FATAL) << "fail to save header. res=" << res << ", " + << "table=" << table_var.olap_table->full_name(); goto EXIT; } } @@ -635,8 +635,8 @@ OLAPStatus PushHandler::_update_header( // Note we don't return fail here. res = olap_table->save_header(); if (res != OLAP_SUCCESS) { - OLAP_LOG_FATAL("fail to save header. [res=%d table='%s']", - res, olap_table->full_name().c_str()); + LOG(FATAL) << "fail to save header. res=" << res << ", " + << "table=" << olap_table->full_name(); } return res; @@ -670,8 +670,8 @@ OLAPStatus PushHandler::_clear_alter_table_info( res = tablet->save_header(); if (res != OLAP_SUCCESS) { - OLAP_LOG_FATAL("fail to save header. [res=%d table='%s']", - res, tablet->full_name().c_str()); + LOG(FATAL) << "fail to save header. res=" << res << ", " + << "table=" << tablet->full_name(); break; } @@ -690,8 +690,8 @@ OLAPStatus PushHandler::_clear_alter_table_info( res = related_tablet->save_header(); if (res != OLAP_SUCCESS) { - OLAP_LOG_FATAL("fail to save header. [res=%d table='%s']", - res, related_tablet->full_name().c_str()); + LOG(FATAL) << "fail to save header. res=" << res << ", " + << "table=" << related_tablet->full_name(); break; } } diff --git a/be/src/olap/reader.cpp b/be/src/olap/reader.cpp index 37fdd48f1f..d2c3aac9ab 100644 --- a/be/src/olap/reader.cpp +++ b/be/src/olap/reader.cpp @@ -764,23 +764,17 @@ OLAPStatus Reader::_init_keys_param(const ReaderParams& read_params) { } res = _keys_param.start_keys[i]->init_scan_key(_olap_table->tablet_schema(), - read_params.start_key[i].key); + read_params.start_key[i].values()); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init row cursor. 
[res=%d]", res); return res; } - res = _keys_param.start_keys[i]->from_string(read_params.start_key[i].key); + res = _keys_param.start_keys[i]->from_tuple(read_params.start_key[i]); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init row cursor from Keys. [res=%d key_index=%ld]", res, i); return res; } - for (size_t j = 0; j < _keys_param.start_keys[i]->field_count(); ++j) { - if (_olap_table->tablet_schema()[j].is_allow_null - && _keys_param.start_keys[i]->is_min(j)) { - _keys_param.start_keys[i]->set_null(j); - } - } } size_t end_key_size = read_params.end_key.size(); @@ -792,13 +786,13 @@ OLAPStatus Reader::_init_keys_param(const ReaderParams& read_params) { } res = _keys_param.end_keys[i]->init_scan_key(_olap_table->tablet_schema(), - read_params.end_key[i].key); + read_params.end_key[i].values()); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init row cursor. [res=%d]", res); return res; } - res = _keys_param.end_keys[i]->from_string(read_params.end_key[i].key); + res = _keys_param.end_keys[i]->from_tuple(read_params.end_key[i]); if (res != OLAP_SUCCESS) { OLAP_LOG_WARNING("fail to init row cursor from Keys. [res=%d key_index=%ld]", res, i); return res; @@ -1085,14 +1079,14 @@ OLAPStatus Reader::_init_load_bf_columns(const ReaderParams& read_params) { // remove columns which have same value between start_key and end_key int min_scan_key_len = _olap_table->tablet_schema().size(); for (int i = 0; i < read_params.start_key.size(); ++i) { - if (read_params.start_key[i].key.size() < min_scan_key_len) { - min_scan_key_len = read_params.start_key[i].key.size(); + if (read_params.start_key[i].size() < min_scan_key_len) { + min_scan_key_len = read_params.start_key[i].size(); } } for (int i = 0; i < read_params.end_key.size(); ++i) { - if (read_params.end_key[i].key.size() < min_scan_key_len) { - min_scan_key_len = read_params.end_key[i].key.size(); + if (read_params.end_key[i].size() < min_scan_key_len) { + min_scan_key_len = read_params.end_key[i].size(); } } @@ -1100,7 +1094,7 @@ OLAPStatus Reader::_init_load_bf_columns(const ReaderParams& read_params) { for (int i = 0; i < read_params.start_key.size(); ++i) { int j = 0; for (; j < min_scan_key_len; ++j) { - if (read_params.start_key[i].key[j] != read_params.end_key[i].key[j]) { + if (read_params.start_key[i].get_value(j) != read_params.end_key[i].get_value(j)) { break; } } diff --git a/be/src/olap/reader.h b/be/src/olap/reader.h index a8f1e96c77..1303cc127c 100644 --- a/be/src/olap/reader.h +++ b/be/src/olap/reader.h @@ -52,8 +52,8 @@ struct ReaderParams { Version version; std::string range; std::string end_range; - std::vector start_key; - std::vector end_key; + std::vector start_key; + std::vector end_key; std::vector conditions; // The IData will be set when using Merger, eg Cumulative, BE. 
std::vector olap_data_arr; @@ -82,12 +82,12 @@ struct ReaderParams { << " range=" << range << " end_range=" << end_range; - for (int i = 0, size = start_key.size(); i < size; ++i) { - ss << " keys=" << apache::thrift::ThriftDebugString(start_key[i]); + for (auto& key : start_key) { + ss << " keys=" << key; } - for (int i = 0, size = end_key.size(); i < size; ++i) { - ss << " end_keys=" << apache::thrift::ThriftDebugString(end_key[i]); + for (auto& key : end_key){ + ss << " end_keys=" << key; } for (int i = 0, size = conditions.size(); i < size; ++i) { diff --git a/be/src/olap/row_cursor.cpp b/be/src/olap/row_cursor.cpp index 58c9bbb8cc..aedd6769fb 100644 --- a/be/src/olap/row_cursor.cpp +++ b/be/src/olap/row_cursor.cpp @@ -357,6 +357,7 @@ OLAPStatus RowCursor::build_max_key() { Field* field = _field_array[cid]; char* dest = field->get_ptr(_fixed_buf); field->set_to_max(dest); + field->set_not_null(_fixed_buf); } return OLAP_SUCCESS; } @@ -366,26 +367,31 @@ OLAPStatus RowCursor::build_min_key() { Field* field = _field_array[cid]; char* dest = field->get_ptr(_fixed_buf); field->set_to_min(dest); + field->set_null(_fixed_buf); } return OLAP_SUCCESS; } -OLAPStatus RowCursor::from_string(const vector& val_string_array) { - if (val_string_array.size() != _columns.size()) { - OLAP_LOG_WARNING("column count does not match. [string_array_size=%lu; field_count=%lu]", - val_string_array.size(), - _field_array.size()); +OLAPStatus RowCursor::from_tuple(const OlapTuple& tuple) { + if (tuple.size() != _columns.size()) { + LOG(WARNING) << "column count does not match. tuple_size=" << tuple.size() + << ", field_count=" << _field_array.size(); return OLAP_ERR_INPUT_PARAMETER_ERROR; } - for (size_t i = 0; i < val_string_array.size(); ++i) { + for (size_t i = 0; i < tuple.size(); ++i) { Field* field = _field_array[_columns[i]]; + if (tuple.is_null(i)) { + field->set_null(_fixed_buf); + continue; + } + field->set_not_null(_fixed_buf); char* buf = field->get_ptr(_fixed_buf); - OLAPStatus res = field->from_string(buf, val_string_array[i]); - if (OLAP_SUCCESS != res) { - OLAP_LOG_WARNING("Fail to convert field from string.[val_string=%s res=%d]", - val_string_array[i].c_str(), res); + OLAPStatus res = field->from_string(buf, tuple.get_value(i)); + if (res != OLAP_SUCCESS) { + LOG(WARNING) << "fail to convert field from string. 
string=" << tuple.get_value(i) + << ", res=" << res; return res; } } @@ -393,20 +399,24 @@ OLAPStatus RowCursor::from_string(const vector& val_string_array) { return OLAP_SUCCESS; } -std::vector RowCursor::to_string_vector() const { - std::vector result; +OlapTuple RowCursor::to_tuple() const { + OlapTuple tuple; for (auto cid : _columns) { if (_field_array[cid] != NULL) { Field* field = _field_array[cid]; char* src = field->get_ptr(_fixed_buf); - result.push_back(field->to_string(src)); + if (field->is_null(_fixed_buf)) { + tuple.add_null(); + } else { + tuple.add_value(field->to_string(src)); + } } else { - result.push_back(""); + tuple.add_value(""); } } - return result; + return tuple; } string RowCursor::to_string() const { diff --git a/be/src/olap/row_cursor.h b/be/src/olap/row_cursor.h index 393e45a8b9..57ee24eb8a 100644 --- a/be/src/olap/row_cursor.h +++ b/be/src/olap/row_cursor.h @@ -22,6 +22,7 @@ #include "olap/field.h" #include "olap/olap_common.h" #include "olap/olap_define.h" +#include "olap/tuple.h" namespace palo { class Field; @@ -120,7 +121,7 @@ public: // 从传入的字符串数组ååºåˆ—化内部å„field的值 // æ¯ä¸ªå­—符串必须是一个\0结尾的字符串 // è¦æ±‚输入字符串和row cursor有相åŒçš„列数, - OLAPStatus from_string(const std::vector& val_string_arr); + OLAPStatus from_tuple(const OlapTuple& tuple); // 返回当å‰row cursor中列的个数 size_t field_count() const { @@ -130,7 +131,7 @@ public: // 以stringæ ¼å¼è¾“出rowcursor内容,仅供logåŠdebug使用 std::string to_string() const; std::string to_string(std::string sep) const; - std::vector to_string_vector() const; + OlapTuple to_tuple() const; // 从å¦å¤–一个RowCursorå¤åˆ¶å®Œæ•´çš„内容,需è¦ä¸¤ä¸ªcursoråœ¨å­—æ®µé•¿åº¦å’Œç±»åž‹ä¸Šå®Œå…¨åŒ¹é… inline OLAPStatus copy(const RowCursor& other, MemPool* mem_pool); diff --git a/be/src/olap/tuple.h b/be/src/olap/tuple.h new file mode 100644 index 0000000000..384e0aa5e4 --- /dev/null +++ b/be/src/olap/tuple.h @@ -0,0 +1,79 @@ +// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +#pragma once + +#include +#include + +namespace palo { + +class OlapTuple { +public: + OlapTuple() { } + OlapTuple(const std::vector& values) + : _values(values), _nulls(values.size(), false) { + } + + void add_null() { + _values.push_back(""); + _nulls.push_back(true); + } + + void add_value(const std::string& value, bool is_null = false) { + _values.push_back(value); + _nulls.push_back(is_null); + } + + size_t size() const { return _values.size(); } + + void reserve(size_t size) { + _values.reserve(size); + _nulls.reserve(size); + } + + void set_value(size_t i, const std::string& value, bool is_null = false) { + _values[i] = value; + _nulls[i] = is_null; + } + + bool is_null(size_t i) const { return _nulls[i]; } + const std::string& get_value(size_t i) const { return _values[i]; } + const std::vector& values() const { return _values; } + + void reset() { + _values.clear(); + _nulls.clear(); + } +private: + friend std::ostream& operator<<(std::ostream& os, const OlapTuple& tuple); + + std::vector _values; + std::vector _nulls; +}; + +inline std::ostream& operator<<(std::ostream& os, const OlapTuple& tuple) { + for (int i = 0; i < tuple._values.size(); ++i) { + if (i > 0) { os << ","; } + if (tuple._nulls[i]) { + os << "null"; + } else { + os << tuple._values[i]; + } + } + return os; +} + +} diff --git a/be/src/runtime/data_spliter.cpp b/be/src/runtime/data_spliter.cpp index c0622b7e83..56f496deaa 100644 --- a/be/src/runtime/data_spliter.cpp +++ b/be/src/runtime/data_spliter.cpp @@ -21,6 +21,7 @@ #include "exprs/expr.h" #include "common/object_pool.h" +#include "service/backend_options.h" #include "runtime/runtime_state.h" #include "runtime/raw_value.h" #include "runtime/row_batch.h" @@ -297,7 +298,8 @@ Status DataSpliter::close(RuntimeState* state, Status close_status) { Status status = iter->finish(state); if (UNLIKELY(is_ok && !status.ok())) { LOG(WARNING) << "finish dpp_sink error" - << " err_msg=" << status.get_error_msg(); + << " err_msg=" << status.get_error_msg() + << " backend=" << BackendOptions::get_localhost(); is_ok = false; err_status = status; } diff --git a/be/src/runtime/export_sink.cpp b/be/src/runtime/export_sink.cpp index 9f29c4ae8c..d790049a99 100644 --- a/be/src/runtime/export_sink.cpp +++ b/be/src/runtime/export_sink.cpp @@ -121,72 +121,73 @@ Status ExportSink::gen_row_buffer(TupleRow* row, std::stringstream* ss) { void* item = _output_expr_ctxs[i]->get_value(row); if (item == nullptr) { (*ss) << "NULL"; - continue; - } - switch (_output_expr_ctxs[i]->root()->type().type) { - case TYPE_BOOLEAN: - case TYPE_TINYINT: - (*ss) << (int)*static_cast(item); - break; - case TYPE_SMALLINT: - (*ss) << *static_cast(item); - break; - case TYPE_INT: - (*ss) << *static_cast(item); - break; - case TYPE_BIGINT: - (*ss) << *static_cast(item); - break; - case TYPE_LARGEINT: - (*ss) << reinterpret_cast(item)->value; - break; - case TYPE_FLOAT: - (*ss) << *static_cast(item); - break; - case TYPE_DOUBLE: - (*ss) << *static_cast(item); - break; - case TYPE_DATE: - case TYPE_DATETIME: { - char buf[64]; - const DateTimeValue* time_val = (const DateTimeValue*)(item); - time_val->to_string(buf); - (*ss) << buf; - break; - } - case TYPE_VARCHAR: - case TYPE_CHAR: { - const StringValue* string_val = (const StringValue*)(item); - - if (string_val->ptr == NULL) { - if (string_val->len == 0) { - } else { - (*ss) << "NULL"; + } else { + switch (_output_expr_ctxs[i]->root()->type().type) { + case TYPE_BOOLEAN: + case TYPE_TINYINT: + (*ss) << (int)*static_cast(item); + break; + case TYPE_SMALLINT: 
+ (*ss) << *static_cast(item); + break; + case TYPE_INT: + (*ss) << *static_cast(item); + break; + case TYPE_BIGINT: + (*ss) << *static_cast(item); + break; + case TYPE_LARGEINT: + (*ss) << reinterpret_cast(item)->value; + break; + case TYPE_FLOAT: + (*ss) << *static_cast(item); + break; + case TYPE_DOUBLE: + (*ss) << *static_cast(item); + break; + case TYPE_DATE: + case TYPE_DATETIME: { + char buf[64]; + const DateTimeValue* time_val = (const DateTimeValue*)(item); + time_val->to_string(buf); + (*ss) << buf; + break; } - } else { - (*ss) << std::string(string_val->ptr, string_val->len); - } - break; - } - case TYPE_DECIMAL: { - const DecimalValue* decimal_val = reinterpret_cast(item); - std::string decimal_str; - int output_scale = _output_expr_ctxs[i]->root()->output_scale(); + case TYPE_VARCHAR: + case TYPE_CHAR: { + const StringValue* string_val = (const StringValue*)(item); - if (output_scale > 0 && output_scale <= 30) { - decimal_str = decimal_val->to_string(output_scale); - } else { - decimal_str = decimal_val->to_string(); + if (string_val->ptr == NULL) { + if (string_val->len == 0) { + } else { + (*ss) << "NULL"; + } + } else { + (*ss) << std::string(string_val->ptr, string_val->len); + } + break; + } + case TYPE_DECIMAL: { + const DecimalValue* decimal_val = reinterpret_cast(item); + std::string decimal_str; + int output_scale = _output_expr_ctxs[i]->root()->output_scale(); + + if (output_scale > 0 && output_scale <= 30) { + decimal_str = decimal_val->to_string(output_scale); + } else { + decimal_str = decimal_val->to_string(); + } + (*ss) << decimal_str; + break; + } + default: { + std::stringstream err_ss; + err_ss << "can't export this type. type = " << _output_expr_ctxs[i]->root()->type(); + return Status(err_ss.str()); + } } - (*ss) << decimal_str; - break; - } - default: { - std::stringstream err_ss; - err_ss << "can't export this type. type = " << _output_expr_ctxs[i]->root()->type(); - return Status(err_ss.str()); - } } + if (i < num_columns - 1) { (*ss) << _t_export_sink.column_separator; } diff --git a/be/src/runtime/load_path_mgr.cpp b/be/src/runtime/load_path_mgr.cpp index 12483fb89d..18e22ebcdb 100644 --- a/be/src/runtime/load_path_mgr.cpp +++ b/be/src/runtime/load_path_mgr.cpp @@ -29,7 +29,10 @@ namespace palo { -LoadPathMgr::LoadPathMgr() : _idx(0) { } +static const uint32_t MAX_SHARD_NUM = 1024; +static const std::string SHARD_PREFIX = "__shard_"; + +LoadPathMgr::LoadPathMgr() : _idx(0), _next_shard(0) { } Status LoadPathMgr::init() { OLAPRootPath::RootPathVec all_available_root_path; @@ -75,14 +78,18 @@ Status LoadPathMgr::allocate_dir( Status status = Status::OK; while (retry--) { { + // add SHARD_PREFIX for compatible purpose std::lock_guard l(_lock); - path = _path_vec[_idx] + "/" + db + "/" + label; + std::string shard = SHARD_PREFIX + std::to_string(_next_shard++ % MAX_SHARD_NUM); + path = _path_vec[_idx] + "/" + db + "/" + shard + "/" + label; _idx = (_idx + 1) % size; } status = FileUtils::create_dir(path); if (LIKELY(status.ok())) { *prefix = path; return Status::OK; + } else { + LOG(WARNING) << "create dir failed:" << path << ", error msg:" << status.get_error_msg(); } } @@ -134,6 +141,19 @@ std::string LoadPathMgr::get_load_error_absolute_path(const std::string& file_na return path; } +void LoadPathMgr::process_label_dir(time_t now, const std::string& label_dir) { + if (!is_too_old(now, label_dir)) { + return; + } + LOG(INFO) << "Going to remove load directory. 
path=" << label_dir; + Status status = FileUtils::remove_all(label_dir); + if (status.ok()) { + LOG(INFO) << "Remove load directory success. path=" << label_dir; + } else { + LOG(WARNING) << "Remove load directory failed. path=" << label_dir; + } +} + void LoadPathMgr::clean_one_path(const std::string& path) { std::vector dbs; Status status = FileUtils::scan_dir(path, &dbs); @@ -145,24 +165,32 @@ void LoadPathMgr::clean_one_path(const std::string& path) { time_t now = time(nullptr); for (auto& db : dbs) { std::string db_dir = path + "/" + db; - std::vector labels; - status = FileUtils::scan_dir(db_dir, &labels); + std::vector sub_dirs; + status = FileUtils::scan_dir(db_dir, &sub_dirs); if (!status.ok()) { LOG(WARNING) << "scan db of trash dir failed, continue. dir=" << db_dir; continue; } // delete this file - for (auto& label : labels) { - std::string label_dir = db_dir + "/" + label; - if (!is_too_old(now, label_dir)) { - continue; - } - LOG(INFO) << "Going to remove load directory. path=" << label_dir; - status = FileUtils::remove_all(label_dir); - if (status.ok()) { - LOG(INFO) << "Remove load directory success. path=" << label_dir; + for (auto& sub_dir : sub_dirs) { + std::string sub_path = db_dir + "/" + sub_dir; + // for compatible + if (sub_dir.find(SHARD_PREFIX) == 0) { + // sub_dir starts with SHARD_PREFIX + // process shard sub dir + std::vector labels; + Status status = FileUtils::scan_dir(sub_path, &labels); + if (!status.ok()) { + LOG(WARNING) << "scan one path to delete directory failed. path=" << path; + continue; + } + for (auto& label : labels) { + std::string label_dir = sub_path + "/" + label; + process_label_dir(now, label_dir); + } } else { - LOG(WARNING) << "Remove load directory failed. path=" << label_dir; + // process label dir + process_label_dir(now, sub_path); } } } diff --git a/be/src/runtime/load_path_mgr.h b/be/src/runtime/load_path_mgr.h index 97147bc365..c67ca738d8 100644 --- a/be/src/runtime/load_path_mgr.h +++ b/be/src/runtime/load_path_mgr.h @@ -56,6 +56,7 @@ private: void clean_one_path(const std::string& path); void clean_error_log(); void clean(); + void process_label_dir(time_t now, const std::string& label_dir); static void* cleaner(void* param); @@ -65,6 +66,7 @@ private: int _reserved_hours; pthread_t _cleaner_id; std::string _error_log_dir; + uint32_t _next_shard; }; } diff --git a/be/test/exec/hash_table_test.cpp b/be/test/exec/hash_table_test.cpp index 9e739ea3b9..d6c8ddad71 100644 --- a/be/test/exec/hash_table_test.cpp +++ b/be/test/exec/hash_table_test.cpp @@ -13,342 +13,342 @@ // specific language governing permissions and limitations // under the License. -#include -#include -#include -#include -#include - -#include - -#include "common/compiler_util.h" -#include "exec/hash_table.hpp" -#include "exprs/expr.h" -#include "runtime/mem_pool.h" -#include "runtime/string_value.h" -#include "runtime/mem_limit.hpp" -#include "util/cpu_info.h" -#include "util/runtime_profile.h" - -namespace palo { - -using std::vector; -using std::map; - - -class HashTableTest : public testing::Test { -public: - HashTableTest() : _mem_pool() {} - -protected: - ObjectPool _pool; - MemPool _mem_pool; - vector _build_expr; - vector _probe_expr; - - virtual void SetUp() { - RowDescriptor desc; - Status status; - - // Not very easy to test complex tuple layouts so this test will use the - // simplest. The purpose of these tests is to exercise the hash map - // internals so a simple build/probe expr is fine. 
- _build_expr.push_back(_pool.add(new SlotRef(TYPE_INT, 0))); - status = Expr::prepare(_build_expr, NULL, desc); - EXPECT_TRUE(status.ok()); - - _probe_expr.push_back(_pool.add(new SlotRef(TYPE_INT, 0))); - status = Expr::prepare(_probe_expr, NULL, desc); - EXPECT_TRUE(status.ok()); - } - - TupleRow* create_tuple_row(int32_t val); - - // Wrapper to call private methods on HashTable - // TODO: understand google testing, there must be a more natural way to do this - void resize_table(HashTable* table, int64_t new_size) { - table->resize_buckets(new_size); - } - - // Do a full table scan on table. All values should be between [min,max). If - // all_unique, then each key(int value) should only appear once. Results are - // stored in results, indexed by the key. Results must have been preallocated to - // be at least max size. - void full_scan(HashTable* table, int min, int max, bool all_unique, - TupleRow** results, TupleRow** expected) { - HashTable::Iterator iter = table->begin(); - - while (iter != table->end()) { - TupleRow* row = iter.get_row(); - int32_t val = *reinterpret_cast(_build_expr[0]->get_value(row)); - EXPECT_GE(val, min); - EXPECT_LT(val, max); - - if (all_unique) { - EXPECT_TRUE(results[val] == NULL); - } - - EXPECT_EQ(row->get_tuple(0), expected[val]->get_tuple(0)); - results[val] = row; - iter.next(); - } - } - - // Validate that probe_row evaluates overs probe_exprs is equal to build_row - // evaluated over build_exprs - void validate_match(TupleRow* probe_row, TupleRow* build_row) { - EXPECT_TRUE(probe_row != build_row); - int32_t build_val = *reinterpret_cast(_build_expr[0]->get_value(probe_row)); - int32_t probe_val = *reinterpret_cast(_probe_expr[0]->get_value(build_row)); - EXPECT_EQ(build_val, probe_val); - } - - struct ProbeTestData { - TupleRow* probe_row; - vector expected_build_rows; - }; - - void probe_test(HashTable* table, ProbeTestData* data, int num_data, bool scan) { - for (int i = 0; i < num_data; ++i) { - TupleRow* row = data[i].probe_row; - - HashTable::Iterator iter; - iter = table->find(row); - - if (data[i].expected_build_rows.size() == 0) { - EXPECT_TRUE(iter == table->end()); - } else { - if (scan) { - map matched; - - while (iter != table->end()) { - EXPECT_TRUE(matched.find(iter.get_row()) == matched.end()); - matched[iter.get_row()] = true; - iter.next(); - } - - EXPECT_EQ(matched.size(), data[i].expected_build_rows.size()); - - for (int j = 0; i < data[j].expected_build_rows.size(); ++j) { - EXPECT_TRUE(matched[data[i].expected_build_rows[j]]); - } - } else { - EXPECT_EQ(data[i].expected_build_rows.size(), 1); - EXPECT_EQ(data[i].expected_build_rows[0]->get_tuple(0), - iter.get_row()->get_tuple(0)); - validate_match(row, iter.get_row()); - } - } - } - } -}; - -TupleRow* HashTableTest::create_tuple_row(int32_t val) { - uint8_t* tuple_row_mem = _mem_pool.allocate(sizeof(int32_t*)); - uint8_t* tuple_mem = _mem_pool.allocate(sizeof(int32_t)); - *reinterpret_cast(tuple_mem) = val; - TupleRow* row = reinterpret_cast(tuple_row_mem); - row->set_tuple(0, reinterpret_cast(tuple_mem)); - return row; -} - -TEST_F(HashTableTest, SetupTest) { - TupleRow* build_row1 = create_tuple_row(1); - TupleRow* build_row2 = create_tuple_row(2); - TupleRow* probe_row3 = create_tuple_row(3); - TupleRow* probe_row4 = create_tuple_row(4); - - int32_t* val_row1 = reinterpret_cast(_build_expr[0]->get_value(build_row1)); - int32_t* val_row2 = reinterpret_cast(_build_expr[0]->get_value(build_row2)); - int32_t* val_row3 = reinterpret_cast(_probe_expr[0]->get_value(probe_row3)); - 
int32_t* val_row4 = reinterpret_cast(_probe_expr[0]->get_value(probe_row4)); - - EXPECT_EQ(*val_row1, 1); - EXPECT_EQ(*val_row2, 2); - EXPECT_EQ(*val_row3, 3); - EXPECT_EQ(*val_row4, 4); -} - -// This tests inserts the build rows [0->5) to hash table. It validates that they -// are all there using a full table scan. It also validates that find() is correct -// testing for probe rows that are both there and not. -// The hash table is rehashed a few times and the scans/finds are tested again. -TEST_F(HashTableTest, BasicTest) { - TupleRow* build_rows[5]; - TupleRow* scan_rows[5] = {0}; - - for (int i = 0; i < 5; ++i) { - build_rows[i] = create_tuple_row(i); - } - - ProbeTestData probe_rows[10]; - - for (int i = 0; i < 10; ++i) { - probe_rows[i].probe_row = create_tuple_row(i); - - if (i < 5) { - probe_rows[i].expected_build_rows.push_back(build_rows[i]); - } - } - - // Create the hash table and insert the build rows - HashTable hash_table(_build_expr, _probe_expr, 1, false, 0); - - for (int i = 0; i < 5; ++i) { - hash_table.insert(build_rows[i]); - } - - EXPECT_EQ(hash_table.size(), 5); - - // Do a full table scan and validate returned pointers - full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); - probe_test(&hash_table, probe_rows, 10, false); - - // Resize and scan again - resize_table(&hash_table, 64); - EXPECT_EQ(hash_table.num_buckets(), 64); - EXPECT_EQ(hash_table.size(), 5); - memset(scan_rows, 0, sizeof(scan_rows)); - full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); - probe_test(&hash_table, probe_rows, 10, false); - - // Resize to two and cause some collisions - resize_table(&hash_table, 2); - EXPECT_EQ(hash_table.num_buckets(), 2); - EXPECT_EQ(hash_table.size(), 5); - memset(scan_rows, 0, sizeof(scan_rows)); - full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); - probe_test(&hash_table, probe_rows, 10, false); - - // Resize to one and turn it into a linked list - resize_table(&hash_table, 1); - EXPECT_EQ(hash_table.num_buckets(), 1); - EXPECT_EQ(hash_table.size(), 5); - memset(scan_rows, 0, sizeof(scan_rows)); - full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); - probe_test(&hash_table, probe_rows, 10, false); -} - -// This tests makes sure we can scan ranges of buckets -TEST_F(HashTableTest, ScanTest) { - HashTable hash_table(_build_expr, _probe_expr, 1, false, 0); - // Add 1 row with val 1, 2 with val 2, etc - vector build_rows; - ProbeTestData probe_rows[15]; - probe_rows[0].probe_row = create_tuple_row(0); - - for (int val = 1; val <= 10; ++val) { - probe_rows[val].probe_row = create_tuple_row(val); - - for (int i = 0; i < val; ++i) { - TupleRow* row = create_tuple_row(val); - hash_table.insert(row); - build_rows.push_back(row); - probe_rows[val].expected_build_rows.push_back(row); - } - } - - // Add some more probe rows that aren't there - for (int val = 11; val < 15; ++val) { - probe_rows[val].probe_row = create_tuple_row(val); - } - - // Test that all the builds were found - probe_test(&hash_table, probe_rows, 15, true); - - // Resize and try again - resize_table(&hash_table, 128); - EXPECT_EQ(hash_table.num_buckets(), 128); - probe_test(&hash_table, probe_rows, 15, true); - - resize_table(&hash_table, 16); - EXPECT_EQ(hash_table.num_buckets(), 16); - probe_test(&hash_table, probe_rows, 15, true); - - resize_table(&hash_table, 2); - EXPECT_EQ(hash_table.num_buckets(), 2); - probe_test(&hash_table, probe_rows, 15, true); -} - -// This test continues adding to the hash table to trigger the resize code paths -TEST_F(HashTableTest, 
GrowTableTest) { - int build_row_val = 0; - int num_to_add = 4; - int expected_size = 0; - MemTracker mem_limit(1024 * 1024); - vector mem_limits; - mem_limits.push_back(&mem_limit); - HashTable hash_table( - _build_expr, _probe_expr, 1, false, 0, mem_limits, num_to_add); - EXPECT_TRUE(!mem_limit.limit_exceeded()); - - // This inserts about 5M entries - for (int i = 0; i < 20; ++i) { - for (int j = 0; j < num_to_add; ++build_row_val, ++j) { - hash_table.insert(create_tuple_row(build_row_val)); - } - - expected_size += num_to_add; - num_to_add *= 2; - EXPECT_EQ(hash_table.size(), expected_size); - } - - EXPECT_TRUE(mem_limit.limit_exceeded()); - - // Validate that we can find the entries - for (int i = 0; i < expected_size * 5; i += 100000) { - TupleRow* probe_row = create_tuple_row(i); - HashTable::Iterator iter = hash_table.find(probe_row); - - if (i < expected_size) { - EXPECT_TRUE(iter != hash_table.end()); - validate_match(probe_row, iter.get_row()); - } else { - EXPECT_TRUE(iter == hash_table.end()); - } - } -} - -// This test continues adding to the hash table to trigger the resize code paths -TEST_F(HashTableTest, GrowTableTest2) { - int build_row_val = 0; - int num_to_add = 1024; - int expected_size = 0; - MemTracker mem_limit(1024 * 1024); - vector mem_limits; - mem_limits.push_back(&mem_limit); - HashTable hash_table( - _build_expr, _probe_expr, 1, false, 0, mem_limits, num_to_add); - - LOG(INFO) << time(NULL); - - // This inserts about 5M entries - for (int i = 0; i < 5 * 1024 * 1024; ++i) { - hash_table.insert(create_tuple_row(build_row_val)); - expected_size += num_to_add; - } - - LOG(INFO) << time(NULL); - - // Validate that we can find the entries - for (int i = 0; i < 5 * 1024 * 1024; ++i) { - TupleRow* probe_row = create_tuple_row(i); - hash_table.find(probe_row); - } - - LOG(INFO) << time(NULL); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. \n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - palo::CpuInfo::init(); - return RUN_ALL_TESTS(); -} +#include +#include +#include +#include +#include + +#include + +#include "common/compiler_util.h" +#include "exec/hash_table.hpp" +#include "exprs/expr.h" +#include "runtime/mem_pool.h" +#include "runtime/string_value.h" +#include "runtime/mem_limit.hpp" +#include "util/cpu_info.h" +#include "util/runtime_profile.h" + +namespace palo { + +using std::vector; +using std::map; + + +class HashTableTest : public testing::Test { +public: + HashTableTest() : _mem_pool() {} + +protected: + ObjectPool _pool; + MemPool _mem_pool; + vector _build_expr; + vector _probe_expr; + + virtual void SetUp() { + RowDescriptor desc; + Status status; + + // Not very easy to test complex tuple layouts so this test will use the + // simplest. The purpose of these tests is to exercise the hash map + // internals so a simple build/probe expr is fine. 
+ _build_expr.push_back(_pool.add(new SlotRef(TYPE_INT, 0))); + status = Expr::prepare(_build_expr, NULL, desc); + EXPECT_TRUE(status.ok()); + + _probe_expr.push_back(_pool.add(new SlotRef(TYPE_INT, 0))); + status = Expr::prepare(_probe_expr, NULL, desc); + EXPECT_TRUE(status.ok()); + } + + TupleRow* create_tuple_row(int32_t val); + + // Wrapper to call private methods on HashTable + // TODO: understand google testing, there must be a more natural way to do this + void resize_table(HashTable* table, int64_t new_size) { + table->resize_buckets(new_size); + } + + // Do a full table scan on table. All values should be between [min,max). If + // all_unique, then each key(int value) should only appear once. Results are + // stored in results, indexed by the key. Results must have been preallocated to + // be at least max size. + void full_scan(HashTable* table, int min, int max, bool all_unique, + TupleRow** results, TupleRow** expected) { + HashTable::Iterator iter = table->begin(); + + while (iter != table->end()) { + TupleRow* row = iter.get_row(); + int32_t val = *reinterpret_cast(_build_expr[0]->get_value(row)); + EXPECT_GE(val, min); + EXPECT_LT(val, max); + + if (all_unique) { + EXPECT_TRUE(results[val] == NULL); + } + + EXPECT_EQ(row->get_tuple(0), expected[val]->get_tuple(0)); + results[val] = row; + iter.next(); + } + } + + // Validate that probe_row evaluates overs probe_exprs is equal to build_row + // evaluated over build_exprs + void validate_match(TupleRow* probe_row, TupleRow* build_row) { + EXPECT_TRUE(probe_row != build_row); + int32_t build_val = *reinterpret_cast(_build_expr[0]->get_value(probe_row)); + int32_t probe_val = *reinterpret_cast(_probe_expr[0]->get_value(build_row)); + EXPECT_EQ(build_val, probe_val); + } + + struct ProbeTestData { + TupleRow* probe_row; + vector expected_build_rows; + }; + + void probe_test(HashTable* table, ProbeTestData* data, int num_data, bool scan) { + for (int i = 0; i < num_data; ++i) { + TupleRow* row = data[i].probe_row; + + HashTable::Iterator iter; + iter = table->find(row); + + if (data[i].expected_build_rows.size() == 0) { + EXPECT_TRUE(iter == table->end()); + } else { + if (scan) { + map matched; + + while (iter != table->end()) { + EXPECT_TRUE(matched.find(iter.get_row()) == matched.end()); + matched[iter.get_row()] = true; + iter.next(); + } + + EXPECT_EQ(matched.size(), data[i].expected_build_rows.size()); + + for (int j = 0; i < data[j].expected_build_rows.size(); ++j) { + EXPECT_TRUE(matched[data[i].expected_build_rows[j]]); + } + } else { + EXPECT_EQ(data[i].expected_build_rows.size(), 1); + EXPECT_EQ(data[i].expected_build_rows[0]->get_tuple(0), + iter.get_row()->get_tuple(0)); + validate_match(row, iter.get_row()); + } + } + } + } +}; + +TupleRow* HashTableTest::create_tuple_row(int32_t val) { + uint8_t* tuple_row_mem = _mem_pool.allocate(sizeof(int32_t*)); + uint8_t* tuple_mem = _mem_pool.allocate(sizeof(int32_t)); + *reinterpret_cast(tuple_mem) = val; + TupleRow* row = reinterpret_cast(tuple_row_mem); + row->set_tuple(0, reinterpret_cast(tuple_mem)); + return row; +} + +TEST_F(HashTableTest, SetupTest) { + TupleRow* build_row1 = create_tuple_row(1); + TupleRow* build_row2 = create_tuple_row(2); + TupleRow* probe_row3 = create_tuple_row(3); + TupleRow* probe_row4 = create_tuple_row(4); + + int32_t* val_row1 = reinterpret_cast(_build_expr[0]->get_value(build_row1)); + int32_t* val_row2 = reinterpret_cast(_build_expr[0]->get_value(build_row2)); + int32_t* val_row3 = reinterpret_cast(_probe_expr[0]->get_value(probe_row3)); + 
int32_t* val_row4 = reinterpret_cast(_probe_expr[0]->get_value(probe_row4)); + + EXPECT_EQ(*val_row1, 1); + EXPECT_EQ(*val_row2, 2); + EXPECT_EQ(*val_row3, 3); + EXPECT_EQ(*val_row4, 4); +} + +// This tests inserts the build rows [0->5) to hash table. It validates that they +// are all there using a full table scan. It also validates that find() is correct +// testing for probe rows that are both there and not. +// The hash table is rehashed a few times and the scans/finds are tested again. +TEST_F(HashTableTest, BasicTest) { + TupleRow* build_rows[5]; + TupleRow* scan_rows[5] = {0}; + + for (int i = 0; i < 5; ++i) { + build_rows[i] = create_tuple_row(i); + } + + ProbeTestData probe_rows[10]; + + for (int i = 0; i < 10; ++i) { + probe_rows[i].probe_row = create_tuple_row(i); + + if (i < 5) { + probe_rows[i].expected_build_rows.push_back(build_rows[i]); + } + } + + // Create the hash table and insert the build rows + HashTable hash_table(_build_expr, _probe_expr, 1, false, 0); + + for (int i = 0; i < 5; ++i) { + hash_table.insert(build_rows[i]); + } + + EXPECT_EQ(hash_table.size(), 5); + + // Do a full table scan and validate returned pointers + full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); + probe_test(&hash_table, probe_rows, 10, false); + + // Resize and scan again + resize_table(&hash_table, 64); + EXPECT_EQ(hash_table.num_buckets(), 64); + EXPECT_EQ(hash_table.size(), 5); + memset(scan_rows, 0, sizeof(scan_rows)); + full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); + probe_test(&hash_table, probe_rows, 10, false); + + // Resize to two and cause some collisions + resize_table(&hash_table, 2); + EXPECT_EQ(hash_table.num_buckets(), 2); + EXPECT_EQ(hash_table.size(), 5); + memset(scan_rows, 0, sizeof(scan_rows)); + full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); + probe_test(&hash_table, probe_rows, 10, false); + + // Resize to one and turn it into a linked list + resize_table(&hash_table, 1); + EXPECT_EQ(hash_table.num_buckets(), 1); + EXPECT_EQ(hash_table.size(), 5); + memset(scan_rows, 0, sizeof(scan_rows)); + full_scan(&hash_table, 0, 5, true, scan_rows, build_rows); + probe_test(&hash_table, probe_rows, 10, false); +} + +// This tests makes sure we can scan ranges of buckets +TEST_F(HashTableTest, ScanTest) { + HashTable hash_table(_build_expr, _probe_expr, 1, false, 0); + // Add 1 row with val 1, 2 with val 2, etc + vector build_rows; + ProbeTestData probe_rows[15]; + probe_rows[0].probe_row = create_tuple_row(0); + + for (int val = 1; val <= 10; ++val) { + probe_rows[val].probe_row = create_tuple_row(val); + + for (int i = 0; i < val; ++i) { + TupleRow* row = create_tuple_row(val); + hash_table.insert(row); + build_rows.push_back(row); + probe_rows[val].expected_build_rows.push_back(row); + } + } + + // Add some more probe rows that aren't there + for (int val = 11; val < 15; ++val) { + probe_rows[val].probe_row = create_tuple_row(val); + } + + // Test that all the builds were found + probe_test(&hash_table, probe_rows, 15, true); + + // Resize and try again + resize_table(&hash_table, 128); + EXPECT_EQ(hash_table.num_buckets(), 128); + probe_test(&hash_table, probe_rows, 15, true); + + resize_table(&hash_table, 16); + EXPECT_EQ(hash_table.num_buckets(), 16); + probe_test(&hash_table, probe_rows, 15, true); + + resize_table(&hash_table, 2); + EXPECT_EQ(hash_table.num_buckets(), 2); + probe_test(&hash_table, probe_rows, 15, true); +} + +// This test continues adding to the hash table to trigger the resize code paths +TEST_F(HashTableTest, 
GrowTableTest) { + int build_row_val = 0; + int num_to_add = 4; + int expected_size = 0; + MemTracker mem_limit(1024 * 1024); + vector mem_limits; + mem_limits.push_back(&mem_limit); + HashTable hash_table( + _build_expr, _probe_expr, 1, false, 0, mem_limits, num_to_add); + EXPECT_TRUE(!mem_limit.limit_exceeded()); + + // This inserts about 5M entries + for (int i = 0; i < 20; ++i) { + for (int j = 0; j < num_to_add; ++build_row_val, ++j) { + hash_table.insert(create_tuple_row(build_row_val)); + } + + expected_size += num_to_add; + num_to_add *= 2; + EXPECT_EQ(hash_table.size(), expected_size); + } + + EXPECT_TRUE(mem_limit.limit_exceeded()); + + // Validate that we can find the entries + for (int i = 0; i < expected_size * 5; i += 100000) { + TupleRow* probe_row = create_tuple_row(i); + HashTable::Iterator iter = hash_table.find(probe_row); + + if (i < expected_size) { + EXPECT_TRUE(iter != hash_table.end()); + validate_match(probe_row, iter.get_row()); + } else { + EXPECT_TRUE(iter == hash_table.end()); + } + } +} + +// This test continues adding to the hash table to trigger the resize code paths +TEST_F(HashTableTest, GrowTableTest2) { + int build_row_val = 0; + int num_to_add = 1024; + int expected_size = 0; + MemTracker mem_limit(1024 * 1024); + vector mem_limits; + mem_limits.push_back(&mem_limit); + HashTable hash_table( + _build_expr, _probe_expr, 1, false, 0, mem_limits, num_to_add); + + LOG(INFO) << time(NULL); + + // This inserts about 5M entries + for (int i = 0; i < 5 * 1024 * 1024; ++i) { + hash_table.insert(create_tuple_row(build_row_val)); + expected_size += num_to_add; + } + + LOG(INFO) << time(NULL); + + // Validate that we can find the entries + for (int i = 0; i < 5 * 1024 * 1024; ++i) { + TupleRow* probe_row = create_tuple_row(i); + hash_table.find(probe_row); + } + + LOG(INFO) << time(NULL); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + palo::CpuInfo::init(); + return RUN_ALL_TESTS(); +} diff --git a/be/test/exec/mysql_scan_node_test.cpp b/be/test/exec/mysql_scan_node_test.cpp index b4fb228f93..b37ed61924 100644 --- a/be/test/exec/mysql_scan_node_test.cpp +++ b/be/test/exec/mysql_scan_node_test.cpp @@ -13,280 +13,280 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include -#include - -#include "common/object_pool.h" -#include "exec/mysql_scan_node.h" -#include "exec/text_converter.inline.h" -#include "gen_cpp/PlanNodes_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "runtime/runtime_state.h" -#include "runtime/row_batch.h" -#include "runtime/string_value.h" -#include "runtime/tuple_row.h" -#include "schema_scan_node.h" -#include "util/runtime_profile.h" -#include "util/debug_util.h" - -using std::vector; - -namespace palo { - -// mock -class MysqlScanNodeTest : public testing::Test { -public: - MysqlScanNodeTest() : _runtim_state("test") { - TDescriptorTable t_desc_table; - - // table descriptors - TTableDescriptor t_table_desc; - - t_table_desc.id = 0; - t_table_desc.tableType = TTableType::MYSQL_TABLE; - t_table_desc.numCols = 0; - t_table_desc.numClusteringCols = 0; - t_table_desc.mysqlTable.tableName = "table"; - t_table_desc.mysqlTable.mysqlHost = "host"; - t_table_desc.mysqlTable.mysqlPort = "port"; - t_table_desc.mysqlTable.mysqlUser = "user"; - t_table_desc.mysqlTable.mysqlPasswd = "passwd"; - t_table_desc.tableName = "table"; - t_table_desc.dbName = "db"; - t_table_desc.__isset.mysqlTable = true; - t_desc_table.tableDescriptors.push_back(t_table_desc); - t_desc_table.__isset.tableDescriptors = true; - // TSlotDescriptor - int offset = 1; - int i = 0; - // dummy - { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_INT)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(-1); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(false); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(int); - } - // id - { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_INT)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(-1); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(true); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(int); - } - ++i; - // model - { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(0); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(true); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(StringValue); - } - ++i; - // price - { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(1); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(true); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(StringValue); - } - ++i; - // grade - { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(2); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(true); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(StringValue); - } - - t_desc_table.__isset.slotDescriptors = true; - // TTupleDescriptor 
- TTupleDescriptor t_tuple_desc; - t_tuple_desc.id = 0; - t_tuple_desc.byteSize = offset; - t_tuple_desc.numNullBytes = 1; - t_tuple_desc.tableId = 0; - t_tuple_desc.__isset.tableId = true; - t_desc_table.tupleDescriptors.push_back(t_tuple_desc); - - DescriptorTbl::create(&_obj_pool, t_desc_table, &_desc_tbl); - - _runtim_state.set_desc_tbl(_desc_tbl); - - // Node Id - _tnode.node_id = 0; - _tnode.node_type = TPlanNodeType::SCHEMA_SCAN_NODE; - _tnode.num_children = 0; - _tnode.limit = -1; - _tnode.row_tuples.push_back(0); - _tnode.nullable_tuples.push_back(false); - _tnode.mysql_scan_node.tuple_id = 0; - _tnode.mysql_scan_node.table_name = "dim_lbs_device"; - _tnode.mysql_scan_node.columns.push_back("*"); - _tnode.mysql_scan_node.filters.push_back("id = 1"); - _tnode.__isset.mysql_scan_node = true; - } - -protected: - virtual void SetUp() { - } - virtual void TearDown() { - } - TPlanNode _tnode; - ObjectPool _obj_pool; - DescriptorTbl* _desc_tbl; - RuntimeState _runtim_state; -}; - - -TEST_F(MysqlScanNodeTest, normal_use) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - std::vector scan_ranges; - status = scan_node.set_scan_ranges(scan_ranges); - ASSERT_TRUE(status.ok()); - std::stringstream out; - scan_node.debug_string(1, &out); - LOG(WARNING) << out.str(); - - status = scan_node.open(&_runtim_state); - ASSERT_TRUE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos = false; - - while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_TRUE(status.ok()); - - if (!eos) { - for (int i = 0; i < row_batch.num_rows(); ++i) { - TupleRow* row = row_batch.get_row(i); - LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); - } - } - } - - status = scan_node.close(&_runtim_state); - ASSERT_TRUE(status.ok()); -} -TEST_F(MysqlScanNodeTest, Prepare_fail_1) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - scan_node._tuple_id = 1; - Status status = scan_node.prepare(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(MysqlScanNodeTest, Prepare_fail_2) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - TableDescriptor* old = _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; - _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = NULL; - Status status = scan_node.prepare(&_runtim_state); - ASSERT_FALSE(status.ok()); - _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = old; -} -TEST_F(MysqlScanNodeTest, open_fail_1) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - scan_node._table_name = "no_such_table"; - status = scan_node.open(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(MysqlScanNodeTest, open_fail_3) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - scan_node._columns.clear(); - scan_node._columns.push_back("id"); - status = scan_node.open(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(MysqlScanNodeTest, open_fail_2) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - scan_node._my_param.host = ""; - status = scan_node.open(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(MysqlScanNodeTest, invalid_input) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = 
scan_node.prepare(NULL); - ASSERT_FALSE(status.ok()); - status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - status = scan_node.open(NULL); - ASSERT_FALSE(status.ok()); - status = scan_node.open(&_runtim_state); - ASSERT_TRUE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos = false; - status = scan_node.get_next(NULL, &row_batch, &eos); - ASSERT_FALSE(status.ok()); - - while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_TRUE(status.ok()); - - for (int i = 0; i < row_batch.num_rows(); ++i) { - TupleRow* row = row_batch.get_row(i); - LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); - } - } -} -TEST_F(MysqlScanNodeTest, no_init) { - MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.open(&_runtim_state); - ASSERT_FALSE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos = false; - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. \n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include +#include + +#include "common/object_pool.h" +#include "exec/mysql_scan_node.h" +#include "exec/text_converter.inline.h" +#include "gen_cpp/PlanNodes_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "runtime/runtime_state.h" +#include "runtime/row_batch.h" +#include "runtime/string_value.h" +#include "runtime/tuple_row.h" +#include "schema_scan_node.h" +#include "util/runtime_profile.h" +#include "util/debug_util.h" + +using std::vector; + +namespace palo { + +// mock +class MysqlScanNodeTest : public testing::Test { +public: + MysqlScanNodeTest() : _runtim_state("test") { + TDescriptorTable t_desc_table; + + // table descriptors + TTableDescriptor t_table_desc; + + t_table_desc.id = 0; + t_table_desc.tableType = TTableType::MYSQL_TABLE; + t_table_desc.numCols = 0; + t_table_desc.numClusteringCols = 0; + t_table_desc.mysqlTable.tableName = "table"; + t_table_desc.mysqlTable.mysqlHost = "host"; + t_table_desc.mysqlTable.mysqlPort = "port"; + t_table_desc.mysqlTable.mysqlUser = "user"; + t_table_desc.mysqlTable.mysqlPasswd = "passwd"; + t_table_desc.tableName = "table"; + t_table_desc.dbName = "db"; + t_table_desc.__isset.mysqlTable = true; + t_desc_table.tableDescriptors.push_back(t_table_desc); + t_desc_table.__isset.tableDescriptors = true; + // TSlotDescriptor + int offset = 1; + int i = 0; + // dummy + { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_INT)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(-1); + t_slot_desc.__set_slotIdx(i); + t_slot_desc.__set_isMaterialized(false); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(int); + } + // id + { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_INT)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(-1); + t_slot_desc.__set_slotIdx(i); + 
t_slot_desc.__set_isMaterialized(true); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(int); + } + ++i; + // model + { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(0); + t_slot_desc.__set_slotIdx(i); + t_slot_desc.__set_isMaterialized(true); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(StringValue); + } + ++i; + // price + { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(1); + t_slot_desc.__set_slotIdx(i); + t_slot_desc.__set_isMaterialized(true); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(StringValue); + } + ++i; + // grade + { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(2); + t_slot_desc.__set_slotIdx(i); + t_slot_desc.__set_isMaterialized(true); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(StringValue); + } + + t_desc_table.__isset.slotDescriptors = true; + // TTupleDescriptor + TTupleDescriptor t_tuple_desc; + t_tuple_desc.id = 0; + t_tuple_desc.byteSize = offset; + t_tuple_desc.numNullBytes = 1; + t_tuple_desc.tableId = 0; + t_tuple_desc.__isset.tableId = true; + t_desc_table.tupleDescriptors.push_back(t_tuple_desc); + + DescriptorTbl::create(&_obj_pool, t_desc_table, &_desc_tbl); + + _runtim_state.set_desc_tbl(_desc_tbl); + + // Node Id + _tnode.node_id = 0; + _tnode.node_type = TPlanNodeType::SCHEMA_SCAN_NODE; + _tnode.num_children = 0; + _tnode.limit = -1; + _tnode.row_tuples.push_back(0); + _tnode.nullable_tuples.push_back(false); + _tnode.mysql_scan_node.tuple_id = 0; + _tnode.mysql_scan_node.table_name = "dim_lbs_device"; + _tnode.mysql_scan_node.columns.push_back("*"); + _tnode.mysql_scan_node.filters.push_back("id = 1"); + _tnode.__isset.mysql_scan_node = true; + } + +protected: + virtual void SetUp() { + } + virtual void TearDown() { + } + TPlanNode _tnode; + ObjectPool _obj_pool; + DescriptorTbl* _desc_tbl; + RuntimeState _runtim_state; +}; + + +TEST_F(MysqlScanNodeTest, normal_use) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + std::vector scan_ranges; + status = scan_node.set_scan_ranges(scan_ranges); + ASSERT_TRUE(status.ok()); + std::stringstream out; + scan_node.debug_string(1, &out); + LOG(WARNING) << out.str(); + + status = scan_node.open(&_runtim_state); + ASSERT_TRUE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos = false; + + while (!eos) { + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_TRUE(status.ok()); + + if (!eos) { + for (int i = 0; i < row_batch.num_rows(); ++i) { + TupleRow* row = row_batch.get_row(i); + LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); + } + } + } + + status = scan_node.close(&_runtim_state); + ASSERT_TRUE(status.ok()); +} +TEST_F(MysqlScanNodeTest, Prepare_fail_1) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + scan_node._tuple_id = 1; + Status status = 
scan_node.prepare(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(MysqlScanNodeTest, Prepare_fail_2) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + TableDescriptor* old = _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; + _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = NULL; + Status status = scan_node.prepare(&_runtim_state); + ASSERT_FALSE(status.ok()); + _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = old; +} +TEST_F(MysqlScanNodeTest, open_fail_1) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + scan_node._table_name = "no_such_table"; + status = scan_node.open(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(MysqlScanNodeTest, open_fail_3) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + scan_node._columns.clear(); + scan_node._columns.push_back("id"); + status = scan_node.open(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(MysqlScanNodeTest, open_fail_2) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + scan_node._my_param.host = ""; + status = scan_node.open(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(MysqlScanNodeTest, invalid_input) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(NULL); + ASSERT_FALSE(status.ok()); + status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + status = scan_node.open(NULL); + ASSERT_FALSE(status.ok()); + status = scan_node.open(&_runtim_state); + ASSERT_TRUE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos = false; + status = scan_node.get_next(NULL, &row_batch, &eos); + ASSERT_FALSE(status.ok()); + + while (!eos) { + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_TRUE(status.ok()); + + for (int i = 0; i < row_batch.num_rows(); ++i) { + TupleRow* row = row_batch.get_row(i); + LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); + } + } +} +TEST_F(MysqlScanNodeTest, no_init) { + MysqlScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.open(&_runtim_state); + ASSERT_FALSE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos = false; + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/mysql_scanner_test.cpp b/be/test/exec/mysql_scanner_test.cpp index 32b80e5ede..f97e25eb1b 100644 --- a/be/test/exec/mysql_scanner_test.cpp +++ b/be/test/exec/mysql_scanner_test.cpp @@ -13,113 +13,113 @@ // specific language governing permissions and limitations // under the License. 
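The mysql_scanner_test.cpp hunk that follows drives MysqlScanner directly: open(), then query(table, fields, filters), then get_next_row() until eos, printing the literal "NULL" for any column whose buffer pointer is null. Below is a minimal, self-contained sketch of that null-safe row handling; the char** / unsigned long* row layout imitates what mysql_fetch_row()/mysql_fetch_lengths() return in the MySQL C API the scanner wraps, and print_row here is an illustrative helper, not a function from this patch.

// Illustrative only: null-safe column printing in the spirit of the test's
// read loop. 'row' and 'lengths' imitate the char** / unsigned long* pair of
// the MySQL C API; here they are plain local arrays.
#include <cstdio>

static void print_row(char** row, const unsigned long* lengths, int field_num) {
    for (int i = 0; i < field_num; ++i) {
        if (row[i] != nullptr) {
            std::printf("%.*s\t", static_cast<int>(lengths[i]), row[i]);
        } else {
            std::printf("NULL\t");   // SQL NULL comes back as a null pointer
        }
    }
    std::printf("\n");
}

int main() {
    char id[] = "1";
    char* row[] = {id, nullptr};      // second column is SQL NULL
    unsigned long lengths[] = {1, 0};
    print_row(row, lengths, 2);       // prints: 1    NULL
    return 0;
}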
-#include -#include - -#include "common/object_pool.h" -#include "exec/mysql_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -class MysqlScannerTest : public testing::Test { -public: - MysqlScannerTest() { - _param.host = "host"; - _param.port = "port"; - _param.user = "user"; - _param.passwd = "passwd"; - _param.db = "db"; - } - -protected: - virtual void SetUp() { - } - MysqlScannerParam _param; -}; - -TEST_F(MysqlScannerTest, normal_use) { - MysqlScanner scanner(_param); - Status status = scanner.open(); - ASSERT_TRUE(status.ok()); - std::vector fields; - fields.push_back("*"); - std::vector filters; - filters.push_back("id = 1"); - status = scanner.query("dim_lbs_device", fields, filters); - ASSERT_TRUE(status.ok()); - bool eos = false; - char** buf; - unsigned long* length; - status = scanner.get_next_row(NULL, &length, &eos); - ASSERT_FALSE(status.ok()); - - while (!eos) { - status = scanner.get_next_row(&buf, &length, &eos); - - if (eos) { - break; - } - - ASSERT_TRUE(status.ok()); - - for (int i = 0; i < scanner.field_num(); ++i) { - if (buf[i]) { - LOG(WARNING) << buf[i]; - } else { - LOG(WARNING) << "NULL"; - } - } - } -} - -TEST_F(MysqlScannerTest, no_init) { - MysqlScanner scanner(_param); - std::vector fields; - fields.push_back("*"); - std::vector filters; - filters.push_back("id = 1"); - Status status = scanner.query("dim_lbs_device", fields, filters); - ASSERT_FALSE(status.ok()); - status = scanner.query("select 1"); - ASSERT_FALSE(status.ok()); - bool eos = false; - char** buf; - unsigned long* length; - status = scanner.get_next_row(&buf, &length, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(MysqlScannerTest, query_failed) { - MysqlScanner scanner(_param); - Status status = scanner.open(); - ASSERT_TRUE(status.ok()); - std::vector fields; - fields.push_back("*"); - std::vector filters; - filters.push_back("id = 1"); - status = scanner.query("no_such_table", fields, filters); - ASSERT_FALSE(status.ok()); -} - -TEST_F(MysqlScannerTest, open_failed) { - MysqlScannerParam invalid_param; - MysqlScanner scanner(invalid_param); - Status status = scanner.open(); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/mysql_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +class MysqlScannerTest : public testing::Test { +public: + MysqlScannerTest() { + _param.host = "host"; + _param.port = "port"; + _param.user = "user"; + _param.passwd = "passwd"; + _param.db = "db"; + } + +protected: + virtual void SetUp() { + } + MysqlScannerParam _param; +}; + +TEST_F(MysqlScannerTest, normal_use) { + MysqlScanner scanner(_param); + Status status = scanner.open(); + ASSERT_TRUE(status.ok()); + std::vector fields; + fields.push_back("*"); + std::vector filters; + filters.push_back("id = 1"); + status = scanner.query("dim_lbs_device", fields, filters); + ASSERT_TRUE(status.ok()); + bool eos = false; + char** buf; + unsigned long* length; + status = scanner.get_next_row(NULL, &length, &eos); + ASSERT_FALSE(status.ok()); + + while (!eos) { + status = scanner.get_next_row(&buf, &length, &eos); + + if (eos) { + break; + } + + ASSERT_TRUE(status.ok()); + + for (int i = 0; i < scanner.field_num(); ++i) { + if (buf[i]) { + LOG(WARNING) << buf[i]; + } else { + LOG(WARNING) << "NULL"; + } + } + } +} + +TEST_F(MysqlScannerTest, no_init) { + MysqlScanner scanner(_param); + std::vector fields; + fields.push_back("*"); + std::vector filters; + filters.push_back("id = 1"); + Status status = scanner.query("dim_lbs_device", fields, filters); + ASSERT_FALSE(status.ok()); + status = scanner.query("select 1"); + ASSERT_FALSE(status.ok()); + bool eos = false; + char** buf; + unsigned long* length; + status = scanner.get_next_row(&buf, &length, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(MysqlScannerTest, query_failed) { + MysqlScanner scanner(_param); + Status status = scanner.open(); + ASSERT_TRUE(status.ok()); + std::vector fields; + fields.push_back("*"); + std::vector filters; + filters.push_back("id = 1"); + status = scanner.query("no_such_table", fields, filters); + ASSERT_FALSE(status.ok()); +} + +TEST_F(MysqlScannerTest, open_failed) { + MysqlScannerParam invalid_param; + MysqlScanner scanner(invalid_param); + Status status = scanner.open(); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scan_node_test.cpp b/be/test/exec/schema_scan_node_test.cpp index 3e57cf90f6..d68e837886 100644 --- a/be/test/exec/schema_scan_node_test.cpp +++ b/be/test/exec/schema_scan_node_test.cpp @@ -13,228 +13,228 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include -#include - -#include "common/object_pool.h" -#include "exec/text_converter.hpp" -#include "exec/schema_scan_node.h" -#include "gen_cpp/PlanNodes_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "runtime/runtime_state.h" -#include "runtime/row_batch.h" -#include "runtime/string_value.h" -#include "runtime/tuple_row.h" -#include "schema_scan_node.h" -#include "util/runtime_profile.h" -#include "util/debug_util.h" - -namespace palo { - -// mock -class SchemaScanNodeTest : public testing::Test { -public: - SchemaScanNodeTest() : _runtim_state("test") { - TDescriptorTable t_desc_table; - - // table descriptors - TTableDescriptor t_table_desc; - - t_table_desc.id = 0; - t_table_desc.tableType = TTableType::SCHEMA_TABLE; - t_table_desc.numCols = 0; - t_table_desc.numClusteringCols = 0; - t_table_desc.schemaTable.tableType = TSchemaTableType::SCH_AUTHORS; - t_table_desc.tableName = "test_table"; - t_table_desc.dbName = "test_db"; - t_table_desc.__isset.schemaTable = true; - t_desc_table.tableDescriptors.push_back(t_table_desc); - t_desc_table.__isset.tableDescriptors = true; - // TSlotDescriptor - int offset = 0; - - for (int i = 0; i < 3; ++i) { - TSlotDescriptor t_slot_desc; - t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); - t_slot_desc.__set_columnPos(i); - t_slot_desc.__set_byteOffset(offset); - t_slot_desc.__set_nullIndicatorByte(0); - t_slot_desc.__set_nullIndicatorBit(-1); - t_slot_desc.__set_slotIdx(i); - t_slot_desc.__set_isMaterialized(true); - t_desc_table.slotDescriptors.push_back(t_slot_desc); - offset += sizeof(StringValue); - } - - t_desc_table.__isset.slotDescriptors = true; - // TTupleDescriptor - TTupleDescriptor t_tuple_desc; - t_tuple_desc.id = 0; - t_tuple_desc.byteSize = offset; - t_tuple_desc.numNullBytes = 0; - t_tuple_desc.tableId = 0; - t_tuple_desc.__isset.tableId = true; - t_desc_table.tupleDescriptors.push_back(t_tuple_desc); - - DescriptorTbl::create(&_obj_pool, t_desc_table, &_desc_tbl); - - _runtim_state.set_desc_tbl(_desc_tbl); - - // Node Id - _tnode.node_id = 0; - _tnode.node_type = TPlanNodeType::SCHEMA_SCAN_NODE; - _tnode.num_children = 0; - _tnode.limit = -1; - _tnode.row_tuples.push_back(0); - _tnode.nullable_tuples.push_back(false); - _tnode.schema_scan_node.table_name = "test_table"; - _tnode.schema_scan_node.tuple_id = 0; - _tnode.__isset.schema_scan_node = true; - } - - virtual ~SchemaScanNodeTest() { } - - virtual void SetUp() { - } - virtual void TearDown() { - } -private: - TPlanNode _tnode; - ObjectPool _obj_pool; - DescriptorTbl* _desc_tbl; - RuntimeState _runtim_state; -}; - -TEST_F(SchemaScanNodeTest, normal_use) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - std::vector scan_ranges; - status = scan_node.set_scan_ranges(scan_ranges); - ASSERT_TRUE(status.ok()); - std::stringstream out; - scan_node.debug_string(1, &out); - LOG(WARNING) << out.str(); - - status = scan_node.open(&_runtim_state); - ASSERT_TRUE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos = false; - - while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_TRUE(status.ok()); - - if (!eos) { - for (int i = 0; i < row_batch.num_rows(); ++i) { - TupleRow* row = row_batch.get_row(i); - LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); - } - } - } - - status = 
scan_node.close(&_runtim_state); - ASSERT_TRUE(status.ok()); -} -TEST_F(SchemaScanNodeTest, Prepare_fail_1) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - TableDescriptor* old = _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; - _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = NULL; - Status status = scan_node.prepare(&_runtim_state); - ASSERT_FALSE(status.ok()); - _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = old; -} -TEST_F(SchemaScanNodeTest, Prepare_fail_2) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - scan_node._tuple_id = 1; - Status status = scan_node.prepare(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaScanNodeTest, dummy) { - SchemaTableDescriptor* t_desc = (SchemaTableDescriptor*) - _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; - t_desc->_schema_table_type = TSchemaTableType::SCH_EVENTS; - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - std::vector scan_ranges; - status = scan_node.set_scan_ranges(scan_ranges); - ASSERT_TRUE(status.ok()); - std::stringstream out; - scan_node.debug_string(1, &out); - LOG(WARNING) << out.str(); - - status = scan_node.open(&_runtim_state); - ASSERT_TRUE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos = false; - - while (!eos) { - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_TRUE(status.ok()); - - if (!eos) { - for (int i = 0; i < row_batch.num_rows(); ++i) { - TupleRow* row = row_batch.get_row(i); - LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); - } - } - } - - status = scan_node.close(&_runtim_state); - ASSERT_TRUE(status.ok()); - t_desc->_schema_table_type = TSchemaTableType::SCH_AUTHORS; -} -TEST_F(SchemaScanNodeTest, get_dest_desc_fail) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - scan_node._tuple_id = 1; - Status status = scan_node.prepare(&_runtim_state); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaScanNodeTest, invalid_param) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - Status status = scan_node.prepare(NULL); - ASSERT_FALSE(status.ok()); - status = scan_node.prepare(&_runtim_state); - ASSERT_TRUE(status.ok()); - status = scan_node.open(NULL); - ASSERT_FALSE(status.ok()); - status = scan_node.open(&_runtim_state); - ASSERT_TRUE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos; - status = scan_node.get_next(NULL, &row_batch, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaScanNodeTest, no_init) { - SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); - //Status status = scan_node.prepare(&_runtim_state); - //ASSERT_TRUE(status.ok()); - Status status = scan_node.open(&_runtim_state); - ASSERT_FALSE(status.ok()); - RowBatch row_batch(scan_node._row_descriptor, 100); - bool eos; - status = scan_node.get_next(&_runtim_state, &row_batch, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include +#include + +#include "common/object_pool.h" +#include "exec/text_converter.hpp" +#include "exec/schema_scan_node.h" +#include "gen_cpp/PlanNodes_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "runtime/runtime_state.h" +#include "runtime/row_batch.h" +#include "runtime/string_value.h" +#include "runtime/tuple_row.h" +#include "schema_scan_node.h" +#include "util/runtime_profile.h" +#include "util/debug_util.h" + +namespace palo { + +// mock +class SchemaScanNodeTest : public testing::Test { +public: + SchemaScanNodeTest() : _runtim_state("test") { + TDescriptorTable t_desc_table; + + // table descriptors + TTableDescriptor t_table_desc; + + t_table_desc.id = 0; + t_table_desc.tableType = TTableType::SCHEMA_TABLE; + t_table_desc.numCols = 0; + t_table_desc.numClusteringCols = 0; + t_table_desc.schemaTable.tableType = TSchemaTableType::SCH_AUTHORS; + t_table_desc.tableName = "test_table"; + t_table_desc.dbName = "test_db"; + t_table_desc.__isset.schemaTable = true; + t_desc_table.tableDescriptors.push_back(t_table_desc); + t_desc_table.__isset.tableDescriptors = true; + // TSlotDescriptor + int offset = 0; + + for (int i = 0; i < 3; ++i) { + TSlotDescriptor t_slot_desc; + t_slot_desc.__set_slotType(to_thrift(TYPE_STRING)); + t_slot_desc.__set_columnPos(i); + t_slot_desc.__set_byteOffset(offset); + t_slot_desc.__set_nullIndicatorByte(0); + t_slot_desc.__set_nullIndicatorBit(-1); + t_slot_desc.__set_slotIdx(i); + t_slot_desc.__set_isMaterialized(true); + t_desc_table.slotDescriptors.push_back(t_slot_desc); + offset += sizeof(StringValue); + } + + t_desc_table.__isset.slotDescriptors = true; + // TTupleDescriptor + TTupleDescriptor t_tuple_desc; + t_tuple_desc.id = 0; + t_tuple_desc.byteSize = offset; + t_tuple_desc.numNullBytes = 0; + t_tuple_desc.tableId = 0; + t_tuple_desc.__isset.tableId = true; + t_desc_table.tupleDescriptors.push_back(t_tuple_desc); + + DescriptorTbl::create(&_obj_pool, t_desc_table, &_desc_tbl); + + _runtim_state.set_desc_tbl(_desc_tbl); + + // Node Id + _tnode.node_id = 0; + _tnode.node_type = TPlanNodeType::SCHEMA_SCAN_NODE; + _tnode.num_children = 0; + _tnode.limit = -1; + _tnode.row_tuples.push_back(0); + _tnode.nullable_tuples.push_back(false); + _tnode.schema_scan_node.table_name = "test_table"; + _tnode.schema_scan_node.tuple_id = 0; + _tnode.__isset.schema_scan_node = true; + } + + virtual ~SchemaScanNodeTest() { } + + virtual void SetUp() { + } + virtual void TearDown() { + } +private: + TPlanNode _tnode; + ObjectPool _obj_pool; + DescriptorTbl* _desc_tbl; + RuntimeState _runtim_state; +}; + +TEST_F(SchemaScanNodeTest, normal_use) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + std::vector scan_ranges; + status = scan_node.set_scan_ranges(scan_ranges); + ASSERT_TRUE(status.ok()); + std::stringstream out; + scan_node.debug_string(1, &out); + LOG(WARNING) << out.str(); + + status = scan_node.open(&_runtim_state); + ASSERT_TRUE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos = false; + + while (!eos) { + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_TRUE(status.ok()); + + if (!eos) { + for (int i = 0; i < row_batch.num_rows(); ++i) { + TupleRow* row = 
row_batch.get_row(i); + LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); + } + } + } + + status = scan_node.close(&_runtim_state); + ASSERT_TRUE(status.ok()); +} +TEST_F(SchemaScanNodeTest, Prepare_fail_1) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + TableDescriptor* old = _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; + _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = NULL; + Status status = scan_node.prepare(&_runtim_state); + ASSERT_FALSE(status.ok()); + _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc = old; +} +TEST_F(SchemaScanNodeTest, Prepare_fail_2) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + scan_node._tuple_id = 1; + Status status = scan_node.prepare(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaScanNodeTest, dummy) { + SchemaTableDescriptor* t_desc = (SchemaTableDescriptor*) + _desc_tbl->_tuple_desc_map[(TupleId)0]->_table_desc; + t_desc->_schema_table_type = TSchemaTableType::SCH_EVENTS; + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + std::vector scan_ranges; + status = scan_node.set_scan_ranges(scan_ranges); + ASSERT_TRUE(status.ok()); + std::stringstream out; + scan_node.debug_string(1, &out); + LOG(WARNING) << out.str(); + + status = scan_node.open(&_runtim_state); + ASSERT_TRUE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos = false; + + while (!eos) { + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_TRUE(status.ok()); + + if (!eos) { + for (int i = 0; i < row_batch.num_rows(); ++i) { + TupleRow* row = row_batch.get_row(i); + LOG(WARNING) << "input row: " << print_row(row, scan_node._row_descriptor); + } + } + } + + status = scan_node.close(&_runtim_state); + ASSERT_TRUE(status.ok()); + t_desc->_schema_table_type = TSchemaTableType::SCH_AUTHORS; +} +TEST_F(SchemaScanNodeTest, get_dest_desc_fail) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + scan_node._tuple_id = 1; + Status status = scan_node.prepare(&_runtim_state); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaScanNodeTest, invalid_param) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + Status status = scan_node.prepare(NULL); + ASSERT_FALSE(status.ok()); + status = scan_node.prepare(&_runtim_state); + ASSERT_TRUE(status.ok()); + status = scan_node.open(NULL); + ASSERT_FALSE(status.ok()); + status = scan_node.open(&_runtim_state); + ASSERT_TRUE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos; + status = scan_node.get_next(NULL, &row_batch, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaScanNodeTest, no_init) { + SchemaScanNode scan_node(&_obj_pool, _tnode, *_desc_tbl); + //Status status = scan_node.prepare(&_runtim_state); + //ASSERT_TRUE(status.ok()); + Status status = scan_node.open(&_runtim_state); + ASSERT_FALSE(status.ok()); + RowBatch row_batch(scan_node._row_descriptor, 100); + bool eos; + status = scan_node.get_next(&_runtim_state, &row_batch, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. 
\n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp b/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp index fdce30dc80..ee42696b41 100644 --- a/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_authors_scanner_test.cpp @@ -13,94 +13,94 @@ // specific language governing permissions and limitations // under the License. -#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_authors_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -class SchemaAuthorScannerTest : public testing::Test { -public: - SchemaAuthorScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaAuthorScannerTest, normal_use) { - SchemaAuthorsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - while (!eos) { - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - for (int i = 0; i < 3; ++i) { - LOG(INFO) - << ((StringValue *)tuple->get_slot(tuple_desc->slots()[i]->tuple_offset()))->ptr; - } - } -} - -TEST_F(SchemaAuthorScannerTest, use_with_no_init) { - SchemaAuthorsScanner scanner; - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaAuthorScannerTest, invalid_param) { - SchemaAuthorsScanner scanner; - Status status = scanner.init(&_param, NULL); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, NULL, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_authors_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +class SchemaAuthorScannerTest : public testing::Test { +public: + SchemaAuthorScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaAuthorScannerTest, normal_use) { + SchemaAuthorsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + while (!eos) { + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + for (int i = 0; i < 3; ++i) { + LOG(INFO) + << ((StringValue *)tuple->get_slot(tuple_desc->slots()[i]->tuple_offset()))->ptr; + } + } +} + +TEST_F(SchemaAuthorScannerTest, use_with_no_init) { + SchemaAuthorsScanner scanner; + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaAuthorScannerTest, invalid_param) { + SchemaAuthorsScanner scanner; + Status status = scanner.init(&_param, NULL); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, NULL, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp b/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp index 6c21322f82..3fae7316d2 100644 --- a/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_charsets_scanner_test.cpp @@ -13,94 +13,94 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_charsets_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "util/debug_util.h" - -namespace palo { - -class SchemaCharsetsScannerTest : public testing::Test { -public: - SchemaCharsetsScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaCharsetsScannerTest, normal_use) { - SchemaCharsetsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - while (!eos) { - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - if (!eos) { - LOG(INFO) << print_tuple(tuple, *tuple_desc); - } - } -} - -TEST_F(SchemaCharsetsScannerTest, use_with_no_init) { - SchemaCharsetsScanner scanner; - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaCharsetsScannerTest, invalid_param) { - SchemaCharsetsScanner scanner; - Status status = scanner.init(&_param, NULL); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, NULL, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_charsets_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "util/debug_util.h" + +namespace palo { + +class SchemaCharsetsScannerTest : public testing::Test { +public: + SchemaCharsetsScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaCharsetsScannerTest, normal_use) { + SchemaCharsetsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + while (!eos) { + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + if (!eos) { + LOG(INFO) << print_tuple(tuple, *tuple_desc); + } + } +} + +TEST_F(SchemaCharsetsScannerTest, use_with_no_init) { + SchemaCharsetsScanner scanner; + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaCharsetsScannerTest, invalid_param) { + SchemaCharsetsScanner scanner; + Status status = scanner.init(&_param, NULL); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, NULL, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp b/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp index c55b5fef4d..e530775fde 100644 --- a/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_collations_scanner_test.cpp @@ -13,94 +13,94 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_collations_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "util/debug_util.h" - -namespace palo { - -class SchemaCollationsScannerTest : public testing::Test { -public: - SchemaCollationsScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaCollationsScannerTest, normal_use) { - SchemaCollationsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - while (!eos) { - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - if (!eos) { - LOG(INFO) << print_tuple(tuple, *tuple_desc); - } - } -} - -TEST_F(SchemaCollationsScannerTest, use_with_no_init) { - SchemaCollationsScanner scanner; - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaCollationsScannerTest, invalid_param) { - SchemaCollationsScanner scanner; - Status status = scanner.init(&_param, NULL); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, NULL, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_collations_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "util/debug_util.h" + +namespace palo { + +class SchemaCollationsScannerTest : public testing::Test { +public: + SchemaCollationsScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaCollationsScannerTest, normal_use) { + SchemaCollationsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + while (!eos) { + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + if (!eos) { + LOG(INFO) << print_tuple(tuple, *tuple_desc); + } + } +} + +TEST_F(SchemaCollationsScannerTest, use_with_no_init) { + SchemaCollationsScanner scanner; + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaCollationsScannerTest, invalid_param) { + SchemaCollationsScanner scanner; + Status status = scanner.init(&_param, NULL); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, NULL, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp index 9bf5e14a22..4a7971f704 100644 --- a/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_columns_scanner_test.cpp @@ -13,201 +13,201 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "gen_cpp/Frontend_types.h" -#include "exec/schema_scanner/schema_columns_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaColumnsScannerTest : public testing::Test { -public: - SchemaColumnsScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaColumnsScannerTest, normal_use) { - SchemaColumnsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaColumnsScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaColumnsScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaColumnsScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.init(NULL, &_obj_pool); - 
ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaColumnsScannerTest, table_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_table_result = Status("get table failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaColumnsScannerTest, desc_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_desc_result = Status("get desc failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaColumnsScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaColumnsScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "gen_cpp/Frontend_types.h" +#include "exec/schema_scanner/schema_columns_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaColumnsScannerTest : public testing::Test { +public: + SchemaColumnsScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaColumnsScannerTest, normal_use) { + SchemaColumnsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaColumnsScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaColumnsScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaColumnsScannerTest, input_fail) { + table_num = 1; 
+ db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaColumnsScannerTest, table_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_table_result = Status("get table failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaColumnsScannerTest, desc_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_desc_result = Status("get desc failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaColumnsScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaColumnsScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp index 8b56e0969b..f01700d095 100644 --- a/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_create_table_scanner_test.cpp @@ -13,201 +13,201 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_create_table_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "gen_cpp/Frontend_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaCreateTableScannerTest : public testing::Test { -public: - SchemaCreateTableScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaCreateTableScannerTest, normal_use) { - SchemaCreateTableScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaCreateTableScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaCreateTableScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaCreateTableScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status 
status = scanner.init(NULL, &_obj_pool); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaCreateTableScannerTest, table_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_table_result = Status("get table failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaCreateTableScannerTest, desc_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_desc_result = Status("get desc failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaCreateTableScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaCreateTableScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_create_table_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "gen_cpp/Frontend_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaCreateTableScannerTest : public testing::Test { +public: + SchemaCreateTableScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaCreateTableScannerTest, normal_use) { + SchemaCreateTableScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaCreateTableScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaCreateTableScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} 
+TEST_F(SchemaCreateTableScannerTest, input_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaCreateTableScannerTest, table_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_table_result = Status("get table failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaCreateTableScannerTest, desc_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_desc_result = Status("get desc failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaCreateTableScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaCreateTableScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp b/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp index 96419d6d6e..ee281cb700 100644 --- a/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_engines_scanner_test.cpp @@ -13,94 +13,94 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_engines_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "util/debug_util.h" - -namespace palo { - -class SchemaEnginesScannerTest : public testing::Test { -public: - SchemaEnginesScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaEnginesScannerTest, normal_use) { - SchemaEnginesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - while (!eos) { - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - if (!eos) { - LOG(INFO) << print_tuple(tuple, *tuple_desc); - } - } -} - -TEST_F(SchemaEnginesScannerTest, use_with_no_init) { - SchemaEnginesScanner scanner; - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaEnginesScannerTest, invalid_param) { - SchemaEnginesScanner scanner; - Status status = scanner.init(&_param, NULL); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, NULL, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_engines_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "util/debug_util.h" + +namespace palo { + +class SchemaEnginesScannerTest : public testing::Test { +public: + SchemaEnginesScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaEnginesScannerTest, normal_use) { + SchemaEnginesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + while (!eos) { + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + if (!eos) { + LOG(INFO) << print_tuple(tuple, *tuple_desc); + } + } +} + +TEST_F(SchemaEnginesScannerTest, use_with_no_init) { + SchemaEnginesScanner scanner; + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaEnginesScannerTest, invalid_param) { + SchemaEnginesScanner scanner; + Status status = scanner.init(&_param, NULL); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, NULL, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp index 7359e181b6..f415410e07 100644 --- a/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_open_tables_scanner_test.cpp @@ -13,201 +13,201 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_open_tables_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "gen_cpp/Frontend_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaOpenTablesScannerTest : public testing::Test { -public: - SchemaOpenTablesScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaOpenTablesScannerTest, normal_use) { - SchemaOpenTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaOpenTablesScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaOpenTablesScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaOpenTablesScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = 
scanner.init(NULL, &_obj_pool); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaOpenTablesScannerTest, table_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_table_result = Status("get table failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaOpenTablesScannerTest, desc_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_desc_result = Status("get desc failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaOpenTablesScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaOpenTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_open_tables_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "gen_cpp/Frontend_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaOpenTablesScannerTest : public testing::Test { +public: + SchemaOpenTablesScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaOpenTablesScannerTest, normal_use) { + SchemaOpenTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaOpenTablesScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaOpenTablesScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaOpenTablesScannerTest, 
input_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaOpenTablesScannerTest, table_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_table_result = Status("get table failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaOpenTablesScannerTest, desc_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_desc_result = Status("get desc failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaOpenTablesScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaOpenTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp b/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp index d8fe90df3e..202e1358fb 100644 --- a/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_schemata_scanner_test.cpp @@ -13,167 +13,167 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_schemata_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "gen_cpp/Frontend_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaSchemataScannerTest : public testing::Test { -public: - SchemaSchemataScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaSchemataScannerTest, normal_use) { - SchemaSchemataScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaSchemataScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaSchemataScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaSchemataScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaSchemataScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaSchemataScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaSchemataScanner scanner; - Status status = scanner.init(NULL, 
&_obj_pool); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaSchemataScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaSchemataScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. \n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_schemata_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "gen_cpp/Frontend_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaSchemataScannerTest : public testing::Test { +public: + SchemaSchemataScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaSchemataScannerTest, normal_use) { + SchemaSchemataScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaSchemataScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaSchemataScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const 
TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaSchemataScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaSchemataScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaSchemataScannerTest, input_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaSchemataScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaSchemataScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaSchemataScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp b/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp index 9c883384e5..d7f2267a63 100644 --- a/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_table_names_scanner_test.cpp @@ -13,184 +13,184 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_table_names_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "gen_cpp/Frontend_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaTableNamesScannerTest : public testing::Test { -public: - SchemaTableNamesScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaTableNamesScannerTest, normal_use) { - SchemaTableNamesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaTableNamesScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTableNamesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaTableNamesScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTableNamesScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaTableNamesScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTableNamesScanner scanner; - Status status = 
scanner.init(NULL, &_obj_pool); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaTableNamesScannerTest, table_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTableNamesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_table_result = Status("get table failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaTableNamesScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTableNamesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. \n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_table_names_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "gen_cpp/Frontend_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaTableNamesScannerTest : public testing::Test { +public: + SchemaTableNamesScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaTableNamesScannerTest, normal_use) { + SchemaTableNamesScanner scanner; + Status status = 
scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaTableNamesScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTableNamesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaTableNamesScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTableNamesScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaTableNamesScannerTest, input_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTableNamesScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaTableNamesScannerTest, table_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTableNamesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_table_result = Status("get table failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaTableNamesScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTableNamesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp index 09467055f3..f970b5b1bd 100644 --- a/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_tables_scanner_test.cpp @@ -13,201 +13,201 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_tables_scanner.h" -#include "exec/schema_scanner/schema_jni_helper.h" -#include "gen_cpp/Frontend_types.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -int db_num = 0; -Status s_db_result; -Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, - TGetDbsResult *db_result) { - for (int i = 0; i < db_num; ++i) { - db_result->dbs.push_back("abc"); - } - return s_db_result; -} - -int table_num = 0; -Status s_table_result; -Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, - TGetTablesResult *table_result) { - for (int i = 0; i < table_num; ++i) { - table_result->tables.push_back("bac"); - } - return s_table_result; -} - -int desc_num = 0; -Status s_desc_result; -Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, - TDescribeTableResult *desc_result) { - for (int i = 0; i < desc_num; ++i) { - TColumnDesc column_desc; - column_desc.__set_columnName("abc"); - column_desc.__set_columnType(TPrimitiveType::BOOLEAN); - TColumnDef column_def; - column_def.columnDesc = column_desc; - column_def.comment = "bac"; - desc_result->columns.push_back(column_def); - } - return s_desc_result; -} - -void init_mock() { - db_num = 0; - table_num = 0; - desc_num = 0; - s_db_result = Status::OK; - s_table_result = Status::OK; - s_desc_result = Status::OK; -} - -class SchemaTablesScannerTest : public testing::Test { -public: - SchemaTablesScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaTablesScannerTest, normal_use) { - SchemaTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaTablesScannerTest, one_column) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_FALSE(eos); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - ASSERT_TRUE(eos); -} -TEST_F(SchemaTablesScannerTest, op_before_init) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaTablesScannerTest, input_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.init(NULL, &_obj_pool); - 
ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - bool eos = false; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaTablesScannerTest, table_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_table_result = Status("get table failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaTablesScannerTest, desc_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - s_desc_result = Status("get desc failed"); - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaTablesScannerTest, start_fail) { - table_num = 1; - db_num = 1; - desc_num = 1; - SchemaTablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - s_db_result = Status("get db failed."); - status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_tables_scanner.h" +#include "exec/schema_scanner/schema_jni_helper.h" +#include "gen_cpp/Frontend_types.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +int db_num = 0; +Status s_db_result; +Status SchemaJniHelper::get_db_names(const TGetDbsParams &db_params, + TGetDbsResult *db_result) { + for (int i = 0; i < db_num; ++i) { + db_result->dbs.push_back("abc"); + } + return s_db_result; +} + +int table_num = 0; +Status s_table_result; +Status SchemaJniHelper::get_table_names(const TGetTablesParams &table_params, + TGetTablesResult *table_result) { + for (int i = 0; i < table_num; ++i) { + table_result->tables.push_back("bac"); + } + return s_table_result; +} + +int desc_num = 0; +Status s_desc_result; +Status SchemaJniHelper::describe_table(const TDescribeTableParams &desc_params, + TDescribeTableResult *desc_result) { + for (int i = 0; i < desc_num; ++i) { + TColumnDesc column_desc; + column_desc.__set_columnName("abc"); + column_desc.__set_columnType(TPrimitiveType::BOOLEAN); + TColumnDef column_def; + column_def.columnDesc = column_desc; + column_def.comment = "bac"; + desc_result->columns.push_back(column_def); + } + return s_desc_result; +} + +void init_mock() { + db_num = 0; + table_num = 0; + desc_num = 0; + s_db_result = Status::OK; + s_table_result = Status::OK; + s_desc_result = Status::OK; +} + +class SchemaTablesScannerTest : public testing::Test { +public: + SchemaTablesScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaTablesScannerTest, normal_use) { + SchemaTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaTablesScannerTest, one_column) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_FALSE(eos); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(eos); +} +TEST_F(SchemaTablesScannerTest, op_before_init) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaTablesScannerTest, input_fail) { + table_num = 1; + db_num 
= 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.init(NULL, &_obj_pool); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + bool eos = false; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaTablesScannerTest, table_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_table_result = Status("get table failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaTablesScannerTest, desc_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + s_desc_result = Status("get desc failed"); + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaTablesScannerTest, start_fail) { + table_num = 1; + db_num = 1; + desc_num = 1; + SchemaTablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + s_db_result = Status("get db failed."); + status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner/schema_variables_scanner_test.cpp b/be/test/exec/schema_scanner/schema_variables_scanner_test.cpp index 755dba595d..eb9f7711e4 100644 --- a/be/test/exec/schema_scanner/schema_variables_scanner_test.cpp +++ b/be/test/exec/schema_scanner/schema_variables_scanner_test.cpp @@ -13,95 +13,95 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner/schema_variables_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" -#include "service/palo_server.h" -#include "util/debug_util.h" - -namespace palo { - -class SchemaVariablesScannerTest : public testing::Test { -public: - SchemaVariablesScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaVariablesScannerTest, normal_use) { - SchemaVariablesScanner scanner; - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - while (!eos) { - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); - if (!eos) { - LOG(INFO) << print_tuple(tuple, *tuple_desc); - } - } -} - -TEST_F(SchemaVariablesScannerTest, use_with_no_init) { - SchemaVariablesScanner scanner; - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Status status = scanner.start((RuntimeState *)1); - ASSERT_FALSE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -TEST_F(SchemaVariablesScannerTest, invalid_param) { - SchemaVariablesScanner scanner; - Status status = scanner.init(&_param, NULL); - ASSERT_FALSE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - const TupleDescriptor *tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - status = scanner.start((RuntimeState *)1); - ASSERT_TRUE(status.ok()); - Tuple *tuple = (Tuple *)g_tuple_buf; - bool eos = false; - status = scanner.get_next_row(tuple, NULL, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner/schema_variables_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" +#include "service/palo_server.h" +#include "util/debug_util.h" + +namespace palo { + +class SchemaVariablesScannerTest : public testing::Test { +public: + SchemaVariablesScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaVariablesScannerTest, normal_use) { + SchemaVariablesScanner scanner; + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + while (!eos) { + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); + if (!eos) { + LOG(INFO) << print_tuple(tuple, *tuple_desc); + } + } +} + +TEST_F(SchemaVariablesScannerTest, use_with_no_init) { + SchemaVariablesScanner scanner; + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Status status = scanner.start((RuntimeState *)1); + ASSERT_FALSE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +TEST_F(SchemaVariablesScannerTest, invalid_param) { + SchemaVariablesScanner scanner; + Status status = scanner.init(&_param, NULL); + ASSERT_FALSE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + const TupleDescriptor *tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + status = scanner.start((RuntimeState *)1); + ASSERT_TRUE(status.ok()); + Tuple *tuple = (Tuple *)g_tuple_buf; + bool eos = false; + status = scanner.get_next_row(tuple, NULL, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. \n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/schema_scanner_test.cpp b/be/test/exec/schema_scanner_test.cpp index dfcf6f4d74..7a3a7b425f 100644 --- a/be/test/exec/schema_scanner_test.cpp +++ b/be/test/exec/schema_scanner_test.cpp @@ -13,105 +13,105 @@ // specific language governing permissions and limitations // under the License. 
-#include -#include - -#include "common/object_pool.h" -#include "exec/schema_scanner.h" -#include "runtime/mem_pool.h" -#include "runtime/descriptors.h" - -namespace palo { - -class SchemaScannerTest : public testing::Test { -public: - SchemaScannerTest() { - } - - virtual void SetUp() { - _param.db = &_db; - _param.table = &_table; - _param.wild = &_wild; - } -private: - ObjectPool _obj_pool; - MemPool _mem_pool; - SchemaScannerParam _param; - std::string _db; - std::string _table; - std::string _wild; -}; - -SchemaScanner::ColumnDesc s_test_columns[] = { - // name, type, size, is_null - { "Name", TYPE_VARCHAR, sizeof(StringValue), false }, - { "Location", TYPE_VARCHAR, sizeof(StringValue), false }, - { "Comment", TYPE_VARCHAR, sizeof(StringValue), false }, - { "is_null", TYPE_VARCHAR, sizeof(StringValue), true }, -}; - -char g_tuple_buf[10000];// enougth for tuple -TEST_F(SchemaScannerTest, normal_use) { - SchemaScanner scanner(s_test_columns, - sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState*)1); - ASSERT_TRUE(status.ok()); - const TupleDescriptor* tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - ASSERT_EQ(65, tuple_desc->byte_size()); - Tuple* tuple = (Tuple*)g_tuple_buf; - bool eos; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_TRUE(status.ok()); -} -TEST_F(SchemaScannerTest, input_fail) { - SchemaScanner scanner(s_test_columns, - sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.init(&_param, &_obj_pool); - ASSERT_TRUE(status.ok()); - status = scanner.start((RuntimeState*)1); - ASSERT_TRUE(status.ok()); - const TupleDescriptor* tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL != tuple_desc); - ASSERT_EQ(65, tuple_desc->byte_size()); - bool eos; - status = scanner.get_next_row(NULL, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaScannerTest, invalid_param) { - SchemaScanner scanner(NULL, sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); - Status status = scanner.init(&_param, &_obj_pool); - ASSERT_FALSE(status.ok()); -} -TEST_F(SchemaScannerTest, no_init_use) { - SchemaScanner scanner(s_test_columns, - sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); - Status status = scanner.start((RuntimeState*)1); - ASSERT_FALSE(status.ok()); - const TupleDescriptor* tuple_desc = scanner.tuple_desc(); - ASSERT_TRUE(NULL == tuple_desc); - Tuple* tuple = (Tuple*)g_tuple_buf; - bool eos; - status = scanner.get_next_row(tuple, &_mem_pool, &eos); - ASSERT_FALSE(status.ok()); -} - -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} - +#include +#include + +#include "common/object_pool.h" +#include "exec/schema_scanner.h" +#include "runtime/mem_pool.h" +#include "runtime/descriptors.h" + +namespace palo { + +class SchemaScannerTest : public testing::Test { +public: + SchemaScannerTest() { + } + + virtual void SetUp() { + _param.db = &_db; + _param.table = &_table; + _param.wild = &_wild; + } +private: + ObjectPool _obj_pool; + MemPool _mem_pool; + SchemaScannerParam _param; + std::string _db; + std::string _table; + std::string _wild; +}; + +SchemaScanner::ColumnDesc s_test_columns[] = { + // name, type, size, is_null + { "Name", TYPE_VARCHAR, sizeof(StringValue), false }, + { "Location", TYPE_VARCHAR, sizeof(StringValue), false }, + { "Comment", TYPE_VARCHAR, sizeof(StringValue), false }, + { "is_null", TYPE_VARCHAR, sizeof(StringValue), true }, +}; + +char g_tuple_buf[10000];// enougth for tuple +TEST_F(SchemaScannerTest, normal_use) { + SchemaScanner scanner(s_test_columns, + sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState*)1); + ASSERT_TRUE(status.ok()); + const TupleDescriptor* tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + ASSERT_EQ(65, tuple_desc->byte_size()); + Tuple* tuple = (Tuple*)g_tuple_buf; + bool eos; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_TRUE(status.ok()); +} +TEST_F(SchemaScannerTest, input_fail) { + SchemaScanner scanner(s_test_columns, + sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.init(&_param, &_obj_pool); + ASSERT_TRUE(status.ok()); + status = scanner.start((RuntimeState*)1); + ASSERT_TRUE(status.ok()); + const TupleDescriptor* tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL != tuple_desc); + ASSERT_EQ(65, tuple_desc->byte_size()); + bool eos; + status = scanner.get_next_row(NULL, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaScannerTest, invalid_param) { + SchemaScanner scanner(NULL, sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); + Status status = scanner.init(&_param, &_obj_pool); + ASSERT_FALSE(status.ok()); +} +TEST_F(SchemaScannerTest, no_init_use) { + SchemaScanner scanner(s_test_columns, + sizeof(s_test_columns) / sizeof(SchemaScanner::ColumnDesc)); + Status status = scanner.start((RuntimeState*)1); + ASSERT_FALSE(status.ok()); + const TupleDescriptor* tuple_desc = scanner.tuple_desc(); + ASSERT_TRUE(NULL == tuple_desc); + Tuple* tuple = (Tuple*)g_tuple_buf; + bool eos; + status = scanner.get_next_row(tuple, &_mem_pool, &eos); + ASSERT_FALSE(status.ok()); +} + +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. 
\n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/be/test/exec/set_executor_test.cpp b/be/test/exec/set_executor_test.cpp index cfa3d893db..a8c1e2b32e 100644 --- a/be/test/exec/set_executor_test.cpp +++ b/be/test/exec/set_executor_test.cpp @@ -13,113 +13,113 @@ // specific language governing permissions and limitations // under the License. -#include - -#include "common/logging.h" -#include "exec/set_executor.h" -#include "runtime/exec_env.h" -#include "service/palo_server.h" - -namespace palo { - -class SetExecutorTest : public testing::Test { -public: - SetExecutorTest() : - _runtim_state("tmp") { - } - - virtual void SetUp() { - } -private: - RuntimeState _runtim_state; -}; - -TEST_F(SetExecutorTest, normal_case) { - ExecEnv exec_env; - PaloServer palo_server(&exec_env); - TSetParams params; - { - TSetVar set_var; - - set_var.type = TSetType::OPT_SESSION; - set_var.variable = "key1"; - TExprNode expr; - expr.node_type = TExprNodeType::STRING_LITERAL; - expr.type = TPrimitiveType::STRING; - expr.__isset.string_literal = true; - expr.string_literal.value = "value1"; - set_var.value.nodes.push_back(expr); - - params.set_vars.push_back(set_var); - } - { - TSetVar set_var; - - set_var.type = TSetType::OPT_GLOBAL; - set_var.variable = "key2"; - TExprNode expr; - expr.node_type = TExprNodeType::STRING_LITERAL; - expr.type = TPrimitiveType::STRING; - expr.__isset.string_literal = true; - expr.string_literal.value = "value2"; - set_var.value.nodes.push_back(expr); - - params.set_vars.push_back(set_var); - } - { - TSetVar set_var; - - set_var.type = TSetType::OPT_DEFAULT; - set_var.variable = "key3"; - TExprNode expr; - expr.node_type = TExprNodeType::STRING_LITERAL; - expr.type = TPrimitiveType::STRING; - expr.__isset.string_literal = true; - expr.string_literal.value = "value3"; - set_var.value.nodes.push_back(expr); - - params.set_vars.push_back(set_var); - } - SetExecutor executor(&palo_server, params); - RowDescriptor row_desc; - Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); - ASSERT_TRUE(status.ok()); - LOG(INFO) << executor.debug_string(); -} -TEST_F(SetExecutorTest, failed_case) { - ExecEnv exec_env; - PaloServer palo_server(&exec_env); - TSetParams params; - { - TSetVar set_var; - - set_var.type = TSetType::OPT_SESSION; - set_var.variable = "key1"; - TExprNode expr; - expr.node_type = TExprNodeType::INT_LITERAL; - expr.type = TPrimitiveType::INT; - expr.__isset.int_literal = true; - set_var.value.nodes.push_back(expr); - - params.set_vars.push_back(set_var); - } - SetExecutor executor(&palo_server, params); - RowDescriptor row_desc; - Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); - ASSERT_FALSE(status.ok()); - LOG(INFO) << executor.debug_string(); -} -} - -int main(int argc, char** argv) { - std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; - if (!palo::config::init(conffile.c_str(), false)) { - fprintf(stderr, "error read config file. 
\n"); - return -1; - } - init_glog("be-test"); - ::testing::InitGoogleTest(&argc, argv); - palo::CpuInfo::Init(); - return RUN_ALL_TESTS(); -} - +#include + +#include "common/logging.h" +#include "exec/set_executor.h" +#include "runtime/exec_env.h" +#include "service/palo_server.h" + +namespace palo { + +class SetExecutorTest : public testing::Test { +public: + SetExecutorTest() : + _runtim_state("tmp") { + } + + virtual void SetUp() { + } +private: + RuntimeState _runtim_state; +}; + +TEST_F(SetExecutorTest, normal_case) { + ExecEnv exec_env; + PaloServer palo_server(&exec_env); + TSetParams params; + { + TSetVar set_var; + + set_var.type = TSetType::OPT_SESSION; + set_var.variable = "key1"; + TExprNode expr; + expr.node_type = TExprNodeType::STRING_LITERAL; + expr.type = TPrimitiveType::STRING; + expr.__isset.string_literal = true; + expr.string_literal.value = "value1"; + set_var.value.nodes.push_back(expr); + + params.set_vars.push_back(set_var); + } + { + TSetVar set_var; + + set_var.type = TSetType::OPT_GLOBAL; + set_var.variable = "key2"; + TExprNode expr; + expr.node_type = TExprNodeType::STRING_LITERAL; + expr.type = TPrimitiveType::STRING; + expr.__isset.string_literal = true; + expr.string_literal.value = "value2"; + set_var.value.nodes.push_back(expr); + + params.set_vars.push_back(set_var); + } + { + TSetVar set_var; + + set_var.type = TSetType::OPT_DEFAULT; + set_var.variable = "key3"; + TExprNode expr; + expr.node_type = TExprNodeType::STRING_LITERAL; + expr.type = TPrimitiveType::STRING; + expr.__isset.string_literal = true; + expr.string_literal.value = "value3"; + set_var.value.nodes.push_back(expr); + + params.set_vars.push_back(set_var); + } + SetExecutor executor(&palo_server, params); + RowDescriptor row_desc; + Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); + ASSERT_TRUE(status.ok()); + LOG(INFO) << executor.debug_string(); +} +TEST_F(SetExecutorTest, failed_case) { + ExecEnv exec_env; + PaloServer palo_server(&exec_env); + TSetParams params; + { + TSetVar set_var; + + set_var.type = TSetType::OPT_SESSION; + set_var.variable = "key1"; + TExprNode expr; + expr.node_type = TExprNodeType::INT_LITERAL; + expr.type = TPrimitiveType::INT; + expr.__isset.int_literal = true; + set_var.value.nodes.push_back(expr); + + params.set_vars.push_back(set_var); + } + SetExecutor executor(&palo_server, params); + RowDescriptor row_desc; + Status status = executor.prepare((RuntimeState*)&_runtim_state, row_desc); + ASSERT_FALSE(status.ok()); + LOG(INFO) << executor.debug_string(); +} +} + +int main(int argc, char** argv) { + std::string conffile = std::string(getenv("PALO_HOME")) + "/conf/be.conf"; + if (!palo::config::init(conffile.c_str(), false)) { + fprintf(stderr, "error read config file. 
\n"); + return -1; + } + init_glog("be-test"); + ::testing::InitGoogleTest(&argc, argv); + palo::CpuInfo::Init(); + return RUN_ALL_TESTS(); +} + diff --git a/build.sh b/build.sh index 8053470ade..4de56b8ea7 100755 --- a/build.sh +++ b/build.sh @@ -29,7 +29,7 @@ ROOT=`dirname "$0"` ROOT=`cd "$ROOT"; pwd` export PALO_HOME=$ROOT -PARALLEL=8 +PARALLEL=2 # Check java version if [ -z $JAVA_HOME ]; then diff --git a/docs/help/Contents/Account Management/help.md b/docs/help/Contents/Account Management/help.md index 0f9c053be5..0d17ef57f2 100644 --- a/docs/help/Contents/Account Management/help.md +++ b/docs/help/Contents/Account Management/help.md @@ -12,7 +12,7 @@ CREATE USER 命令用于创建一个 Palo 用户。在 Palo 中,一个 user_id host éƒ¨åˆ†ä¹Ÿå¯æŒ‡å®šä¸º domain,语法为:'user_name'@['domain'],å³ä½¿ç”¨ä¸­æ‹¬å·åŒ…围,则 Palo 会认为这个是一个 domain,并å°è¯•è§£æžå…¶ ip 地å€ã€‚ç›®å‰ä»…支æŒç™¾åº¦å†…部的 BNS è§£æžã€‚ -如果指定了角色(ROLE),则会自动将该角色所拥有的æƒé™èµ‹äºˆæ–°åˆ›å»ºçš„è¿™ä¸ªç”¨æˆ·ã€‚å¦‚æžœä¸æŒ‡å®šï¼Œåˆ™è¯¥ç”¨æˆ·é»˜è®¤æ²¡æœ‰ä»»ä½•æƒé™ã€‚ +如果指定了角色(ROLE),则会自动将该角色所拥有的æƒé™èµ‹äºˆæ–°åˆ›å»ºçš„è¿™ä¸ªç”¨æˆ·ã€‚å¦‚æžœä¸æŒ‡å®šï¼Œåˆ™è¯¥ç”¨æˆ·é»˜è®¤æ²¡æœ‰ä»»ä½•æƒé™ã€‚指定的 ROLE 必须已ç»å­˜åœ¨ã€‚ ## example @@ -40,6 +40,10 @@ host éƒ¨åˆ†ä¹Ÿå¯æŒ‡å®šä¸º domain,语法为:'user_name'@['domain'],å³ä½¿ CREATE USER 'jack'@['example_domain'] IDENTIFIED BY '12345'; +6. 创建一个用户,并指定一个角色 + + CREATE USER 'jack'@'%' IDENTIFIED BY '12345' DEFAULT ROLE 'my_role'; + ## keyword CREATE, USER @@ -140,7 +144,7 @@ user_identity: 3. 授予指定库表的æƒé™ç»™è§’色 - GRANT ADMIN_PRIV ON db1.* TO ROLE admin_role + GRANT LOAD_PRIV ON db1.* TO ROLE my_role; ## keyword GRANT diff --git a/docs/help/Contents/Data Manipulation/manipulation_stmt.md b/docs/help/Contents/Data Manipulation/manipulation_stmt.md index 81a2263824..dee0ceda80 100644 --- a/docs/help/Contents/Data Manipulation/manipulation_stmt.md +++ b/docs/help/Contents/Data Manipulation/manipulation_stmt.md @@ -521,6 +521,7 @@ ## description 该语å¥ç”¨äºŽå°†æŒ‡å®šè¡¨çš„æ•°æ®å¯¼å‡ºåˆ°æŒ‡å®šä½ç½®ã€‚ + 该功能通过 broker 进程实现。对于ä¸åŒçš„目的存储系统,需è¦éƒ¨ç½²ä¸åŒçš„ broker。å¯ä»¥é€šè¿‡ SHOW BROKER 查看已部署的 broker。 这是一个异步æ“作,任务æäº¤æˆåŠŸåˆ™è¿”å›žã€‚æ‰§è¡ŒåŽå¯ä½¿ç”¨ SHOW EXPORT 命令查看进度。 语法: @@ -532,10 +533,13 @@ 1. table_name 当å‰è¦å¯¼å‡ºçš„表的表åï¼Œç›®å‰æ”¯æŒengine为olapå’Œmysql的表的导出。 + 2. partition å¯ä»¥åªå¯¼å‡ºæŒ‡å®šè¡¨çš„æŸäº›æŒ‡å®šåˆ†åŒº + 3. export_path 导出的路径,需为目录。目å‰ä¸èƒ½å¯¼å‡ºåˆ°æœ¬åœ°ï¼Œéœ€è¦å¯¼å‡ºåˆ°broker。 + 4. opt_properties ç”¨äºŽæŒ‡å®šä¸€äº›ç‰¹æ®Šå‚æ•°ã€‚ 语法: @@ -544,19 +548,25 @@ å¯ä»¥æŒ‡å®šå¦‚䏋傿•°ï¼š column_separator:指定导出的列分隔符,默认为\t。 line_delimiter:指定导出的行分隔符,默认为\n。 + 5. broker 用于指定导出使用的broker 语法: WITH BROKER broker_name ("key"="value"[,...]) è¿™é‡Œéœ€è¦æŒ‡å®šå…·ä½“çš„broker name, ä»¥åŠæ‰€éœ€çš„broker属性 + 对于ä¸åŒå­˜å‚¨ç³»ç»Ÿå¯¹åº”çš„ broker,这里需è¦è¾“å…¥çš„å‚æ•°ä¸åŒã€‚具体傿•°å¯ä»¥å‚阅:`help broker load` 中 broker 所需属性。 + ## example 1. å°†testTbl表中的所有数æ®å¯¼å‡ºåˆ°hdfs上 EXPORT TABLE testTbl TO "hdfs://hdfs_host:port/a/b/c" WITH BROKER "broker_name" ("username"="xxx", "password"="yyy"); + 2. å°†testTbl表中的分区p1,p2导出到hdfs上 + EXPORT TABLE testTbl PARTITION (p1,p2) TO "hdfs://hdfs_host:port/a/b/c" WITH BROKER "broker_name" ("username"="xxx", "password"="yyy"); 3. 
å°†testTbl表中的所有数æ®å¯¼å‡ºåˆ°hdfs上,以","作为列分隔符 + EXPORT TABLE testTbl TO "hdfs://hdfs_host:port/a/b/c" PROPERTIES ("column_separator"=",") WITH BROKER "broker_name" ("username"="xxx", "password"="yyy"); ## keyword diff --git a/docs/help/Contents/Utility/util_stmt.md b/docs/help/Contents/Utility/util_stmt.md index 2fbafe8295..0df0f2d93c 100644 --- a/docs/help/Contents/Utility/util_stmt.md +++ b/docs/help/Contents/Utility/util_stmt.md @@ -1,13 +1,13 @@ -# DESCRIBE -## description - 该语å¥ç”¨äºŽå±•示指定 table çš„ schema ä¿¡æ¯ - 语法: - DESC[RIBE] [db_name.]table_name [ALL]; - - 说明: - 如果指定 ALL,则显示该 table 的所有 index çš„ schema - -## example - -## keyword +# DESCRIBE +## description + 该语å¥ç”¨äºŽå±•示指定 table çš„ schema ä¿¡æ¯ + 语法: + DESC[RIBE] [db_name.]table_name [ALL]; + + 说明: + 如果指定 ALL,则显示该 table 的所有 index çš„ schema + +## example + +## keyword DESCRIBE,DESC \ No newline at end of file diff --git a/fe/src/com/baidu/palo/analysis/AddFollowerClause.java b/fe/src/com/baidu/palo/analysis/AddFollowerClause.java index 5090b0db24..3bc178be3e 100644 --- a/fe/src/com/baidu/palo/analysis/AddFollowerClause.java +++ b/fe/src/com/baidu/palo/analysis/AddFollowerClause.java @@ -18,20 +18,20 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.ha.FrontendNodeType; - -public class AddFollowerClause extends FrontendClause { - public AddFollowerClause(String hostPort) { - super(hostPort, FrontendNodeType.FOLLOWER); - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("ALTER CLUSTER ADD FOLLOWER \""); - sb.append(hostPort).append("\""); - return sb.toString(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.ha.FrontendNodeType; + +public class AddFollowerClause extends FrontendClause { + public AddFollowerClause(String hostPort) { + super(hostPort, FrontendNodeType.FOLLOWER); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ALTER CLUSTER ADD FOLLOWER \""); + sb.append(hostPort).append("\""); + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/AddObserverClause.java b/fe/src/com/baidu/palo/analysis/AddObserverClause.java index 10d43295af..41baed78c1 100644 --- a/fe/src/com/baidu/palo/analysis/AddObserverClause.java +++ b/fe/src/com/baidu/palo/analysis/AddObserverClause.java @@ -18,20 +18,20 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.ha.FrontendNodeType; - -public class AddObserverClause extends FrontendClause { - public AddObserverClause(String hostPort) { - super(hostPort, FrontendNodeType.OBSERVER); - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("ALTER CLUSTER ADD OBSERVER \""); - sb.append(hostPort).append("\""); - return sb.toString(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.ha.FrontendNodeType; + +public class AddObserverClause extends FrontendClause { + public AddObserverClause(String hostPort) { + super(hostPort, FrontendNodeType.OBSERVER); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ALTER CLUSTER ADD OBSERVER \""); + sb.append(hostPort).append("\""); + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/AlterDatabaseQuotaStmt.java b/fe/src/com/baidu/palo/analysis/AlterDatabaseQuotaStmt.java index 64405651e0..f30a1efcc6 100644 --- a/fe/src/com/baidu/palo/analysis/AlterDatabaseQuotaStmt.java +++ b/fe/src/com/baidu/palo/analysis/AlterDatabaseQuotaStmt.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.cluster.ClusterNamespace; import com.baidu.palo.common.AnalysisException; @@ -29,45 +29,45 @@ import com.baidu.palo.common.InternalException; import com.baidu.palo.mysql.privilege.PrivPredicate; import com.baidu.palo.qe.ConnectContext; -import com.google.common.base.Strings; - -public class AlterDatabaseQuotaStmt extends DdlStmt { - private String dbName; - private long quota; - - public AlterDatabaseQuotaStmt(String dbName, long quota) { - this.dbName = dbName; - this.quota = quota; - } - - public String getDbName() { - return dbName; - } - - public long getQuota() { - return quota; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { +import com.google.common.base.Strings; + +public class AlterDatabaseQuotaStmt extends DdlStmt { + private String dbName; + private long quota; + + public AlterDatabaseQuotaStmt(String dbName, long quota) { + this.dbName = dbName; + this.quota = quota; + } + + public String getDbName() { + return dbName; + } + + public long getQuota() { + return quota; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { super.analyze(analyzer); if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.ADMIN)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DB_ACCESS_DENIED, analyzer.getQualifiedUser(), dbName); } - - if (Strings.isNullOrEmpty(dbName)) { - ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR); - } - dbName = ClusterNamespace.getFullName(getClusterName(), dbName); - if (quota < 0L) { - throw new AnalysisException("Quota must larger than 0"); - } - } - - @Override - public String toSql() { - return "ALTER DATABASE " + dbName + " SET DATA QUOTA " + quota; - } - -} + + if (Strings.isNullOrEmpty(dbName)) { + ErrorReport.reportAnalysisException(ErrorCode.ERR_NO_DB_ERROR); + } + dbName = ClusterNamespace.getFullName(getClusterName(), dbName); + if (quota < 0L) { + throw new AnalysisException("Quota must larger than 0"); + } + } + + @Override + public String toSql() { + return "ALTER DATABASE " + dbName + " SET DATA QUOTA " + quota; + } + +} diff 
--git a/fe/src/com/baidu/palo/analysis/AlterDatabaseRename.java b/fe/src/com/baidu/palo/analysis/AlterDatabaseRename.java index a7227908aa..3a089e336a 100644 --- a/fe/src/com/baidu/palo/analysis/AlterDatabaseRename.java +++ b/fe/src/com/baidu/palo/analysis/AlterDatabaseRename.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.analysis.CompoundPredicate.Operator; import com.baidu.palo.catalog.Catalog; import com.baidu.palo.cluster.ClusterNamespace; @@ -33,30 +33,30 @@ import com.baidu.palo.mysql.privilege.PrivBitSet; import com.baidu.palo.mysql.privilege.PrivPredicate; import com.baidu.palo.qe.ConnectContext; -import com.google.common.base.Strings; - -public class AlterDatabaseRename extends DdlStmt { - private String dbName; - private String newDbName; - - public AlterDatabaseRename(String dbName, String newDbName) { - this.dbName = dbName; - this.newDbName = newDbName; - } - - public String getDbName() { - return dbName; - } - - public String getNewDbName() { - return newDbName; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { - super.analyze(analyzer); - if (Strings.isNullOrEmpty(dbName)) { - throw new AnalysisException("Database name is not set"); +import com.google.common.base.Strings; + +public class AlterDatabaseRename extends DdlStmt { + private String dbName; + private String newDbName; + + public AlterDatabaseRename(String dbName, String newDbName) { + this.dbName = dbName; + this.newDbName = newDbName; + } + + public String getDbName() { + return dbName; + } + + public String getNewDbName() { + return newDbName; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { + super.analyze(analyzer); + if (Strings.isNullOrEmpty(dbName)) { + throw new AnalysisException("Database name is not set"); } if (!Catalog.getCurrentCatalog().getAuth().checkDbPriv(ConnectContext.get(), dbName, @@ -64,21 +64,21 @@ public class AlterDatabaseRename extends DdlStmt { PaloPrivilege.ALTER_PRIV), Operator.OR))) { ErrorReport.reportAnalysisException(ErrorCode.ERR_DB_ACCESS_DENIED, analyzer.getQualifiedUser(), dbName); - } - - if (Strings.isNullOrEmpty(newDbName)) { - throw new AnalysisException("New database name is not set"); - } - - FeNameFormat.checkDbName(newDbName); - - dbName = ClusterNamespace.getFullName(getClusterName(), dbName); - newDbName = ClusterNamespace.getFullName(getClusterName(), newDbName); - } - - @Override - public String toSql() { - return "ALTER DATABASE " + dbName + " RENAME " + newDbName; - } - -} + } + + if (Strings.isNullOrEmpty(newDbName)) { + throw new AnalysisException("New database name is not set"); + } + + FeNameFormat.checkDbName(newDbName); + + dbName = ClusterNamespace.getFullName(getClusterName(), dbName); + newDbName = ClusterNamespace.getFullName(getClusterName(), newDbName); + } + + @Override + public String toSql() { + return "ALTER DATABASE " + dbName + " RENAME " + newDbName; + } + +} diff --git a/fe/src/com/baidu/palo/analysis/AlterUserClause.java b/fe/src/com/baidu/palo/analysis/AlterUserClause.java index 56c47e2b2d..4a09835126 100644 --- a/fe/src/com/baidu/palo/analysis/AlterUserClause.java +++ b/fe/src/com/baidu/palo/analysis/AlterUserClause.java @@ -18,107 +18,107 @@ // specific language governing permissions and limitations // under the License. 
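[Editor's sketch] The two analyze() methods above add access checks through Catalog.getCurrentCatalog().getAuth(): the quota statement requires the global ADMIN privilege, while the rename statement accepts ADMIN_PRIV or ALTER_PRIV on the target database. The snippet below is only a condensed, self-contained restatement of that rule; the boolean parameters stand in for the real checkGlobalPriv / checkDbPriv calls, which need the running frontend and are not reproduced here.

    public class AlterDbPrivSketch {
        // Condensed restatement of the two access checks above. The boolean inputs stand
        // in for Auth.checkGlobalPriv / Auth.checkDbPriv, which need the running frontend
        // and are therefore not called here.
        static boolean mayAlterDbQuota(boolean hasGlobalAdminPriv) {
            // ALTER DATABASE ... SET DATA QUOTA: global ADMIN privilege required.
            return hasGlobalAdminPriv;
        }

        static boolean mayRenameDb(boolean hasDbAdminPriv, boolean hasDbAlterPriv) {
            // ALTER DATABASE ... RENAME: ADMIN_PRIV or ALTER_PRIV on the database suffices.
            return hasDbAdminPriv || hasDbAlterPriv;
        }

        public static void main(String[] args) {
            System.out.println(mayAlterDbQuota(false)); // false -> ERR_DB_ACCESS_DENIED in the FE
            System.out.println(mayRenameDb(false, true)); // true
        }
    }
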
-package com.baidu.palo.analysis; - -import java.util.List; -import java.util.Map; -import java.util.Set; - -import com.baidu.palo.common.AnalysisException; -import com.google.common.base.Strings; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; - -import org.apache.commons.lang.NotImplementedException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -public class AlterUserClause extends AlterClause { - private static final Logger LOG = LogManager.getLogger(AlterUserClause.class); - private List hostOrIps; - - private List ips; // for 123.321.1.1 - private List starIps; // for 123.*.*.* - private List hosts; // for www.baidu.com - private AlterUserType type; - - public AlterUserClause(AlterUserType type, List hostOrIps) { - this.type = type; - this.hostOrIps = hostOrIps; - this.ips = Lists.newArrayList(); - this.starIps = Lists.newArrayList(); - this.hosts = Lists.newArrayList(); - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append(type); - for (int i = 0; i < hostOrIps.size(); i++) { - sb.append("\"").append(hostOrIps.get(i)).append("\""); - if (i != hostOrIps.size() - 1) { - sb.append(", "); - } - } - return sb.toString(); - } - - private boolean isHostName(String host) throws AnalysisException { - if (Strings.isNullOrEmpty(host)) { - throw new AnalysisException("host=[" + host + "] is empty"); - } - - for (char ch : host.toCharArray()) { - if (Character.isLetter(ch)) { - return true; - } - } - String[] ipArray = host.split("\\."); - if (ipArray.length != 4) { - String msg = "ip wrong, ip=" + host; - LOG.warn("{}", msg); - throw new AnalysisException(msg); - } - return false; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - // 1. del duplicate - Set sets = Sets.newHashSet(hostOrIps); - - // 2. 
extract hosts and realIps from hostOrIp - for (String host : sets) { - if (isHostName(host)) { - // may be bns or hostname - hosts.add(host); - } else if (host.contains("*")) { - starIps.add(host); - } else { - ips.add(host); - } - } - // NOTICE: if we del hostname from whiltList, the hostname must be totally equal with catalog's hostname; - } - - public List getIps() { - return ips; - } - - public List getStarIps() { - return starIps; - } - - public List getHosts() { - return hosts; - } - - public AlterUserType getAlterUserType() { - return type; - } - - @Override - public Map getProperties() { - throw new NotImplementedException(); - } -} +package com.baidu.palo.analysis; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.baidu.palo.common.AnalysisException; +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +import org.apache.commons.lang.NotImplementedException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +public class AlterUserClause extends AlterClause { + private static final Logger LOG = LogManager.getLogger(AlterUserClause.class); + private List hostOrIps; + + private List ips; // for 123.321.1.1 + private List starIps; // for 123.*.*.* + private List hosts; // for www.baidu.com + private AlterUserType type; + + public AlterUserClause(AlterUserType type, List hostOrIps) { + this.type = type; + this.hostOrIps = hostOrIps; + this.ips = Lists.newArrayList(); + this.starIps = Lists.newArrayList(); + this.hosts = Lists.newArrayList(); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append(type); + for (int i = 0; i < hostOrIps.size(); i++) { + sb.append("\"").append(hostOrIps.get(i)).append("\""); + if (i != hostOrIps.size() - 1) { + sb.append(", "); + } + } + return sb.toString(); + } + + private boolean isHostName(String host) throws AnalysisException { + if (Strings.isNullOrEmpty(host)) { + throw new AnalysisException("host=[" + host + "] is empty"); + } + + for (char ch : host.toCharArray()) { + if (Character.isLetter(ch)) { + return true; + } + } + String[] ipArray = host.split("\\."); + if (ipArray.length != 4) { + String msg = "ip wrong, ip=" + host; + LOG.warn("{}", msg); + throw new AnalysisException(msg); + } + return false; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { + // 1. del duplicate + Set sets = Sets.newHashSet(hostOrIps); + + // 2. extract hosts and realIps from hostOrIp + for (String host : sets) { + if (isHostName(host)) { + // may be bns or hostname + hosts.add(host); + } else if (host.contains("*")) { + starIps.add(host); + } else { + ips.add(host); + } + } + // NOTICE: if we del hostname from whiltList, the hostname must be totally equal with catalog's hostname; + } + + public List getIps() { + return ips; + } + + public List getStarIps() { + return starIps; + } + + public List getHosts() { + return hosts; + } + + public AlterUserType getAlterUserType() { + return type; + } + + @Override + public Map getProperties() { + throw new NotImplementedException(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/AlterUserType.java b/fe/src/com/baidu/palo/analysis/AlterUserType.java index 9c35f11f95..aa609919d1 100644 --- a/fe/src/com/baidu/palo/analysis/AlterUserType.java +++ b/fe/src/com/baidu/palo/analysis/AlterUserType.java @@ -18,15 +18,15 @@ // specific language governing permissions and limitations // under the License. 
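[Editor's sketch] The host/IP classification performed by AlterUserClause.analyze() above can be summarized as: any entry containing a letter is treated as a hostname (or BNS name), entries containing '*' go to starIps, and everything else goes to ips. The standalone sketch below restates only that branching; the real code additionally rejects non-wildcard entries that do not split into exactly four dot-separated parts.

    public class HostClassifySketch {
        // Condensed restatement of the classification in AlterUserClause.analyze() above.
        // The real implementation also throws if a non-wildcard entry does not split into
        // exactly four dot-separated parts.
        static String classify(String host) {
            for (char ch : host.toCharArray()) {
                if (Character.isLetter(ch)) {
                    return "hosts";        // hostname or BNS name, e.g. www.baidu.com
                }
            }
            return host.contains("*") ? "starIps" : "ips";  // 123.*.*.* vs 123.321.1.1
        }

        public static void main(String[] args) {
            for (String h : new String[] {"www.baidu.com", "123.*.*.*", "10.0.0.1"}) {
                System.out.println(h + " -> " + classify(h));
            }
        }
    }
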
-package com.baidu.palo.analysis; - -public enum AlterUserType { - ADD_USER_WHITELIST("add_whiltList"), - DELETE_USER_WHITELIST("delete_whiltList"); - - private String type; - - private AlterUserType(String type) { - this.type = type; - } +package com.baidu.palo.analysis; + +public enum AlterUserType { + ADD_USER_WHITELIST("add_whiltList"), + DELETE_USER_WHITELIST("delete_whiltList"); + + private String type; + + private AlterUserType(String type) { + this.type = type; + } } \ No newline at end of file diff --git a/fe/src/com/baidu/palo/analysis/BackendClause.java b/fe/src/com/baidu/palo/analysis/BackendClause.java index 3c3591c90a..1642bc0cd9 100644 --- a/fe/src/com/baidu/palo/analysis/BackendClause.java +++ b/fe/src/com/baidu/palo/analysis/BackendClause.java @@ -18,50 +18,50 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.Pair; -import com.baidu.palo.system.SystemInfoService; -import com.google.common.base.Preconditions; - -import org.apache.commons.lang.NotImplementedException; - -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - -public class BackendClause extends AlterClause { - protected List hostPorts; - - protected List> hostPortPairs; - - protected BackendClause(List hostPorts) { - this.hostPorts = hostPorts; - this.hostPortPairs = new LinkedList>(); - } - - public List> getHostPortPairs() { - return hostPortPairs; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - for (String hostPort : hostPorts) { - Pair pair = SystemInfoService.validateHostAndPort(hostPort); - hostPortPairs.add(pair); - } - - Preconditions.checkState(!hostPortPairs.isEmpty()); - } - - @Override - public String toSql() { - throw new NotImplementedException(); - } - - @Override - public Map getProperties() { - throw new NotImplementedException(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.Pair; +import com.baidu.palo.system.SystemInfoService; +import com.google.common.base.Preconditions; + +import org.apache.commons.lang.NotImplementedException; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +public class BackendClause extends AlterClause { + protected List hostPorts; + + protected List> hostPortPairs; + + protected BackendClause(List hostPorts) { + this.hostPorts = hostPorts; + this.hostPortPairs = new LinkedList>(); + } + + public List> getHostPortPairs() { + return hostPortPairs; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { + for (String hostPort : hostPorts) { + Pair pair = SystemInfoService.validateHostAndPort(hostPort); + hostPortPairs.add(pair); + } + + Preconditions.checkState(!hostPortPairs.isEmpty()); + } + + @Override + public String toSql() { + throw new NotImplementedException(); + } + + @Override + public Map getProperties() { + throw new NotImplementedException(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/BuiltinAggregateFunction.java b/fe/src/com/baidu/palo/analysis/BuiltinAggregateFunction.java index ba876c69d9..40e9ee4fe2 100755 --- a/fe/src/com/baidu/palo/analysis/BuiltinAggregateFunction.java +++ b/fe/src/com/baidu/palo/analysis/BuiltinAggregateFunction.java @@ -18,135 +18,135 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.catalog.Function; -import com.baidu.palo.catalog.ScalarType; -import com.baidu.palo.catalog.Type; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.thrift.TAggregateFunction; -import com.baidu.palo.thrift.TAggregationOp; -import com.baidu.palo.thrift.TFunction; -import com.baidu.palo.thrift.TFunctionBinaryType; -import com.google.common.base.Preconditions; - -import java.util.ArrayList; - -/** - * Internal representation of a builtin aggregate function. - */ -public class BuiltinAggregateFunction extends Function { - private final Operator op_; - // this is to judge the analytic function - private boolean isAnalyticFn = false; - - public boolean isAnalyticFn() { - return isAnalyticFn; - } - // TODO: this is not used yet until the planner understand this. - private com.baidu.palo.catalog.Type intermediateType_; - private boolean reqIntermediateTuple = false; - - public boolean isReqIntermediateTuple() { - return reqIntermediateTuple; - } - - public BuiltinAggregateFunction(Operator op, ArrayList argTypes, - Type retType, com.baidu.palo.catalog.Type intermediateType, boolean isAnalyticFn) - throws AnalysisException { - super(FunctionName.CreateBuiltinName(op.toString()), argTypes, - retType, false); - Preconditions.checkState(intermediateType != null); - Preconditions.checkState(op != null); - // may be no need to analyze - // intermediateType.analyze(); - op_ = op; - intermediateType_ = intermediateType; - if (isAnalyticFn && !intermediateType.equals(retType)) { - reqIntermediateTuple = true; - } - setBinaryType(TFunctionBinaryType.BUILTIN); - this.isAnalyticFn = isAnalyticFn; - } - - @Override - public TFunction toThrift() { - TFunction fn = super.toThrift(); - // TODO: for now, just put the op_ enum as the id. - if (op_ == BuiltinAggregateFunction.Operator.FIRST_VALUE_REWRITE) { - fn.setId(0); - } else { - fn.setId(op_.thriftOp.ordinal()); - } - fn.setAggregate_fn(new TAggregateFunction(intermediateType_.toThrift())); - return fn; - } - - public Operator op() { - return op_; - } - - public com.baidu.palo.catalog.Type getIntermediateType() { - return intermediateType_; - } - - public void setIntermediateType(com.baidu.palo.catalog.Type t) { - intermediateType_ = t; - } - - // TODO: this is effectively a catalog of builtin aggregate functions. - // We should move this to something in the catalog instead of having it - // here like this. - public enum Operator { - COUNT("COUNT", TAggregationOp.COUNT, Type.BIGINT), - MIN("MIN", TAggregationOp.MIN, null), - MAX("MAX", TAggregationOp.MAX, null), - DISTINCT_PC("DISTINCT_PC", TAggregationOp.DISTINCT_PC, ScalarType.createVarcharType(64)), - DISTINCT_PCSA("DISTINCT_PCSA", TAggregationOp.DISTINCT_PCSA, ScalarType.createVarcharType(64)), - SUM("SUM", TAggregationOp.SUM, null), - AVG("AVG", TAggregationOp.INVALID, null), - GROUP_CONCAT("GROUP_CONCAT", TAggregationOp.GROUP_CONCAT, ScalarType.createVarcharType(16)), - - // NDV is the external facing name (i.e. queries should always be written with NDV) - // The current implementation of NDV is hyperloglog (but we could change this without - // external query changes if we find a better algorithm). 
- NDV("NDV", TAggregationOp.HLL, ScalarType.createVarcharType(64)), - HLL_UNION_AGG("HLL_UNION_AGG", TAggregationOp.HLL_C, ScalarType.createVarcharType(64)), - COUNT_DISTINCT("COUNT_DISITNCT", TAggregationOp.COUNT_DISTINCT, Type.BIGINT), - SUM_DISTINCT("SUM_DISTINCT", TAggregationOp.SUM_DISTINCT, null), - LAG("LAG", TAggregationOp.LAG, null), - FIRST_VALUE("FIRST_VALUE", TAggregationOp.FIRST_VALUE, null), - LAST_VALUE("LAST_VALUE", TAggregationOp.LAST_VALUE, null), - RANK("RANK", TAggregationOp.RANK, null), - DENSE_RANK("DENSE_RANK", TAggregationOp.DENSE_RANK, null), - ROW_NUMBER("ROW_NUMBER", TAggregationOp.ROW_NUMBER, null), - LEAD("LEAD", TAggregationOp.LEAD, null), - FIRST_VALUE_REWRITE("FIRST_VALUE_REWRITE", null, null); - - private final String description; - private final TAggregationOp thriftOp; - - // The intermediate type for this function if it is constant regardless of - // input type. Set to null if it can only be determined during analysis. - private final com.baidu.palo.catalog.Type intermediateType; - private Operator(String description, TAggregationOp thriftOp, - com.baidu.palo.catalog.Type intermediateType) { - this.description = description; - this.thriftOp = thriftOp; - this.intermediateType = intermediateType; - } - - @Override - public String toString() { - return description; - } - - public TAggregationOp toThrift() { - return thriftOp; - } - - public com.baidu.palo.catalog.Type intermediateType() { - return intermediateType; - } - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.catalog.Function; +import com.baidu.palo.catalog.ScalarType; +import com.baidu.palo.catalog.Type; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.thrift.TAggregateFunction; +import com.baidu.palo.thrift.TAggregationOp; +import com.baidu.palo.thrift.TFunction; +import com.baidu.palo.thrift.TFunctionBinaryType; +import com.google.common.base.Preconditions; + +import java.util.ArrayList; + +/** + * Internal representation of a builtin aggregate function. + */ +public class BuiltinAggregateFunction extends Function { + private final Operator op_; + // this is to judge the analytic function + private boolean isAnalyticFn = false; + + public boolean isAnalyticFn() { + return isAnalyticFn; + } + // TODO: this is not used yet until the planner understand this. + private com.baidu.palo.catalog.Type intermediateType_; + private boolean reqIntermediateTuple = false; + + public boolean isReqIntermediateTuple() { + return reqIntermediateTuple; + } + + public BuiltinAggregateFunction(Operator op, ArrayList argTypes, + Type retType, com.baidu.palo.catalog.Type intermediateType, boolean isAnalyticFn) + throws AnalysisException { + super(FunctionName.CreateBuiltinName(op.toString()), argTypes, + retType, false); + Preconditions.checkState(intermediateType != null); + Preconditions.checkState(op != null); + // may be no need to analyze + // intermediateType.analyze(); + op_ = op; + intermediateType_ = intermediateType; + if (isAnalyticFn && !intermediateType.equals(retType)) { + reqIntermediateTuple = true; + } + setBinaryType(TFunctionBinaryType.BUILTIN); + this.isAnalyticFn = isAnalyticFn; + } + + @Override + public TFunction toThrift() { + TFunction fn = super.toThrift(); + // TODO: for now, just put the op_ enum as the id. 
+ if (op_ == BuiltinAggregateFunction.Operator.FIRST_VALUE_REWRITE) { + fn.setId(0); + } else { + fn.setId(op_.thriftOp.ordinal()); + } + fn.setAggregate_fn(new TAggregateFunction(intermediateType_.toThrift())); + return fn; + } + + public Operator op() { + return op_; + } + + public com.baidu.palo.catalog.Type getIntermediateType() { + return intermediateType_; + } + + public void setIntermediateType(com.baidu.palo.catalog.Type t) { + intermediateType_ = t; + } + + // TODO: this is effectively a catalog of builtin aggregate functions. + // We should move this to something in the catalog instead of having it + // here like this. + public enum Operator { + COUNT("COUNT", TAggregationOp.COUNT, Type.BIGINT), + MIN("MIN", TAggregationOp.MIN, null), + MAX("MAX", TAggregationOp.MAX, null), + DISTINCT_PC("DISTINCT_PC", TAggregationOp.DISTINCT_PC, ScalarType.createVarcharType(64)), + DISTINCT_PCSA("DISTINCT_PCSA", TAggregationOp.DISTINCT_PCSA, ScalarType.createVarcharType(64)), + SUM("SUM", TAggregationOp.SUM, null), + AVG("AVG", TAggregationOp.INVALID, null), + GROUP_CONCAT("GROUP_CONCAT", TAggregationOp.GROUP_CONCAT, ScalarType.createVarcharType(16)), + + // NDV is the external facing name (i.e. queries should always be written with NDV) + // The current implementation of NDV is hyperloglog (but we could change this without + // external query changes if we find a better algorithm). + NDV("NDV", TAggregationOp.HLL, ScalarType.createVarcharType(64)), + HLL_UNION_AGG("HLL_UNION_AGG", TAggregationOp.HLL_C, ScalarType.createVarcharType(64)), + COUNT_DISTINCT("COUNT_DISITNCT", TAggregationOp.COUNT_DISTINCT, Type.BIGINT), + SUM_DISTINCT("SUM_DISTINCT", TAggregationOp.SUM_DISTINCT, null), + LAG("LAG", TAggregationOp.LAG, null), + FIRST_VALUE("FIRST_VALUE", TAggregationOp.FIRST_VALUE, null), + LAST_VALUE("LAST_VALUE", TAggregationOp.LAST_VALUE, null), + RANK("RANK", TAggregationOp.RANK, null), + DENSE_RANK("DENSE_RANK", TAggregationOp.DENSE_RANK, null), + ROW_NUMBER("ROW_NUMBER", TAggregationOp.ROW_NUMBER, null), + LEAD("LEAD", TAggregationOp.LEAD, null), + FIRST_VALUE_REWRITE("FIRST_VALUE_REWRITE", null, null); + + private final String description; + private final TAggregationOp thriftOp; + + // The intermediate type for this function if it is constant regardless of + // input type. Set to null if it can only be determined during analysis. + private final com.baidu.palo.catalog.Type intermediateType; + private Operator(String description, TAggregationOp thriftOp, + com.baidu.palo.catalog.Type intermediateType) { + this.description = description; + this.thriftOp = thriftOp; + this.intermediateType = intermediateType; + } + + @Override + public String toString() { + return description; + } + + public TAggregationOp toThrift() { + return thriftOp; + } + + public com.baidu.palo.catalog.Type intermediateType() { + return intermediateType; + } + } +} diff --git a/fe/src/com/baidu/palo/analysis/CancelAlterTableStmt.java b/fe/src/com/baidu/palo/analysis/CancelAlterTableStmt.java index 78050fe933..ad49af306a 100644 --- a/fe/src/com/baidu/palo/analysis/CancelAlterTableStmt.java +++ b/fe/src/com/baidu/palo/analysis/CancelAlterTableStmt.java @@ -13,47 +13,47 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.analysis.ShowAlterStmt.AlterType; import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.ErrorCode; import com.baidu.palo.common.ErrorReport; import com.baidu.palo.mysql.privilege.PrivPredicate; -import com.baidu.palo.qe.ConnectContext; - -/* - * CANCEL ALTER COLUMN|ROLLUP FROM db_name.table_name - */ -public class CancelAlterTableStmt extends CancelStmt { - - private AlterType alterType; - - private TableName dbTableName; - - public AlterType getAlterType() { - return alterType; - } - - public String getDbName() { - return dbTableName.getDb(); - } - - public String getTableName() { - return dbTableName.getTbl(); - } - - public CancelAlterTableStmt(AlterType alterType, TableName dbTableName) { - this.alterType = alterType; - this.dbTableName = dbTableName; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { - dbTableName.analyze(analyzer); - - // check access +import com.baidu.palo.qe.ConnectContext; + +/* + * CANCEL ALTER COLUMN|ROLLUP FROM db_name.table_name + */ +public class CancelAlterTableStmt extends CancelStmt { + + private AlterType alterType; + + private TableName dbTableName; + + public AlterType getAlterType() { + return alterType; + } + + public String getDbName() { + return dbTableName.getDb(); + } + + public String getTableName() { + return dbTableName.getTbl(); + } + + public CancelAlterTableStmt(AlterType alterType, TableName dbTableName) { + this.alterType = alterType; + this.dbTableName = dbTableName; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { + dbTableName.analyze(analyzer); + + // check access if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbTableName.getDb(), dbTableName.getTbl(), PrivPredicate.ALTER)) { @@ -61,20 +61,20 @@ public class CancelAlterTableStmt extends CancelStmt { ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), dbTableName.getTbl()); - } - } - - @Override - public String toSql() { - StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("CANCEL ALTER " + this.alterType); - stringBuilder.append(" FROM " + dbTableName.toSql()); - return stringBuilder.toString(); - } - - @Override - public String toString() { - return toSql(); - } - -} + } + } + + @Override + public String toSql() { + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("CANCEL ALTER " + this.alterType); + stringBuilder.append(" FROM " + dbTableName.toSql()); + return stringBuilder.toString(); + } + + @Override + public String toString() { + return toSql(); + } + +} diff --git a/fe/src/com/baidu/palo/analysis/CreateRoleStmt.java b/fe/src/com/baidu/palo/analysis/CreateRoleStmt.java index 84b11d59f8..a536d7f28d 100644 --- a/fe/src/com/baidu/palo/analysis/CreateRoleStmt.java +++ b/fe/src/com/baidu/palo/analysis/CreateRoleStmt.java @@ -35,7 +35,7 @@ public class CreateRoleStmt extends DdlStmt { @Override public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { super.analyze(analyzer); - FeNameFormat.checkRoleName(role, false /* can not be admin */); + FeNameFormat.checkRoleName(role, false /* can not be admin */, "Can not create role"); role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } diff --git a/fe/src/com/baidu/palo/analysis/CreateUserStmt.java b/fe/src/com/baidu/palo/analysis/CreateUserStmt.java index 
b38d213755..293e9ce33d 100644 --- a/fe/src/com/baidu/palo/analysis/CreateUserStmt.java +++ b/fe/src/com/baidu/palo/analysis/CreateUserStmt.java @@ -127,7 +127,7 @@ public class CreateUserStmt extends DdlStmt { // for forward compatibility role = PaloRole.ADMIN_ROLE; } - FeNameFormat.checkRoleName(role, true /* can be admin */); + FeNameFormat.checkRoleName(role, true /* can be admin */, "Can not grant user to role"); role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } diff --git a/fe/src/com/baidu/palo/analysis/DdlStmt.java b/fe/src/com/baidu/palo/analysis/DdlStmt.java index bf8f795810..cc5ec40f90 100644 --- a/fe/src/com/baidu/palo/analysis/DdlStmt.java +++ b/fe/src/com/baidu/palo/analysis/DdlStmt.java @@ -18,11 +18,11 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -public abstract class DdlStmt extends StatementBase { - @Override - public RedirectStatus getRedirectStatus() { - return RedirectStatus.FORWARD_WITH_SYNC; - } -} +package com.baidu.palo.analysis; + +public abstract class DdlStmt extends StatementBase { + @Override + public RedirectStatus getRedirectStatus() { + return RedirectStatus.FORWARD_WITH_SYNC; + } +} diff --git a/fe/src/com/baidu/palo/analysis/DropBackendClause.java b/fe/src/com/baidu/palo/analysis/DropBackendClause.java index 8ec77856f4..71770eddd3 100644 --- a/fe/src/com/baidu/palo/analysis/DropBackendClause.java +++ b/fe/src/com/baidu/palo/analysis/DropBackendClause.java @@ -13,37 +13,37 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import java.util.List; - +package com.baidu.palo.analysis; + +import java.util.List; + public class DropBackendClause extends BackendClause { - private boolean force; + private boolean force; public DropBackendClause(List hostPorts) { super(hostPorts); this.force = true; } - - public DropBackendClause(List hostPorts, boolean force) { + + public DropBackendClause(List hostPorts, boolean force) { super(hostPorts); - this.force = force; + this.force = force; } public boolean isForce() { return force; - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("DROP BACKEND "); - for (int i = 0; i < hostPorts.size(); i++) { - sb.append("\"").append(hostPorts.get(i)).append("\""); - if (i != hostPorts.size() - 1) { - sb.append(", "); - } - } - return sb.toString(); - } -} + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("DROP BACKEND "); + for (int i = 0; i < hostPorts.size(); i++) { + sb.append("\"").append(hostPorts.get(i)).append("\""); + if (i != hostPorts.size() - 1) { + sb.append(", "); + } + } + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/DropFollowerClause.java b/fe/src/com/baidu/palo/analysis/DropFollowerClause.java index 48cb7a2ad0..6011ac9319 100644 --- a/fe/src/com/baidu/palo/analysis/DropFollowerClause.java +++ b/fe/src/com/baidu/palo/analysis/DropFollowerClause.java @@ -13,20 +13,20 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.ha.FrontendNodeType; - -public class DropFollowerClause extends FrontendClause { - public DropFollowerClause(String hostPort) { - super(hostPort, FrontendNodeType.FOLLOWER); - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("ALTER CLUSTER DROP FOLLOWER \""); - sb.append(hostPort).append("\""); - return sb.toString(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.ha.FrontendNodeType; + +public class DropFollowerClause extends FrontendClause { + public DropFollowerClause(String hostPort) { + super(hostPort, FrontendNodeType.FOLLOWER); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ALTER CLUSTER DROP FOLLOWER \""); + sb.append(hostPort).append("\""); + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/DropObserverClause.java b/fe/src/com/baidu/palo/analysis/DropObserverClause.java index dcc5e2eac9..e4807991a6 100644 --- a/fe/src/com/baidu/palo/analysis/DropObserverClause.java +++ b/fe/src/com/baidu/palo/analysis/DropObserverClause.java @@ -13,20 +13,20 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.ha.FrontendNodeType; - -public class DropObserverClause extends FrontendClause { - public DropObserverClause(String hostPort) { - super(hostPort, FrontendNodeType.OBSERVER); - } - - @Override - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("ALTER CLUSTER DROP OBSERVER \""); - sb.append(hostPort).append("\""); - return sb.toString(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.ha.FrontendNodeType; + +public class DropObserverClause extends FrontendClause { + public DropObserverClause(String hostPort) { + super(hostPort, FrontendNodeType.OBSERVER); + } + + @Override + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("ALTER CLUSTER DROP OBSERVER \""); + sb.append(hostPort).append("\""); + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/DropRoleStmt.java b/fe/src/com/baidu/palo/analysis/DropRoleStmt.java index 3ec5efca1f..795d023262 100644 --- a/fe/src/com/baidu/palo/analysis/DropRoleStmt.java +++ b/fe/src/com/baidu/palo/analysis/DropRoleStmt.java @@ -35,7 +35,7 @@ public class DropRoleStmt extends DdlStmt { @Override public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { super.analyze(analyzer); - FeNameFormat.checkRoleName(role, false /* can not be superuser */); + FeNameFormat.checkRoleName(role, false /* can not be superuser */, "Can not drop role"); role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } diff --git a/fe/src/com/baidu/palo/analysis/ExistsPredicate.java b/fe/src/com/baidu/palo/analysis/ExistsPredicate.java index 08c7152c73..9a3a388ae2 100644 --- a/fe/src/com/baidu/palo/analysis/ExistsPredicate.java +++ b/fe/src/com/baidu/palo/analysis/ExistsPredicate.java @@ -18,59 +18,59 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.thrift.TExprNode; -import com.google.common.base.Preconditions; - -/** - * Class representing a [NOT] EXISTS predicate. 
- */ -public class ExistsPredicate extends Predicate { - private static final Logger LOG = LoggerFactory.getLogger( - ExistsPredicate.class); - private boolean notExists = false; - - public boolean isNotExists() { return notExists; } - - public ExistsPredicate(Subquery subquery, boolean notExists) { - Preconditions.checkNotNull(subquery); - children.add(subquery); - this.notExists = notExists; - } - - public ExistsPredicate(ExistsPredicate other) { - super(other); - notExists = other.notExists; - } - - @Override - public Expr negate() { - return new ExistsPredicate((Subquery) getChild(0), !notExists); - } - - @Override - protected void toThrift(TExprNode msg) { - // Cannot serialize a nested predicate - Preconditions.checkState(false); - } - - @Override - public Expr clone() { return new ExistsPredicate(this); } - - public String toSql() { - StringBuilder strBuilder = new StringBuilder(); - if (notExists) { - strBuilder.append("NOT "); - - } - strBuilder.append("EXISTS "); - strBuilder.append(getChild(0).toSql()); - return strBuilder.toString(); - } -} - +package com.baidu.palo.analysis; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.thrift.TExprNode; +import com.google.common.base.Preconditions; + +/** + * Class representing a [NOT] EXISTS predicate. + */ +public class ExistsPredicate extends Predicate { + private static final Logger LOG = LoggerFactory.getLogger( + ExistsPredicate.class); + private boolean notExists = false; + + public boolean isNotExists() { return notExists; } + + public ExistsPredicate(Subquery subquery, boolean notExists) { + Preconditions.checkNotNull(subquery); + children.add(subquery); + this.notExists = notExists; + } + + public ExistsPredicate(ExistsPredicate other) { + super(other); + notExists = other.notExists; + } + + @Override + public Expr negate() { + return new ExistsPredicate((Subquery) getChild(0), !notExists); + } + + @Override + protected void toThrift(TExprNode msg) { + // Cannot serialize a nested predicate + Preconditions.checkState(false); + } + + @Override + public Expr clone() { return new ExistsPredicate(this); } + + public String toSql() { + StringBuilder strBuilder = new StringBuilder(); + if (notExists) { + strBuilder.append("NOT "); + + } + strBuilder.append("EXISTS "); + strBuilder.append(getChild(0).toSql()); + return strBuilder.toString(); + } +} + diff --git a/fe/src/com/baidu/palo/analysis/FrontendClause.java b/fe/src/com/baidu/palo/analysis/FrontendClause.java index eebe3579bc..305bf957a6 100644 --- a/fe/src/com/baidu/palo/analysis/FrontendClause.java +++ b/fe/src/com/baidu/palo/analysis/FrontendClause.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.ErrorCode; @@ -35,48 +35,48 @@ import com.google.common.base.Strings; import org.apache.commons.lang.NotImplementedException; -import java.util.Map; - -public class FrontendClause extends AlterClause { - protected String hostPort; - protected String host; - protected int port; - protected FrontendNodeType role; - - protected FrontendClause(String hostPort, FrontendNodeType role) { - this.hostPort = hostPort; - this.role = role; - } - - public String getHost() { - return host; - } - - public int getPort() { - return port; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException { +import java.util.Map; + +public class FrontendClause extends AlterClause { + protected String hostPort; + protected String host; + protected int port; + protected FrontendNodeType role; + + protected FrontendClause(String hostPort, FrontendNodeType role) { + this.hostPort = hostPort; + this.role = role; + } + + public String getHost() { + return host; + } + + public int getPort() { + return port; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException { if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.OPERATOR)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, analyzer.getQualifiedUser()); - } - - Pair pair = SystemInfoService.validateHostAndPort(hostPort); - this.host = pair.first; - this.port = pair.second; - Preconditions.checkState(!Strings.isNullOrEmpty(host)); - } - - @Override - public String toSql() { - throw new NotImplementedException(); - } - - @Override - public Map getProperties() { - throw new NotImplementedException(); - } - -} + } + + Pair pair = SystemInfoService.validateHostAndPort(hostPort); + this.host = pair.first; + this.port = pair.second; + Preconditions.checkState(!Strings.isNullOrEmpty(host)); + } + + @Override + public String toSql() { + throw new NotImplementedException(); + } + + @Override + public Map getProperties() { + throw new NotImplementedException(); + } + +} diff --git a/fe/src/com/baidu/palo/analysis/FunctionArgs.java b/fe/src/com/baidu/palo/analysis/FunctionArgs.java index 420872aeff..a0844a7ab1 100755 --- a/fe/src/com/baidu/palo/analysis/FunctionArgs.java +++ b/fe/src/com/baidu/palo/analysis/FunctionArgs.java @@ -18,34 +18,34 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.catalog.PrimitiveType; -import com.baidu.palo.catalog.Type; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -import java.util.ArrayList; - -// Wrapper class around argument types and if it has varArgs -public class FunctionArgs { - public final ArrayList argTypes; - public boolean hasVarArgs; - - public FunctionArgs() { - argTypes = Lists.newArrayList(); - hasVarArgs = false; - } - - public FunctionArgs(ArrayList argTypes, boolean varArgs) { - this.argTypes = argTypes; - this.hasVarArgs = varArgs; - if (varArgs) { - Preconditions.checkState(argTypes.size() > 0); - } - } - - public void setHasVarArgs(boolean b) { - hasVarArgs = b; - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.catalog.PrimitiveType; +import com.baidu.palo.catalog.Type; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + +import java.util.ArrayList; + +// Wrapper class around argument types and if it has varArgs +public class FunctionArgs { + public final ArrayList argTypes; + public boolean hasVarArgs; + + public FunctionArgs() { + argTypes = Lists.newArrayList(); + hasVarArgs = false; + } + + public FunctionArgs(ArrayList argTypes, boolean varArgs) { + this.argTypes = argTypes; + this.hasVarArgs = varArgs; + if (varArgs) { + Preconditions.checkState(argTypes.size() > 0); + } + } + + public void setHasVarArgs(boolean b) { + hasVarArgs = b; + } +} diff --git a/fe/src/com/baidu/palo/analysis/GrantStmt.java b/fe/src/com/baidu/palo/analysis/GrantStmt.java index d160f4a5cb..b9fb92d6a9 100644 --- a/fe/src/com/baidu/palo/analysis/GrantStmt.java +++ b/fe/src/com/baidu/palo/analysis/GrantStmt.java @@ -28,6 +28,7 @@ import com.baidu.palo.common.ErrorCode; import com.baidu.palo.common.ErrorReport; import com.baidu.palo.common.FeNameFormat; import com.baidu.palo.common.InternalException; +import com.baidu.palo.mysql.privilege.PaloAuth.PrivLevel; import com.baidu.palo.mysql.privilege.PaloPrivilege; import com.baidu.palo.mysql.privilege.PrivBitSet; import com.baidu.palo.mysql.privilege.PrivPredicate; @@ -94,8 +95,26 @@ public class GrantStmt extends DdlStmt { throw new AnalysisException("No privileges in grant statement."); } + // can not grant NODE_PRIV to any other user(root has NODE_PRIV, no need to grant) + for (PaloPrivilege paloPrivilege : privileges) { + if (paloPrivilege == PaloPrivilege.NODE_PRIV) { + throw new AnalysisException("Can not grant NODE_PRIV to any other users or roles"); + } + } + + // ADMIN_PRIV and GRANT_PRIV can only be granted as global + if (tblPattern.getPrivLevel() != PrivLevel.GLOBAL) { + for (PaloPrivilege paloPrivilege : privileges) { + if (paloPrivilege == PaloPrivilege.ADMIN_PRIV || paloPrivilege == PaloPrivilege.GRANT_PRIV) { + throw new AnalysisException( + "Can not grant ADMIN_PRIV or GRANT_PRIV to specified database or table. 
Only support to *.*"); + } + } + } + if (role != null) { - FeNameFormat.checkRoleName(role, false /* can not be superuser */); + // can not grant to admin or operator role + FeNameFormat.checkRoleName(role, false /* can not be admin */, "Can not grant to role"); role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } diff --git a/fe/src/com/baidu/palo/analysis/HdfsURI.java b/fe/src/com/baidu/palo/analysis/HdfsURI.java index cec4274617..da6fd77176 100755 --- a/fe/src/com/baidu/palo/analysis/HdfsURI.java +++ b/fe/src/com/baidu/palo/analysis/HdfsURI.java @@ -18,69 +18,69 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.common.AnalysisException; -import com.google.common.base.Preconditions; - -/* - * Represents an HDFS URI in a SQL statement. - */ -public class HdfsURI { - private final String location; - - // Set during analysis - // dhc to do - // private Path uriPath; - private String uriPath; - - public HdfsURI(String location) { - Preconditions.checkNotNull(location); - this.location = location.trim(); - } - - public String getPath() { - Preconditions.checkNotNull(uriPath); - return uriPath; - } - - public void analyze(Analyzer analyzer) throws AnalysisException { - if (location.isEmpty()) { - throw new AnalysisException("URI path cannot be empty."); - } - uriPath = new String(location); - //dhc to do - /* - uriPath = new Path(location); - if (!uriPath.isUriPathAbsolute()) { - throw new AnalysisException("URI path must be absolute: " + uriPath); - } - try { - FileSystem fs = uriPath.getFileSystem(FileSystemUtil.getConfiguration()); - if (!(fs instanceof DistributedFileSystem)) { - throw new AnalysisException(String.format("URI location '%s' " + - "must point to an HDFS file system.", uriPath)); - } - } catch (IOException e) { - throw new AnalysisException(e.getMessage(), e); - } - - // Fully-qualify the path - uriPath = FileSystemUtil.createFullyQualifiedPath(uriPath); - PrivilegeRequest req = new PrivilegeRequest( - new AuthorizeableURI(uriPath.toString()), privilege); - analyzer.getCatalog().checkAccess(analyzer.getUser(), req); - */ - } - - @Override - public String toString() { - // If uriPath is null (this HdfsURI has not been analyzed yet) just return the raw - // location string the caller passed in. - return uriPath == null ? location : uriPath.toString(); - } - - public String getLocation() { - return location; - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.common.AnalysisException; +import com.google.common.base.Preconditions; + +/* + * Represents an HDFS URI in a SQL statement. 
+ */ +public class HdfsURI { + private final String location; + + // Set during analysis + // dhc to do + // private Path uriPath; + private String uriPath; + + public HdfsURI(String location) { + Preconditions.checkNotNull(location); + this.location = location.trim(); + } + + public String getPath() { + Preconditions.checkNotNull(uriPath); + return uriPath; + } + + public void analyze(Analyzer analyzer) throws AnalysisException { + if (location.isEmpty()) { + throw new AnalysisException("URI path cannot be empty."); + } + uriPath = new String(location); + //dhc to do + /* + uriPath = new Path(location); + if (!uriPath.isUriPathAbsolute()) { + throw new AnalysisException("URI path must be absolute: " + uriPath); + } + try { + FileSystem fs = uriPath.getFileSystem(FileSystemUtil.getConfiguration()); + if (!(fs instanceof DistributedFileSystem)) { + throw new AnalysisException(String.format("URI location '%s' " + + "must point to an HDFS file system.", uriPath)); + } + } catch (IOException e) { + throw new AnalysisException(e.getMessage(), e); + } + + // Fully-qualify the path + uriPath = FileSystemUtil.createFullyQualifiedPath(uriPath); + PrivilegeRequest req = new PrivilegeRequest( + new AuthorizeableURI(uriPath.toString()), privilege); + analyzer.getCatalog().checkAccess(analyzer.getUser(), req); + */ + } + + @Override + public String toString() { + // If uriPath is null (this HdfsURI has not been analyzed yet) just return the raw + // location string the caller passed in. + return uriPath == null ? location : uriPath.toString(); + } + + public String getLocation() { + return location; + } +} diff --git a/fe/src/com/baidu/palo/analysis/RedirectStatus.java b/fe/src/com/baidu/palo/analysis/RedirectStatus.java index f3fb68daa0..183bee4b4d 100644 --- a/fe/src/com/baidu/palo/analysis/RedirectStatus.java +++ b/fe/src/com/baidu/palo/analysis/RedirectStatus.java @@ -18,39 +18,39 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -public class RedirectStatus { - private boolean isForwardToMaster; - private boolean needToWaitJournalSync; - - public RedirectStatus() { - isForwardToMaster = true; - needToWaitJournalSync = true; - } - - public RedirectStatus(boolean isForwardToMaster, boolean needToWaitJournalSync) { - this.isForwardToMaster = isForwardToMaster; - this.needToWaitJournalSync = needToWaitJournalSync; - } - - public boolean isForwardToMaster() { - return isForwardToMaster; - } - - public void setForwardToMaster(boolean isForwardToMaster) { - this.isForwardToMaster = isForwardToMaster; - } - - public boolean isNeedToWaitJournalSync() { - return needToWaitJournalSync; - } - - public void setNeedToWaitJournalSync(boolean needToWaitJournalSync) { - this.needToWaitJournalSync = needToWaitJournalSync; - } - - public static RedirectStatus FORWARD_NO_SYNC = new RedirectStatus(true, false); - public static RedirectStatus FORWARD_WITH_SYNC = new RedirectStatus(true, true); - public static RedirectStatus NO_FORWARD = new RedirectStatus(false, false); -} +package com.baidu.palo.analysis; + +public class RedirectStatus { + private boolean isForwardToMaster; + private boolean needToWaitJournalSync; + + public RedirectStatus() { + isForwardToMaster = true; + needToWaitJournalSync = true; + } + + public RedirectStatus(boolean isForwardToMaster, boolean needToWaitJournalSync) { + this.isForwardToMaster = isForwardToMaster; + this.needToWaitJournalSync = needToWaitJournalSync; + } + + public boolean isForwardToMaster() { + return isForwardToMaster; + } + + public void setForwardToMaster(boolean isForwardToMaster) { + this.isForwardToMaster = isForwardToMaster; + } + + public boolean isNeedToWaitJournalSync() { + return needToWaitJournalSync; + } + + public void setNeedToWaitJournalSync(boolean needToWaitJournalSync) { + this.needToWaitJournalSync = needToWaitJournalSync; + } + + public static RedirectStatus FORWARD_NO_SYNC = new RedirectStatus(true, false); + public static RedirectStatus FORWARD_WITH_SYNC = new RedirectStatus(true, true); + public static RedirectStatus NO_FORWARD = new RedirectStatus(false, false); +} diff --git a/fe/src/com/baidu/palo/analysis/RevokeStmt.java b/fe/src/com/baidu/palo/analysis/RevokeStmt.java index ed83d9be4b..45db890295 100644 --- a/fe/src/com/baidu/palo/analysis/RevokeStmt.java +++ b/fe/src/com/baidu/palo/analysis/RevokeStmt.java @@ -27,6 +27,7 @@ import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.ErrorCode; import com.baidu.palo.common.ErrorReport; import com.baidu.palo.common.FeNameFormat; +import com.baidu.palo.mysql.privilege.PaloAuth.PrivLevel; import com.baidu.palo.mysql.privilege.PaloPrivilege; import com.baidu.palo.mysql.privilege.PrivBitSet; import com.baidu.palo.mysql.privilege.PrivPredicate; @@ -79,7 +80,7 @@ public class RevokeStmt extends DdlStmt { if (userIdent != null) { userIdent.analyze(analyzer.getClusterName()); } else { - FeNameFormat.checkRoleName(role, false /* can not be superuser */); + FeNameFormat.checkRoleName(role, false /* can not be superuser */, "Can not revoke from role"); role = ClusterNamespace.getFullName(analyzer.getClusterName(), role); } @@ -89,6 +90,23 @@ public class RevokeStmt extends DdlStmt { throw new AnalysisException("No privileges in revoke statement."); } + // can not revoke NODE_PRIV from any user + for (PaloPrivilege paloPrivilege : privileges) { + if (paloPrivilege == PaloPrivilege.NODE_PRIV) { + throw new AnalysisException("Can not revoke NODE_PRIV from any users or roles"); + 
} + } + + // ADMIN_PRIV and GRANT_PRIV can only be revoked as global + if (tblPattern.getPrivLevel() != PrivLevel.GLOBAL) { + for (PaloPrivilege paloPrivilege : privileges) { + if (paloPrivilege == PaloPrivilege.ADMIN_PRIV || paloPrivilege == PaloPrivilege.GRANT_PRIV) { + throw new AnalysisException( + "Can not revoke ADMIN_PRIV or GRANT_PRIV from specified database or table. Only support from *.*"); + } + } + } + if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "REVOKE"); diff --git a/fe/src/com/baidu/palo/analysis/SetPassVar.java b/fe/src/com/baidu/palo/analysis/SetPassVar.java index 953b1c7f18..ce3c904075 100644 --- a/fe/src/com/baidu/palo/analysis/SetPassVar.java +++ b/fe/src/com/baidu/palo/analysis/SetPassVar.java @@ -26,6 +26,7 @@ import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.ErrorCode; import com.baidu.palo.common.ErrorReport; import com.baidu.palo.mysql.MysqlPassword; +import com.baidu.palo.mysql.privilege.PaloAuth; import com.baidu.palo.mysql.privilege.PrivPredicate; import com.baidu.palo.qe.ConnectContext; @@ -75,7 +76,13 @@ public class SetPassVar extends SetVar { return; } - // 2. user has grant privs + // 2. No user can set password for root except for root user itself + if (userIdent.getQualifiedUser().equals(PaloAuth.ROOT_USER) + && !ClusterNamespace.getNameFromFullName(ctx.getQualifiedUser()).equals(PaloAuth.ROOT_USER)) { + throw new AnalysisException("Can not set password for root user, except for root itself"); + } + + // 3. user has grant privs if (!Catalog.getCurrentCatalog().getAuth().checkGlobalPriv(ConnectContext.get(), PrivPredicate.GRANT)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_SPECIFIC_ACCESS_DENIED_ERROR, "GRANT"); } diff --git a/fe/src/com/baidu/palo/analysis/ShowPartitionsStmt.java b/fe/src/com/baidu/palo/analysis/ShowPartitionsStmt.java index 5ee6c7e864..928911434e 100644 --- a/fe/src/com/baidu/palo/analysis/ShowPartitionsStmt.java +++ b/fe/src/com/baidu/palo/analysis/ShowPartitionsStmt.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.catalog.Column; import com.baidu.palo.catalog.ColumnType; @@ -36,105 +36,105 @@ import com.baidu.palo.qe.ShowResultSetMetaData; import com.google.common.base.Strings; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -public class ShowPartitionsStmt extends ShowStmt { - private static final Logger LOG = LogManager.getLogger(ShowPartitionsStmt.class); - - private String dbName; - private String tableName; - private String partitionName; - - private ProcNodeInterface node; - - public ShowPartitionsStmt(TableName tableName, String partitionName) { - this.dbName = tableName.getDb(); - this.tableName = tableName.getTbl(); - this.partitionName = partitionName; - } - - public String getDbName() { - return dbName; - } - - public String getTableName() { - return tableName; - } - - public String getPartitionName() { - return partitionName; - } - - public ProcNodeInterface getNode() { - return node; - } - - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { +import org.apache.logging.log4j.Logger; + +public class ShowPartitionsStmt extends ShowStmt { + private static final Logger LOG = LogManager.getLogger(ShowPartitionsStmt.class); + + private String dbName; + private String tableName; + private String partitionName; + + private ProcNodeInterface node; + + public ShowPartitionsStmt(TableName tableName, String partitionName) { + this.dbName = tableName.getDb(); + this.tableName = tableName.getTbl(); + this.partitionName = partitionName; + } + + public String getDbName() { + return dbName; + } + + public String getTableName() { + return tableName; + } + + public String getPartitionName() { + return partitionName; + } + + public ProcNodeInterface getNode() { + return node; + } + + @Override + public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { super.analyze(analyzer); - if (Strings.isNullOrEmpty(dbName)) { - dbName = analyzer.getDefaultDb(); - if (Strings.isNullOrEmpty(dbName)) { - throw new AnalysisException("No db name in show data statement."); - } - } else { - dbName = ClusterNamespace.getFullName(getClusterName(), dbName); - } - - // check access + if (Strings.isNullOrEmpty(dbName)) { + dbName = analyzer.getDefaultDb(); + if (Strings.isNullOrEmpty(dbName)) { + throw new AnalysisException("No db name in show data statement."); + } + } else { + dbName = ClusterNamespace.getFullName(getClusterName(), dbName); + } + + // check access if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(ConnectContext.get(), dbName, tableName, PrivPredicate.SHOW)) { ErrorReport.reportAnalysisException(ErrorCode.ERR_TABLEACCESS_DENIED_ERROR, "SHOW PARTITIONS", ConnectContext.get().getQualifiedUser(), ConnectContext.get().getRemoteIP(), tableName); - } - - Database db = Catalog.getInstance().getDb(dbName); - if (db == null) { - throw new AnalysisException("Database[" + dbName + "] does not exist"); - } - db.readLock(); - try { - Table table = db.getTable(tableName); - if (table == null || !(table instanceof OlapTable)) { - throw new AnalysisException("Table[" + tableName + "] does not exists or is not OLAP table"); - } - - // build proc path - StringBuilder stringBuilder = new StringBuilder(); - stringBuilder.append("/dbs/"); - stringBuilder.append(db.getId()); - stringBuilder.append("/").append(table.getId()); - stringBuilder.append("/").append("partitions"); - - 
LOG.debug("process SHOW PROC '{}';", stringBuilder.toString()); - - node = ProcService.getInstance().open(stringBuilder.toString()); - if (node == null) { - throw new AnalysisException("Failed to show partitions"); - } - } finally { - db.readUnlock(); - } - } - - @Override - public ShowResultSetMetaData getMetaData() { - ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); - - ProcResult result = null; - try { - result = node.fetchResult(); - } catch (AnalysisException e) { - return builder.build(); - } - - for (String col : result.getColumnNames()) { - builder.addColumn(new Column(col, ColumnType.createVarchar(30))); - } - return builder.build(); - } - -} + } + + Database db = Catalog.getInstance().getDb(dbName); + if (db == null) { + throw new AnalysisException("Database[" + dbName + "] does not exist"); + } + db.readLock(); + try { + Table table = db.getTable(tableName); + if (table == null || !(table instanceof OlapTable)) { + throw new AnalysisException("Table[" + tableName + "] does not exists or is not OLAP table"); + } + + // build proc path + StringBuilder stringBuilder = new StringBuilder(); + stringBuilder.append("/dbs/"); + stringBuilder.append(db.getId()); + stringBuilder.append("/").append(table.getId()); + stringBuilder.append("/").append("partitions"); + + LOG.debug("process SHOW PROC '{}';", stringBuilder.toString()); + + node = ProcService.getInstance().open(stringBuilder.toString()); + if (node == null) { + throw new AnalysisException("Failed to show partitions"); + } + } finally { + db.readUnlock(); + } + } + + @Override + public ShowResultSetMetaData getMetaData() { + ShowResultSetMetaData.Builder builder = ShowResultSetMetaData.builder(); + + ProcResult result = null; + try { + result = node.fetchResult(); + } catch (AnalysisException e) { + return builder.build(); + } + + for (String col : result.getColumnNames()) { + builder.addColumn(new Column(col, ColumnType.createVarchar(30))); + } + return builder.build(); + } + +} diff --git a/fe/src/com/baidu/palo/analysis/ShowWhiteListStmt.java b/fe/src/com/baidu/palo/analysis/ShowWhiteListStmt.java index 322283913f..2215098da3 100644 --- a/fe/src/com/baidu/palo/analysis/ShowWhiteListStmt.java +++ b/fe/src/com/baidu/palo/analysis/ShowWhiteListStmt.java @@ -13,25 +13,25 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.catalog.Column; -import com.baidu.palo.catalog.ColumnType; -import com.baidu.palo.qe.ShowResultSetMetaData; - -public class ShowWhiteListStmt extends ShowStmt { - private static final ShowResultSetMetaData META_DATA = - ShowResultSetMetaData.builder() - .addColumn(new Column("user_name", ColumnType.createVarchar(20))) - .addColumn(new Column("white_list", ColumnType.createVarchar(1000))) - .build(); - - @Override - public void analyze(Analyzer analyzer) { - } - - @Override - public ShowResultSetMetaData getMetaData() { - return META_DATA; - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.catalog.Column; +import com.baidu.palo.catalog.ColumnType; +import com.baidu.palo.qe.ShowResultSetMetaData; + +public class ShowWhiteListStmt extends ShowStmt { + private static final ShowResultSetMetaData META_DATA = + ShowResultSetMetaData.builder() + .addColumn(new Column("user_name", ColumnType.createVarchar(20))) + .addColumn(new Column("white_list", ColumnType.createVarchar(1000))) + .build(); + + @Override + public void analyze(Analyzer analyzer) { + } + + @Override + public ShowResultSetMetaData getMetaData() { + return META_DATA; + } +} diff --git a/fe/src/com/baidu/palo/analysis/SingleRangePartitionDesc.java b/fe/src/com/baidu/palo/analysis/SingleRangePartitionDesc.java index cb57885b99..cb1d387ce0 100644 --- a/fe/src/com/baidu/palo/analysis/SingleRangePartitionDesc.java +++ b/fe/src/com/baidu/palo/analysis/SingleRangePartitionDesc.java @@ -18,204 +18,204 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.catalog.DataProperty; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.FeConstants; -import com.baidu.palo.common.FeNameFormat; -import com.baidu.palo.common.Pair; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; -import com.baidu.palo.common.util.PrintableMap; -import com.baidu.palo.common.util.PropertyAnalyzer; - -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.base.Joiner.MapJoiner; -import com.google.common.collect.Maps; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -public class SingleRangePartitionDesc implements Writable { - private boolean isAnalyzed; - - private boolean ifNotExists; - - private String partName; - private PartitionKeyDesc partitionKeyDesc; - private Map properties; - - private DataProperty partitionDataProperty; - private Short replicationNum; - private Pair versionInfo; - - public SingleRangePartitionDesc() { - partitionKeyDesc = new PartitionKeyDesc(); - } - - public SingleRangePartitionDesc(boolean ifNotExists, String partName, PartitionKeyDesc partitionKeyDesc, - Map properties) { - this.ifNotExists = ifNotExists; - - this.isAnalyzed = false; - - this.partName = partName; - this.partitionKeyDesc = partitionKeyDesc; - this.properties = properties; - - this.partitionDataProperty = DataProperty.DEFAULT_HDD_DATA_PROPERTY; - this.replicationNum = FeConstants.default_replication_num; - } - - public boolean isSetIfNotExists() { - return ifNotExists; - } - - public String getPartitionName() { - return partName; - } - - public PartitionKeyDesc getPartitionKeyDesc() { - return partitionKeyDesc; - } - - public DataProperty getPartitionDataProperty() { - return partitionDataProperty; - } - - public 
short getReplicationNum() { - return replicationNum; - } - - public Pair getVersionInfo() { - return versionInfo; - } - - public Map getProperties() { - return this.properties; - } - - public void analyze(int partColNum, Map otherProperties) throws AnalysisException { - if (isAnalyzed) { - return; - } - - FeNameFormat.checkPartitionName(partName); - - if (!partitionKeyDesc.isMax()) { - if (partitionKeyDesc.getUpperValues().isEmpty() || partitionKeyDesc.getUpperValues().size() > partColNum) { - throw new AnalysisException("Invalid partition value number: " + partitionKeyDesc.toSql()); - } - } - - if (otherProperties != null) { - // use given properties - if (properties != null && !properties.isEmpty()) { - MapJoiner mapJoiner = Joiner.on(", ").withKeyValueSeparator(" = "); - throw new AnalysisException("Unknown properties: " + mapJoiner.join(properties)); - } - - this.properties = otherProperties; - } - - // analyze data property - partitionDataProperty = PropertyAnalyzer.analyzeDataProperty(properties, - DataProperty.DEFAULT_HDD_DATA_PROPERTY); - Preconditions.checkNotNull(partitionDataProperty); - - // analyze replication num - replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, FeConstants.default_replication_num); - if (replicationNum == null) { - throw new AnalysisException("Invalid replication number: " + replicationNum); - } - - // analyze version info - versionInfo = PropertyAnalyzer.analyzeVersionInfo(properties); - - if (otherProperties == null) { - // check unknown properties - if (properties != null && !properties.isEmpty()) { - MapJoiner mapJoiner = Joiner.on(", ").withKeyValueSeparator(" = "); - throw new AnalysisException("Unknown properties: " + mapJoiner.join(properties)); - } - } - - this.isAnalyzed = true; - } - - public boolean isAnalyzed() { - return this.isAnalyzed; - } - - public String toSql() { - StringBuilder sb = new StringBuilder(); - sb.append("PARTITION "); - sb.append(partName + " VALUES LESS THEN "); - sb.append(partitionKeyDesc.toSql()); - - if (properties != null && !properties.isEmpty()) { - sb.append(" ("); - sb.append(new PrintableMap(properties, "=", true, false)); - sb.append(")"); - } - - return sb.toString(); - } - - @Override - public String toString() { - return toSql(); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeBoolean(isAnalyzed); - out.writeBoolean(ifNotExists); - Text.writeString(out, partName); - - partitionKeyDesc.write(out); - - if (properties == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - int count = properties.size(); - out.writeInt(count); - for (Map.Entry enytry : properties.entrySet()) { - Text.writeString(out, enytry.getKey()); - Text.writeString(out, enytry.getValue()); - } - } - - partitionDataProperty.write(out); - out.writeShort(replicationNum); - } - - @Override - public void readFields(DataInput in) throws IOException { - isAnalyzed = in.readBoolean(); - ifNotExists = in.readBoolean(); - partName = Text.readString(in); - - partitionKeyDesc.readFields(in); - - boolean hasProp = in.readBoolean(); - if (hasProp) { - properties = Maps.newHashMap(); - int count = in.readInt(); - for (int i = 0; i < count; i++) { - String key = Text.readString(in); - String value = Text.readString(in); - properties.put(key, value); - } - } - - partitionDataProperty = DataProperty.read(in); - replicationNum = in.readShort(); - } -} +package com.baidu.palo.analysis; + +import com.baidu.palo.catalog.DataProperty; +import com.baidu.palo.common.AnalysisException; 
+import com.baidu.palo.common.FeConstants; +import com.baidu.palo.common.FeNameFormat; +import com.baidu.palo.common.Pair; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; +import com.baidu.palo.common.util.PrintableMap; +import com.baidu.palo.common.util.PropertyAnalyzer; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.base.Joiner.MapJoiner; +import com.google.common.collect.Maps; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class SingleRangePartitionDesc implements Writable { + private boolean isAnalyzed; + + private boolean ifNotExists; + + private String partName; + private PartitionKeyDesc partitionKeyDesc; + private Map properties; + + private DataProperty partitionDataProperty; + private Short replicationNum; + private Pair versionInfo; + + public SingleRangePartitionDesc() { + partitionKeyDesc = new PartitionKeyDesc(); + } + + public SingleRangePartitionDesc(boolean ifNotExists, String partName, PartitionKeyDesc partitionKeyDesc, + Map properties) { + this.ifNotExists = ifNotExists; + + this.isAnalyzed = false; + + this.partName = partName; + this.partitionKeyDesc = partitionKeyDesc; + this.properties = properties; + + this.partitionDataProperty = DataProperty.DEFAULT_HDD_DATA_PROPERTY; + this.replicationNum = FeConstants.default_replication_num; + } + + public boolean isSetIfNotExists() { + return ifNotExists; + } + + public String getPartitionName() { + return partName; + } + + public PartitionKeyDesc getPartitionKeyDesc() { + return partitionKeyDesc; + } + + public DataProperty getPartitionDataProperty() { + return partitionDataProperty; + } + + public short getReplicationNum() { + return replicationNum; + } + + public Pair getVersionInfo() { + return versionInfo; + } + + public Map getProperties() { + return this.properties; + } + + public void analyze(int partColNum, Map otherProperties) throws AnalysisException { + if (isAnalyzed) { + return; + } + + FeNameFormat.checkPartitionName(partName); + + if (!partitionKeyDesc.isMax()) { + if (partitionKeyDesc.getUpperValues().isEmpty() || partitionKeyDesc.getUpperValues().size() > partColNum) { + throw new AnalysisException("Invalid partition value number: " + partitionKeyDesc.toSql()); + } + } + + if (otherProperties != null) { + // use given properties + if (properties != null && !properties.isEmpty()) { + MapJoiner mapJoiner = Joiner.on(", ").withKeyValueSeparator(" = "); + throw new AnalysisException("Unknown properties: " + mapJoiner.join(properties)); + } + + this.properties = otherProperties; + } + + // analyze data property + partitionDataProperty = PropertyAnalyzer.analyzeDataProperty(properties, + DataProperty.DEFAULT_HDD_DATA_PROPERTY); + Preconditions.checkNotNull(partitionDataProperty); + + // analyze replication num + replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, FeConstants.default_replication_num); + if (replicationNum == null) { + throw new AnalysisException("Invalid replication number: " + replicationNum); + } + + // analyze version info + versionInfo = PropertyAnalyzer.analyzeVersionInfo(properties); + + if (otherProperties == null) { + // check unknown properties + if (properties != null && !properties.isEmpty()) { + MapJoiner mapJoiner = Joiner.on(", ").withKeyValueSeparator(" = "); + throw new AnalysisException("Unknown properties: " + mapJoiner.join(properties)); + } + } + + this.isAnalyzed = 
true; + } + + public boolean isAnalyzed() { + return this.isAnalyzed; + } + + public String toSql() { + StringBuilder sb = new StringBuilder(); + sb.append("PARTITION "); + sb.append(partName + " VALUES LESS THEN "); + sb.append(partitionKeyDesc.toSql()); + + if (properties != null && !properties.isEmpty()) { + sb.append(" ("); + sb.append(new PrintableMap(properties, "=", true, false)); + sb.append(")"); + } + + return sb.toString(); + } + + @Override + public String toString() { + return toSql(); + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeBoolean(isAnalyzed); + out.writeBoolean(ifNotExists); + Text.writeString(out, partName); + + partitionKeyDesc.write(out); + + if (properties == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + int count = properties.size(); + out.writeInt(count); + for (Map.Entry enytry : properties.entrySet()) { + Text.writeString(out, enytry.getKey()); + Text.writeString(out, enytry.getValue()); + } + } + + partitionDataProperty.write(out); + out.writeShort(replicationNum); + } + + @Override + public void readFields(DataInput in) throws IOException { + isAnalyzed = in.readBoolean(); + ifNotExists = in.readBoolean(); + partName = Text.readString(in); + + partitionKeyDesc.readFields(in); + + boolean hasProp = in.readBoolean(); + if (hasProp) { + properties = Maps.newHashMap(); + int count = in.readInt(); + for (int i = 0; i < count; i++) { + String key = Text.readString(in); + String value = Text.readString(in); + properties.put(key, value); + } + } + + partitionDataProperty = DataProperty.read(in); + replicationNum = in.readShort(); + } +} diff --git a/fe/src/com/baidu/palo/analysis/Subquery.java b/fe/src/com/baidu/palo/analysis/Subquery.java index cf34d41d88..46aab83ea7 100644 --- a/fe/src/com/baidu/palo/analysis/Subquery.java +++ b/fe/src/com/baidu/palo/analysis/Subquery.java @@ -18,170 +18,170 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import java.util.ArrayList; -import java.util.List; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.baidu.palo.catalog.ArrayType; -import com.baidu.palo.catalog.StructField; -import com.baidu.palo.catalog.StructType; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.InternalException; - - -import com.baidu.palo.thrift.TExprNode; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; - -/** - * Class representing a subquery. A Subquery consists of a QueryStmt and has - * its own Analyzer context. - */ -public class Subquery extends Expr { - private final static Logger LOG = LoggerFactory.getLogger(Subquery.class); - - // The QueryStmt of the subquery. - protected QueryStmt stmt; - // A subquery has its own analysis context - protected Analyzer analyzer; - - public Analyzer getAnalyzer() { return analyzer; } - public QueryStmt getStatement() { return stmt; } - - @Override - public String toSql() { return "(" + stmt.toSql() + ")"; } - - /** - * C'tor that initializes a Subquery from a QueryStmt. - */ - public Subquery(QueryStmt queryStmt) { - super(); - Preconditions.checkNotNull(queryStmt); - stmt = queryStmt; - stmt.setNeedToSql(true); - } - - /** - * Copy c'tor. - */ - public Subquery(Subquery other) { - super(other); - stmt = other.stmt.clone(); - analyzer = other.analyzer; - } - - /** - * Analyzes the subquery in a child analyzer. 
- */ - @Override - public void analyzeImpl(Analyzer parentAnalyzer) throws AnalysisException { - if (!(stmt instanceof SelectStmt)) { - throw new AnalysisException("A subquery must contain a single select block: " + - toSql()); - } - // The subquery is analyzed with its own analyzer. - analyzer = new Analyzer(parentAnalyzer); - analyzer.setIsSubquery(); - try { - stmt.analyze(analyzer); - } catch (InternalException e) { - throw new AnalysisException(e.getMessage()); - } - // Check whether the stmt_ contains an illegal mix of un/correlated table refs. - stmt.getCorrelatedTupleIds(analyzer); - - // Set the subquery type based on the types of the exprs in the - // result list of the associated SelectStmt. - ArrayList stmtResultExprs = stmt.getResultExprs(); - if (stmtResultExprs.size() == 1) { - type = stmtResultExprs.get(0).getType(); - Preconditions.checkState(!type.isComplexType()); - } else { - type = createStructTypeFromExprList(); - } - - // If the subquery returns many rows, set its type to ArrayType. - if (!((SelectStmt)stmt).returnsSingleRow()) type = new ArrayType(type); - - // Preconditions.checkNotNull(type); - // type.analyze(); - } - - @Override - protected boolean isConstantImpl() { return false; } - - /** - * Check if the subquery's SelectStmt returns a single column of scalar type. - */ - public boolean returnsScalarColumn() { - ArrayList stmtResultExprs = stmt.getResultExprs(); - if (stmtResultExprs.size() == 1 && stmtResultExprs.get(0).getType().isScalarType()) { - return true; - } - return false; - } - - /** - * Create a StrucType from the result expr list of a subquery's SelectStmt. - */ - private StructType createStructTypeFromExprList() { - List stmtResultExprs = stmt.getResultExprs(); - ArrayList structFields = Lists.newArrayList(); - // Check if we have unique labels - List labels = stmt.getColLabels(); - boolean hasUniqueLabels = true; - if (Sets.newHashSet(labels).size() != labels.size()) hasUniqueLabels = false; - - // Construct a StructField from each expr in the select list - for (int i = 0; i < stmtResultExprs.size(); ++i) { - Expr expr = stmtResultExprs.get(i); - String fieldName = null; - // Check if the label meets the Metastore's requirements. - // TODO(zc) - // if (MetastoreShim.validateName(labels.get(i))) { - if (false) { - fieldName = labels.get(i); - // Make sure the field names are unique. - if (!hasUniqueLabels) { - fieldName = "_" + Integer.toString(i) + "_" + fieldName; - } - } else { - // Use the expr ordinal to construct a StructField. - fieldName = "_" + Integer.toString(i); - } - Preconditions.checkNotNull(fieldName); - structFields.add(new StructField(fieldName, expr.getType(), null)); - } - Preconditions.checkState(structFields.size() != 0); - return new StructType(structFields); - } - - /** - * Returns true if the toSql() of the Subqueries is identical. May return false for - * equivalent statements even due to minor syntactic differences like parenthesis. - * TODO: Switch to a less restrictive implementation. 
- */ - @Override - public boolean equals(Object o) { - if (!super.equals(o)) return false; - return stmt.toSql().equals(((Subquery)o).stmt.toSql()); - } - - @Override - public Subquery clone() { - Subquery ret = new Subquery(this); - LOG.debug("SUBQUERY clone old={} new={}", - System.identityHashCode(this), - System.identityHashCode(ret)); - return ret; - } - - @Override - protected void toThrift(TExprNode msg) {} -} - +package com.baidu.palo.analysis; + +import java.util.ArrayList; +import java.util.List; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.baidu.palo.catalog.ArrayType; +import com.baidu.palo.catalog.StructField; +import com.baidu.palo.catalog.StructType; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.InternalException; + + +import com.baidu.palo.thrift.TExprNode; +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; + +/** + * Class representing a subquery. A Subquery consists of a QueryStmt and has + * its own Analyzer context. + */ +public class Subquery extends Expr { + private final static Logger LOG = LoggerFactory.getLogger(Subquery.class); + + // The QueryStmt of the subquery. + protected QueryStmt stmt; + // A subquery has its own analysis context + protected Analyzer analyzer; + + public Analyzer getAnalyzer() { return analyzer; } + public QueryStmt getStatement() { return stmt; } + + @Override + public String toSql() { return "(" + stmt.toSql() + ")"; } + + /** + * C'tor that initializes a Subquery from a QueryStmt. + */ + public Subquery(QueryStmt queryStmt) { + super(); + Preconditions.checkNotNull(queryStmt); + stmt = queryStmt; + stmt.setNeedToSql(true); + } + + /** + * Copy c'tor. + */ + public Subquery(Subquery other) { + super(other); + stmt = other.stmt.clone(); + analyzer = other.analyzer; + } + + /** + * Analyzes the subquery in a child analyzer. + */ + @Override + public void analyzeImpl(Analyzer parentAnalyzer) throws AnalysisException { + if (!(stmt instanceof SelectStmt)) { + throw new AnalysisException("A subquery must contain a single select block: " + + toSql()); + } + // The subquery is analyzed with its own analyzer. + analyzer = new Analyzer(parentAnalyzer); + analyzer.setIsSubquery(); + try { + stmt.analyze(analyzer); + } catch (InternalException e) { + throw new AnalysisException(e.getMessage()); + } + // Check whether the stmt_ contains an illegal mix of un/correlated table refs. + stmt.getCorrelatedTupleIds(analyzer); + + // Set the subquery type based on the types of the exprs in the + // result list of the associated SelectStmt. + ArrayList stmtResultExprs = stmt.getResultExprs(); + if (stmtResultExprs.size() == 1) { + type = stmtResultExprs.get(0).getType(); + Preconditions.checkState(!type.isComplexType()); + } else { + type = createStructTypeFromExprList(); + } + + // If the subquery returns many rows, set its type to ArrayType. + if (!((SelectStmt)stmt).returnsSingleRow()) type = new ArrayType(type); + + // Preconditions.checkNotNull(type); + // type.analyze(); + } + + @Override + protected boolean isConstantImpl() { return false; } + + /** + * Check if the subquery's SelectStmt returns a single column of scalar type. 
+ */ + public boolean returnsScalarColumn() { + ArrayList stmtResultExprs = stmt.getResultExprs(); + if (stmtResultExprs.size() == 1 && stmtResultExprs.get(0).getType().isScalarType()) { + return true; + } + return false; + } + + /** + * Create a StrucType from the result expr list of a subquery's SelectStmt. + */ + private StructType createStructTypeFromExprList() { + List stmtResultExprs = stmt.getResultExprs(); + ArrayList structFields = Lists.newArrayList(); + // Check if we have unique labels + List labels = stmt.getColLabels(); + boolean hasUniqueLabels = true; + if (Sets.newHashSet(labels).size() != labels.size()) hasUniqueLabels = false; + + // Construct a StructField from each expr in the select list + for (int i = 0; i < stmtResultExprs.size(); ++i) { + Expr expr = stmtResultExprs.get(i); + String fieldName = null; + // Check if the label meets the Metastore's requirements. + // TODO(zc) + // if (MetastoreShim.validateName(labels.get(i))) { + if (false) { + fieldName = labels.get(i); + // Make sure the field names are unique. + if (!hasUniqueLabels) { + fieldName = "_" + Integer.toString(i) + "_" + fieldName; + } + } else { + // Use the expr ordinal to construct a StructField. + fieldName = "_" + Integer.toString(i); + } + Preconditions.checkNotNull(fieldName); + structFields.add(new StructField(fieldName, expr.getType(), null)); + } + Preconditions.checkState(structFields.size() != 0); + return new StructType(structFields); + } + + /** + * Returns true if the toSql() of the Subqueries is identical. May return false for + * equivalent statements even due to minor syntactic differences like parenthesis. + * TODO: Switch to a less restrictive implementation. + */ + @Override + public boolean equals(Object o) { + if (!super.equals(o)) return false; + return stmt.toSql().equals(((Subquery)o).stmt.toSql()); + } + + @Override + public Subquery clone() { + Subquery ret = new Subquery(this); + LOG.debug("SUBQUERY clone old={} new={}", + System.identityHashCode(this), + System.identityHashCode(ret)); + return ret; + } + + @Override + protected void toThrift(TExprNode msg) {} +} + diff --git a/fe/src/com/baidu/palo/analysis/SyncStmt.java b/fe/src/com/baidu/palo/analysis/SyncStmt.java index 2650b6cda1..fd64c879fa 100644 --- a/fe/src/com/baidu/palo/analysis/SyncStmt.java +++ b/fe/src/com/baidu/palo/analysis/SyncStmt.java @@ -18,21 +18,21 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.InternalException; - -public class SyncStmt extends DdlStmt { - @Override - public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { -// if (analyzer.getCatalog().isMaster()) { -// throw new AnalysisException("No need to Sync, for you are master"); -// } - } - - @Override - public RedirectStatus getRedirectStatus() { - return RedirectStatus.FORWARD_WITH_SYNC; - } +package com.baidu.palo.analysis; + +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.InternalException; + +public class SyncStmt extends DdlStmt { + @Override + public void analyze(Analyzer analyzer) throws AnalysisException, InternalException { +// if (analyzer.getCatalog().isMaster()) { +// throw new AnalysisException("No need to Sync, for you are master"); +// } + } + + @Override + public RedirectStatus getRedirectStatus() { + return RedirectStatus.FORWARD_WITH_SYNC; + } } \ No newline at end of file diff --git a/fe/src/com/baidu/palo/catalog/AccessPrivilege.java b/fe/src/com/baidu/palo/catalog/AccessPrivilege.java index 0ccff2ab16..29bc88fa10 100644 --- a/fe/src/com/baidu/palo/catalog/AccessPrivilege.java +++ b/fe/src/com/baidu/palo/catalog/AccessPrivilege.java @@ -39,7 +39,8 @@ public enum AccessPrivilege { LOAD_PRIV(7, "Privilege for loading data into tables"), ALTER_PRIV(8, "Privilege for alter database or table"), CREATE_PRIV(9, "Privilege for createing database or table"), - DROP_PRIV(10, "Privilege for dropping database or table"); + DROP_PRIV(10, "Privilege for dropping database or table"), + ADMIN_PRIV(11, "All privileges except NODE_PRIV"); private int flag; private String desc; @@ -50,7 +51,7 @@ public enum AccessPrivilege { } public PrivBitSet toPaloPrivilege() { - Preconditions.checkState(flag > 0 && flag < 11); + Preconditions.checkState(flag > 0 && flag < 12); switch (flag) { case 1: return PrivBitSet.of(PaloPrivilege.SELECT_PRIV); @@ -73,19 +74,13 @@ public enum AccessPrivilege { return PrivBitSet.of(PaloPrivilege.CREATE_PRIV); case 10: return PrivBitSet.of(PaloPrivilege.DROP_PRIV); + case 11: + return PrivBitSet.of(PaloPrivilege.ADMIN_PRIV); default: return null; } } - public static boolean contains(AccessPrivilege p1, AccessPrivilege p2) { - return p1.flag >= p2.flag; - } - - public boolean contains(AccessPrivilege priv) { - return contains(this, priv); - } - public static AccessPrivilege fromName(String privStr) { try { return AccessPrivilege.valueOf(privStr.toUpperCase()); diff --git a/fe/src/com/baidu/palo/catalog/Catalog.java b/fe/src/com/baidu/palo/catalog/Catalog.java index 7ea7b5ff4f..561899639d 100644 --- a/fe/src/com/baidu/palo/catalog/Catalog.java +++ b/fe/src/com/baidu/palo/catalog/Catalog.java @@ -410,7 +410,6 @@ public class Catalog { this.auth = new PaloAuth(); this.domainResolver = new DomainResolver(auth); - this.domainResolver.start(); } public static void destroyCheckpoint() { @@ -979,6 +978,7 @@ public class Catalog { deployManager.start(); } + domainResolver.start(); MetricRepo.init(); } @@ -1014,6 +1014,7 @@ public class Catalog { } formerFeType = feType; + domainResolver.start(); MetricRepo.init(); } @@ -1288,7 +1289,7 @@ public class Catalog { // This job must be FINISHED or CANCELLED if ((currentTimeMs - job.getCreateTimeMs()) / 1000 <= Config.label_keep_max_second || (job.getState() != JobState.FINISHED && job.getState() != JobState.CANCELLED)) { - load.unprotectAddLoadJob(job); + load.unprotectAddLoadJob(job, true 
/* replay */); } } } diff --git a/fe/src/com/baidu/palo/catalog/ColumnType.java b/fe/src/com/baidu/palo/catalog/ColumnType.java index 17ceb751d7..ed3ae17a88 100755 --- a/fe/src/com/baidu/palo/catalog/ColumnType.java +++ b/fe/src/com/baidu/palo/catalog/ColumnType.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.FeMetaVersion; import com.baidu.palo.common.io.Text; @@ -28,175 +28,175 @@ import com.baidu.palo.thrift.TColumnType; import java.io.DataInput; import java.io.DataOutput; -import java.io.IOException; - -/** - * 这个是对Column类型的一个å°è£…,对于大多数类型,primitive type足够了,这里有两个例外需è¦ç”¨åˆ°è¿™ä¸ªä¿¡æ¯ - * 1. 对于decimal,characterè¿™ç§æœ‰ä¸€äº›é™„加信æ¯çš„ - * 2. 如果在未æ¥éœ€è¦å¢žåŠ åµŒå¥—ç±»åž‹ï¼Œé‚£ä¹ˆè¿™ä¸ªColumnType就是必须的了 - */ -public class ColumnType implements Writable { - private static final int VAR_CHAR_UPPER_LIMIT = 65533; - private static Boolean[][] schemaChangeMatrix; - - static { - schemaChangeMatrix = new Boolean[PrimitiveType.BINARY.ordinal() + 1][PrimitiveType.BINARY.ordinal() + 1]; - - for (int i = 0; i < schemaChangeMatrix.length; i++) { - for (int j = 0; j < schemaChangeMatrix[i].length; j++) { - if (i == j) { - schemaChangeMatrix[i][j] = true; - } else { - schemaChangeMatrix[i][j] = false; - } - } - } - - schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.SMALLINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.INT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; - - schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.INT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; - - schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; - - schemaChangeMatrix[PrimitiveType.BIGINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.BIGINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; - - schemaChangeMatrix[PrimitiveType.CHAR.ordinal()][PrimitiveType.VARCHAR.ordinal()] = true; - schemaChangeMatrix[PrimitiveType.CHAR.ordinal()][PrimitiveType.CHAR.ordinal()] = true; - - schemaChangeMatrix[PrimitiveType.DATE.ordinal()][PrimitiveType.DATETIME.ordinal()] = true; - } - - private PrimitiveType type; - - // Unused if type is always the same length. 
- private int len; - - // Used for decimal(precision, scale) - // precision: maximum number of digits - // scale: the number of digits to the right of the decimal point - private int precision; - private int scale; - // used for limiting varchar size - private boolean varcharLimit = true; - - public ColumnType() { - this.type = PrimitiveType.NULL_TYPE; - } - - public ColumnType(PrimitiveType type) { - this(type, -1, -1, -1); - } - - public ColumnType(PrimitiveType type, int len, int precision, int scale) { - this.type = type; - this.len = len; - this.precision = precision; - this.scale = scale; - if (this.type == null) { - this.type = PrimitiveType.NULL_TYPE; - } - } - - // This is used for built-in function to create intermediate type - public static ColumnType createInterType(PrimitiveType type) { - switch (type) { - case BOOLEAN: - case TINYINT: - case SMALLINT: - case INT: - case BIGINT: - case LARGEINT: - case FLOAT: - case DOUBLE: - case DATE: - case DATETIME: - return createType(type); - case DECIMAL: - return createDecimal(27, 9); - case CHAR: - case VARCHAR: - return createVarchar(64); - case HLL: - return createHll(); - default: - return null; - } - } - - public static ColumnType createType(PrimitiveType type) { - return new ColumnType(type); - } - - public static ColumnType createVarchar(int len) { - ColumnType type = new ColumnType(PrimitiveType.VARCHAR); - type.len = len; - return type; - } - - public static ColumnType createHll() { - ColumnType type = new ColumnType(PrimitiveType.HLL); - type.len = ScalarType.MAX_HLL_LENGTH; - return type; - } - - // Create varchar type - public static ColumnType createChar(int len) { - ColumnType type = new ColumnType(PrimitiveType.CHAR); - type.len = len; - return type; - } - - public static ColumnType createDecimal(int precision, int scale) { - ColumnType type = new ColumnType(PrimitiveType.DECIMAL); - type.precision = precision; - type.scale = scale; - return type; - } - - public PrimitiveType getType() { - return type; - } - - public int getLen() { - return len; - } - - public void setLen(int len) { - this.len = len; - } - - public void setPrecision(int precision) { - this.precision = precision; - } - - public void setVarcharLimit(boolean value) { - this.varcharLimit = value; - } - - public int getPrecision() { - return precision; - } - - public int getScale() { - return scale; - } - - public void setScale(int scale) { - this.scale = scale; - } - - public boolean isString() { - return type == PrimitiveType.CHAR || type == PrimitiveType.VARCHAR || type == PrimitiveType.HLL; +import java.io.IOException; + +/** + * 这个是对Column类型的一个å°è£…,对于大多数类型,primitive type足够了,这里有两个例外需è¦ç”¨åˆ°è¿™ä¸ªä¿¡æ¯ + * 1. 对于decimal,characterè¿™ç§æœ‰ä¸€äº›é™„加信æ¯çš„ + * 2. 
如果在未æ¥éœ€è¦å¢žåŠ åµŒå¥—ç±»åž‹ï¼Œé‚£ä¹ˆè¿™ä¸ªColumnType就是必须的了 + */ +public class ColumnType implements Writable { + private static final int VAR_CHAR_UPPER_LIMIT = 65533; + private static Boolean[][] schemaChangeMatrix; + + static { + schemaChangeMatrix = new Boolean[PrimitiveType.BINARY.ordinal() + 1][PrimitiveType.BINARY.ordinal() + 1]; + + for (int i = 0; i < schemaChangeMatrix.length; i++) { + for (int j = 0; j < schemaChangeMatrix[i].length; j++) { + if (i == j) { + schemaChangeMatrix[i][j] = true; + } else { + schemaChangeMatrix[i][j] = false; + } + } + } + + schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.SMALLINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.INT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.TINYINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; + + schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.INT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.SMALLINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; + + schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.BIGINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.INT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; + + schemaChangeMatrix[PrimitiveType.BIGINT.ordinal()][PrimitiveType.LARGEINT.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.BIGINT.ordinal()][PrimitiveType.DOUBLE.ordinal()] = true; + + schemaChangeMatrix[PrimitiveType.CHAR.ordinal()][PrimitiveType.VARCHAR.ordinal()] = true; + schemaChangeMatrix[PrimitiveType.CHAR.ordinal()][PrimitiveType.CHAR.ordinal()] = true; + + schemaChangeMatrix[PrimitiveType.DATE.ordinal()][PrimitiveType.DATETIME.ordinal()] = true; + } + + private PrimitiveType type; + + // Unused if type is always the same length. 
+ private int len; + + // Used for decimal(precision, scale) + // precision: maximum number of digits + // scale: the number of digits to the right of the decimal point + private int precision; + private int scale; + // used for limiting varchar size + private boolean varcharLimit = true; + + public ColumnType() { + this.type = PrimitiveType.NULL_TYPE; + } + + public ColumnType(PrimitiveType type) { + this(type, -1, -1, -1); + } + + public ColumnType(PrimitiveType type, int len, int precision, int scale) { + this.type = type; + this.len = len; + this.precision = precision; + this.scale = scale; + if (this.type == null) { + this.type = PrimitiveType.NULL_TYPE; + } + } + + // This is used for built-in function to create intermediate type + public static ColumnType createInterType(PrimitiveType type) { + switch (type) { + case BOOLEAN: + case TINYINT: + case SMALLINT: + case INT: + case BIGINT: + case LARGEINT: + case FLOAT: + case DOUBLE: + case DATE: + case DATETIME: + return createType(type); + case DECIMAL: + return createDecimal(27, 9); + case CHAR: + case VARCHAR: + return createVarchar(64); + case HLL: + return createHll(); + default: + return null; + } + } + + public static ColumnType createType(PrimitiveType type) { + return new ColumnType(type); + } + + public static ColumnType createVarchar(int len) { + ColumnType type = new ColumnType(PrimitiveType.VARCHAR); + type.len = len; + return type; + } + + public static ColumnType createHll() { + ColumnType type = new ColumnType(PrimitiveType.HLL); + type.len = ScalarType.MAX_HLL_LENGTH; + return type; + } + + // Create varchar type + public static ColumnType createChar(int len) { + ColumnType type = new ColumnType(PrimitiveType.CHAR); + type.len = len; + return type; + } + + public static ColumnType createDecimal(int precision, int scale) { + ColumnType type = new ColumnType(PrimitiveType.DECIMAL); + type.precision = precision; + type.scale = scale; + return type; + } + + public PrimitiveType getType() { + return type; + } + + public int getLen() { + return len; + } + + public void setLen(int len) { + this.len = len; + } + + public void setPrecision(int precision) { + this.precision = precision; + } + + public void setVarcharLimit(boolean value) { + this.varcharLimit = value; + } + + public int getPrecision() { + return precision; + } + + public int getScale() { + return scale; + } + + public void setScale(int scale) { + this.scale = scale; + } + + public boolean isString() { + return type == PrimitiveType.CHAR || type == PrimitiveType.VARCHAR || type == PrimitiveType.HLL; } public int getMemlayoutBytes() { @@ -231,165 +231,165 @@ public class ColumnType implements Writable { default: return 0; } - } - - public void analyze() throws AnalysisException { - if (type == PrimitiveType.INVALID_TYPE) { - throw new AnalysisException("Invalid type."); - } - - // check parameter valid - switch (type) { - case CHAR: - if (len <= 0 || len > 255) { - throw new AnalysisException("Char size must between 1~255." - + " Size was set to: " + len + "."); - } - break; - case VARCHAR: - if (varcharLimit) { - if (len <= 0 || len > VAR_CHAR_UPPER_LIMIT) { - throw new AnalysisException("when engine=olap, varchar size must between 1~65533." - + " Size was set to: " + len + "."); - } - } else { - if (len <= 0) { - throw new AnalysisException("When engine=mysql, varchar size must be great than 1."); - } - } - break; - case HLL: - if (len <= 0 || len > 65533) { - throw new AnalysisException("Hll size must between 1~65533." 
- + " Size was set to: " + len + "."); - } - break; - case DECIMAL: - // precision: [1, 27] - if (precision < 1 || precision > 27) { - throw new AnalysisException("Precision of decimal must between 1 and 27." - + " Precision was set to: " + precision + "."); - } - // scale: [0, 9] - if (scale < 0 || scale > 9) { - throw new AnalysisException("Scale of decimal must between 0 and 9." - + " Scale was set to: " + scale + "."); - } - // scale < precision - if (scale >= precision) { - throw new AnalysisException("Scale of decimal must be smaller than precision." - + " Scale is " + scale + " and precision is " + precision); - } - break; - default: - // do nothing - } - } - - - public boolean isSchemaChangeAllowed(ColumnType other) { - return schemaChangeMatrix[type.ordinal()][other.type.ordinal()]; - } - - public String toSql() { - StringBuilder stringBuilder = new StringBuilder(); - switch (type) { - case CHAR: - stringBuilder.append("char").append("(").append(len).append(")"); - break; - case VARCHAR: - stringBuilder.append("varchar").append("(").append(len).append(")"); - break; - case DECIMAL: - stringBuilder.append("decimal").append("(").append(precision).append(", ").append(scale).append(")"); - break; - case BOOLEAN: - stringBuilder.append("tinyint(1)"); - break; - case TINYINT: - stringBuilder.append("tinyint(4)"); - break; - case SMALLINT: - stringBuilder.append("smallint(6)"); - break; - case INT: - stringBuilder.append("int(11)"); - break; - case BIGINT: - stringBuilder.append("bigint(20)"); - break; - case LARGEINT: - stringBuilder.append("largeint(40)"); - break; - case FLOAT: - case DOUBLE: - case DATE: - case DATETIME: - case HLL: - stringBuilder.append(type.toString().toLowerCase()); - break; - default: - stringBuilder.append("unknown"); - break; - } - return stringBuilder.toString(); - } - - public TColumnType toThrift() { - TColumnType thrift = new TColumnType(); - thrift.type = type.toThrift(); - if (type == PrimitiveType.CHAR || type == PrimitiveType.VARCHAR || type == PrimitiveType.HLL) { - thrift.setLen(len); - } - if (type == PrimitiveType.DECIMAL) { - thrift.setPrecision(precision); - thrift.setScale(scale); - } - return thrift; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof ColumnType)) { - return false; - } - ColumnType other = (ColumnType) o; - if (type != other.type) { - return false; - } - if (type == PrimitiveType.DECIMAL) { - return scale == other.scale && precision == other.precision; - } else if (type == PrimitiveType.CHAR) { - return len == other.len; - } else { - return true; - } - } - - @Override - public String toString() { - return toSql(); - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, type.name()); - out.writeInt(scale); - out.writeInt(precision); - out.writeInt(len); - out.writeBoolean(varcharLimit); - } - - @Override - public void readFields(DataInput in) throws IOException { - type = PrimitiveType.valueOf(Text.readString(in)); - scale = in.readInt(); - precision = in.readInt(); - len = in.readInt(); - if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_22) { - varcharLimit = in.readBoolean(); - } - } - -} - + } + + public void analyze() throws AnalysisException { + if (type == PrimitiveType.INVALID_TYPE) { + throw new AnalysisException("Invalid type."); + } + + // check parameter valid + switch (type) { + case CHAR: + if (len <= 0 || len > 255) { + throw new AnalysisException("Char size must between 1~255." 
+ + " Size was set to: " + len + "."); + } + break; + case VARCHAR: + if (varcharLimit) { + if (len <= 0 || len > VAR_CHAR_UPPER_LIMIT) { + throw new AnalysisException("when engine=olap, varchar size must between 1~65533." + + " Size was set to: " + len + "."); + } + } else { + if (len <= 0) { + throw new AnalysisException("When engine=mysql, varchar size must be great than 1."); + } + } + break; + case HLL: + if (len <= 0 || len > 65533) { + throw new AnalysisException("Hll size must between 1~65533." + + " Size was set to: " + len + "."); + } + break; + case DECIMAL: + // precision: [1, 27] + if (precision < 1 || precision > 27) { + throw new AnalysisException("Precision of decimal must between 1 and 27." + + " Precision was set to: " + precision + "."); + } + // scale: [0, 9] + if (scale < 0 || scale > 9) { + throw new AnalysisException("Scale of decimal must between 0 and 9." + + " Scale was set to: " + scale + "."); + } + // scale < precision + if (scale >= precision) { + throw new AnalysisException("Scale of decimal must be smaller than precision." + + " Scale is " + scale + " and precision is " + precision); + } + break; + default: + // do nothing + } + } + + + public boolean isSchemaChangeAllowed(ColumnType other) { + return schemaChangeMatrix[type.ordinal()][other.type.ordinal()]; + } + + public String toSql() { + StringBuilder stringBuilder = new StringBuilder(); + switch (type) { + case CHAR: + stringBuilder.append("char").append("(").append(len).append(")"); + break; + case VARCHAR: + stringBuilder.append("varchar").append("(").append(len).append(")"); + break; + case DECIMAL: + stringBuilder.append("decimal").append("(").append(precision).append(", ").append(scale).append(")"); + break; + case BOOLEAN: + stringBuilder.append("tinyint(1)"); + break; + case TINYINT: + stringBuilder.append("tinyint(4)"); + break; + case SMALLINT: + stringBuilder.append("smallint(6)"); + break; + case INT: + stringBuilder.append("int(11)"); + break; + case BIGINT: + stringBuilder.append("bigint(20)"); + break; + case LARGEINT: + stringBuilder.append("largeint(40)"); + break; + case FLOAT: + case DOUBLE: + case DATE: + case DATETIME: + case HLL: + stringBuilder.append(type.toString().toLowerCase()); + break; + default: + stringBuilder.append("unknown"); + break; + } + return stringBuilder.toString(); + } + + public TColumnType toThrift() { + TColumnType thrift = new TColumnType(); + thrift.type = type.toThrift(); + if (type == PrimitiveType.CHAR || type == PrimitiveType.VARCHAR || type == PrimitiveType.HLL) { + thrift.setLen(len); + } + if (type == PrimitiveType.DECIMAL) { + thrift.setPrecision(precision); + thrift.setScale(scale); + } + return thrift; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof ColumnType)) { + return false; + } + ColumnType other = (ColumnType) o; + if (type != other.type) { + return false; + } + if (type == PrimitiveType.DECIMAL) { + return scale == other.scale && precision == other.precision; + } else if (type == PrimitiveType.CHAR) { + return len == other.len; + } else { + return true; + } + } + + @Override + public String toString() { + return toSql(); + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, type.name()); + out.writeInt(scale); + out.writeInt(precision); + out.writeInt(len); + out.writeBoolean(varcharLimit); + } + + @Override + public void readFields(DataInput in) throws IOException { + type = PrimitiveType.valueOf(Text.readString(in)); + scale = in.readInt(); + precision = in.readInt(); 
+ len = in.readInt(); + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_22) { + varcharLimit = in.readBoolean(); + } + } + +} + diff --git a/fe/src/com/baidu/palo/catalog/DistributionInfo.java b/fe/src/com/baidu/palo/catalog/DistributionInfo.java index ede6252aa8..a46435d1db 100644 --- a/fe/src/com/baidu/palo/catalog/DistributionInfo.java +++ b/fe/src/com/baidu/palo/catalog/DistributionInfo.java @@ -18,97 +18,97 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.DistributionDesc; -import com.baidu.palo.analysis.Expr; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import com.google.common.collect.Lists; - -import org.apache.commons.lang.NotImplementedException; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; -import java.util.Map; - -public abstract class DistributionInfo implements Writable { - - public enum DistributionInfoType { - HASH, - RANDOM - } - - protected DistributionInfoType type; - - public DistributionInfo() { - // for persist - } - - public DistributionInfo(DistributionInfoType type) { - this.type = type; - } - - public DistributionInfoType getType() { - return type; - } - - public int getBucketNum() { - // should override in sub class - throw new NotImplementedException("not implemented"); - } - - public DistributionDesc toDistributionDesc() { - throw new NotImplementedException(); - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, type.name()); - } - - @Override - public void readFields(DataInput in) throws IOException { - type = DistributionInfoType.valueOf(Text.readString(in)); - } - - public String toSql() { - return ""; - } - - public boolean eqauls(DistributionInfo info) { - return false; - } - - public static List toDistExpr(OlapTable tbl, DistributionInfo distInfo, Map exprByCol) { - List distExprs = Lists.newArrayList(); - if (distInfo instanceof RandomDistributionInfo) { - for (Column col : tbl.getBaseSchema()) { - if (col.isKey()) { - Expr distExpr = exprByCol.get(col.getName()); - // used to compute hash - if (col.getDataType() == PrimitiveType.CHAR) { - distExpr.setType(Type.CHAR); - } - distExprs.add(distExpr); - } else { - break; - } - } - } else if (distInfo instanceof HashDistributionInfo) { - HashDistributionInfo hashDistInfo = (HashDistributionInfo) distInfo; - for (Column col : hashDistInfo.getDistributionColumns()) { - Expr distExpr = exprByCol.get(col.getName()); - // used to compute hash - if (col.getDataType() == PrimitiveType.CHAR) { - distExpr.setType(Type.CHAR); - } - distExprs.add(distExpr); - } - } - return distExprs; - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.DistributionDesc; +import com.baidu.palo.analysis.Expr; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import com.google.common.collect.Lists; + +import org.apache.commons.lang.NotImplementedException; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public abstract class DistributionInfo implements Writable { + + public enum DistributionInfoType { + HASH, + RANDOM + } + + protected DistributionInfoType type; + + public DistributionInfo() { + // for persist + } + + public DistributionInfo(DistributionInfoType type) { + this.type = type; + } + + public DistributionInfoType 
getType() { + return type; + } + + public int getBucketNum() { + // should override in sub class + throw new NotImplementedException("not implemented"); + } + + public DistributionDesc toDistributionDesc() { + throw new NotImplementedException(); + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, type.name()); + } + + @Override + public void readFields(DataInput in) throws IOException { + type = DistributionInfoType.valueOf(Text.readString(in)); + } + + public String toSql() { + return ""; + } + + public boolean eqauls(DistributionInfo info) { + return false; + } + + public static List toDistExpr(OlapTable tbl, DistributionInfo distInfo, Map exprByCol) { + List distExprs = Lists.newArrayList(); + if (distInfo instanceof RandomDistributionInfo) { + for (Column col : tbl.getBaseSchema()) { + if (col.isKey()) { + Expr distExpr = exprByCol.get(col.getName()); + // used to compute hash + if (col.getDataType() == PrimitiveType.CHAR) { + distExpr.setType(Type.CHAR); + } + distExprs.add(distExpr); + } else { + break; + } + } + } else if (distInfo instanceof HashDistributionInfo) { + HashDistributionInfo hashDistInfo = (HashDistributionInfo) distInfo; + for (Column col : hashDistInfo.getDistributionColumns()) { + Expr distExpr = exprByCol.get(col.getName()); + // used to compute hash + if (col.getDataType() == PrimitiveType.CHAR) { + distExpr.setType(Type.CHAR); + } + distExprs.add(distExpr); + } + } + return distExprs; + } +} diff --git a/fe/src/com/baidu/palo/catalog/DomainResolver.java b/fe/src/com/baidu/palo/catalog/DomainResolver.java index 9d607d9e46..050a4acf0a 100644 --- a/fe/src/com/baidu/palo/catalog/DomainResolver.java +++ b/fe/src/com/baidu/palo/catalog/DomainResolver.java @@ -33,6 +33,7 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; public class DomainResolver extends Daemon { private static final Logger LOG = LogManager.getLogger(DomainResolver.class); @@ -40,11 +41,20 @@ public class DomainResolver extends Daemon { private PaloAuth auth; + private AtomicBoolean isStart = new AtomicBoolean(false); + public DomainResolver(PaloAuth auth) { super("domain resolver", 10 * 1000); this.auth = auth; } + @Override + public synchronized void start() { + if (isStart.compareAndSet(false, true)) { + super.start(); + } + } + @Override public void runOneCycle() { // qualified user name -> domain name @@ -116,7 +126,7 @@ public class DomainResolver extends Daemon { public boolean resolveWithBNS(String domainName, Set resolvedIPs) { File binaryFile = new File(BNS_RESOLVER_TOOLS_PATH); if (!binaryFile.exists()) { - LOG.warn("{} does not exist", BNS_RESOLVER_TOOLS_PATH); + LOG.info("{} does not exist", BNS_RESOLVER_TOOLS_PATH); return false; } diff --git a/fe/src/com/baidu/palo/catalog/Function.java b/fe/src/com/baidu/palo/catalog/Function.java index b24daf1fe1..932c066a82 100755 --- a/fe/src/com/baidu/palo/catalog/Function.java +++ b/fe/src/com/baidu/palo/catalog/Function.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.analysis.FunctionName; import com.baidu.palo.analysis.HdfsURI; import com.baidu.palo.thrift.TFunction; @@ -31,415 +31,415 @@ import com.google.common.base.Preconditions; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import java.util.List; - - -/** - * Base class for all functions. - */ -public class Function { - private static final Logger LOG = LogManager.getLogger(Function.class); - - // Enum for how to compare function signatures. - // For decimal types, the type in the function can be a wildcard, i.e. decimal(*,*). - // The wildcard can *only* exist as function type, the caller will always be a - // fully specified decimal. - // For the purposes of function type resolution, decimal(*,*) will match exactly - // with any fully specified decimal (i.e. fn(decimal(*,*)) matches identically for - // the call to fn(decimal(1,0)). - public enum CompareMode { - // Two signatures are identical if the number of arguments and their types match - // exactly and either both signatures are varargs or neither. - IS_IDENTICAL, - - // Two signatures are indistinguishable if there is no way to tell them apart - // when matching a particular instantiation. That is, their fixed arguments - // match exactly and the remaining varargs have the same type. - // e.g. fn(int, int, int) and fn(int...) - // Argument types that are NULL are ignored when doing this comparison. - // e.g. fn(NULL, int) is indistinguishable from fn(int, int) - IS_INDISTINGUISHABLE, - - // X is a supertype of Y if Y.arg[i] can be strictly implicitly cast to X.arg[i]. If - /// X has vargs, the remaining arguments of Y must be strictly implicitly castable - // to the var arg type. The key property this provides is that X can be used in place - // of Y. e.g. fn(int, double, string...) is a supertype of fn(tinyint, float, string, - // string) - IS_SUPERTYPE_OF, - - // Nonstrict supertypes broaden the definition of supertype to accept implicit casts - // of arguments that may result in loss of precision - e.g. decimal to float. - IS_NONSTRICT_SUPERTYPE_OF, - } - - public static final long UNIQUE_FUNCTION_ID = 0; - // User specified function name e.g. "Add" - private FunctionName name; - private final Type retType; - // Array of parameter types. empty array if this function does not have parameters. - private Type[] argTypes; - // If true, this function has variable arguments. - // TODO: we don't currently support varargs with no fixed types. i.e. fn(...) - private boolean hasVarArgs; - - // If true (default), this function is called directly by the user. For operators, - // this is false. If false, it also means the function is not visible from - // 'show functions'. - private boolean userVisible; - - // Absolute path in HDFS for the binary that contains this function. - // e.g. 
/udfs/udfs.jar - private HdfsURI location; - private TFunctionBinaryType binaryType; - - public Function(FunctionName name, Type[] argTypes, Type retType, boolean varArgs) { - this.name = name; - this.hasVarArgs = varArgs; - if (argTypes == null) { - this.argTypes = new Type[0]; - } else { - this.argTypes = argTypes; - } - this.retType = retType; - } - - public Function(FunctionName name, List args, Type retType, boolean varArgs) { - this(name, (Type[]) null, retType, varArgs); - if (args.size() > 0) { - argTypes = args.toArray(new Type[args.size()]); - } else { - argTypes = new Type[0]; - } - } - - public FunctionName getFunctionName() { - return name; - } - - public String functionName() { - return name.getFunction(); - } - - public String dbName() { - return name.getDb(); - } - - public Type getReturnType() { - return retType; - } - - public Type[] getArgs() { - return argTypes; - } - - // Returns the number of arguments to this function. - public int getNumArgs() { - return argTypes.length; - } - - public HdfsURI getLocation() { - return location; - } - - public void setLocation(HdfsURI loc) { - location = loc; - } - - public TFunctionBinaryType getBinaryType() { - return binaryType; - } - - public void setBinaryType(TFunctionBinaryType type) { - binaryType = type; - } - - public boolean hasVarArgs() { - return hasVarArgs; - } - - public boolean isUserVisible() { - return userVisible; - } - - public void setUserVisible(boolean userVisible) { - this.userVisible = userVisible; - } - - public Type getVarArgsType() { - if (!hasVarArgs) { - return Type.INVALID; - } - Preconditions.checkState(argTypes.length > 0); - return argTypes[argTypes.length - 1]; - } - - public void setHasVarArgs(boolean v) { - hasVarArgs = v; - } - - // Returns a string with the signature in human readable format: - // FnName(argtype1, argtyp2). e.g. Add(int, int) - public String signatureString() { - StringBuilder sb = new StringBuilder(); - sb.append(name.getFunction()).append("(").append(Joiner.on(", ").join(argTypes)); - if (hasVarArgs) { - sb.append("..."); - } - sb.append(")"); - return sb.toString(); - } - - // Compares this to 'other' for mode. - public boolean compare(Function other, CompareMode mode) { - switch (mode) { - case IS_IDENTICAL: - return isIdentical(other); - case IS_INDISTINGUISHABLE: - return isIndistinguishable(other); - case IS_SUPERTYPE_OF: - return isSubtype(other); - case IS_NONSTRICT_SUPERTYPE_OF: - return isAssignCompatible(other); - default: - Preconditions.checkState(false); - return false; - } - } - - /** - * Returns true if 'this' is a supertype of 'other'. Each argument in other must - * be implicitly castable to the matching argument in this. - * TODO: look into how we resolve implicitly castable functions. Is there a rule - * for "most" compatible or maybe return an error if it is ambiguous? - */ - private boolean isSubtype(Function other) { - if (!this.hasVarArgs && other.argTypes.length != this.argTypes.length) { - return false; - } - if (this.hasVarArgs && other.argTypes.length < this.argTypes.length) { - return false; - } - for (int i = 0; i < this.argTypes.length; ++i) { - if (!Type.isImplicitlyCastable(other.argTypes[i], this.argTypes[i], true)) { - return false; - } - } - // Check trailing varargs. 
- if (this.hasVarArgs) { - for (int i = this.argTypes.length; i < other.argTypes.length; ++i) { - if (!Type.isImplicitlyCastable(other.argTypes[i], getVarArgsType(), true)) { - return false; - } - } - } - return true; - } - - // return true if 'this' is assign-compatible from 'other'. - // Each argument in 'other' must be assign-compatible to the matching argument in 'this'. - private boolean isAssignCompatible(Function other) { - if (!this.hasVarArgs && other.argTypes.length != this.argTypes.length) { - return false; - } - if (this.hasVarArgs && other.argTypes.length < this.argTypes.length) { - return false; - } - for (int i = 0; i < this.argTypes.length; ++i) { - if (!Type.canCastTo(other.argTypes[i], argTypes[i])) { - return false; - } - } - // Check trailing varargs. - if (this.hasVarArgs) { - for (int i = this.argTypes.length; i < other.argTypes.length; ++i) { - if (!Type.canCastTo(other.argTypes[i], getVarArgsType())) { - return false; - } - } - } - return true; - } - - private boolean isIdentical(Function o) { - if (!o.name.equals(name)) { - return false; - } - if (o.argTypes.length != this.argTypes.length) { - return false; - } - if (o.hasVarArgs != this.hasVarArgs) { - return false; - } - for (int i = 0; i < this.argTypes.length; ++i) { - if (!o.argTypes[i].matchesType(this.argTypes[i])) { - return false; - } - } - return true; - } - - private boolean isIndistinguishable(Function o) { - if (!o.name.equals(name)) { - return false; - } - int minArgs = Math.min(o.argTypes.length, this.argTypes.length); - // The first fully specified args must be identical. - for (int i = 0; i < minArgs; ++i) { - if (o.argTypes[i].isNull() || this.argTypes[i].isNull()) { - continue; - } - if (!o.argTypes[i].matchesType(this.argTypes[i])) { - return false; - } - } - if (o.argTypes.length == this.argTypes.length) { - return true; - } - - if (o.hasVarArgs && this.hasVarArgs) { - if (!o.getVarArgsType().matchesType(this.getVarArgsType())) { - return false; - } - if (this.getNumArgs() > o.getNumArgs()) { - for (int i = minArgs; i < this.getNumArgs(); ++i) { - if (this.argTypes[i].isNull()) { - continue; - } - if (!this.argTypes[i].matchesType(o.getVarArgsType())) { - return false; - } - } - } else { - for (int i = minArgs; i < o.getNumArgs(); ++i) { - if (o.argTypes[i].isNull()) { - continue; - } - if (!o.argTypes[i].matchesType(this.getVarArgsType())) { - return false; - } - } - } - return true; - } else if (o.hasVarArgs) { - // o has var args so check the remaining arguments from this - if (o.getNumArgs() > minArgs) { - return false; - } - for (int i = minArgs; i < this.getNumArgs(); ++i) { - if (this.argTypes[i].isNull()) { - continue; - } - if (!this.argTypes[i].matchesType(o.getVarArgsType())) { - return false; - } - } - return true; - } else if (this.hasVarArgs) { - // this has var args so check the remaining arguments from s - if (this.getNumArgs() > minArgs) { - return false; - } - for (int i = minArgs; i < o.getNumArgs(); ++i) { - if (o.argTypes[i].isNull()) { - continue; - } - if (!o.argTypes[i].matchesType(this.getVarArgsType())) { - return false; - } - } - return true; - } else { - // Neither has var args and the lengths don't match - return false; - } - } - - public TFunction toThrift() { - TFunction fn = new TFunction(); - fn.setSignature(signatureString()); - fn.setName(name.toThrift()); - fn.setBinary_type(binaryType); - if (location != null) { - fn.setHdfs_location(location.toString()); - } - fn.setArg_types(Type.toThrift(argTypes)); - fn.setRet_type(getReturnType().toThrift()); - 
fn.setHas_var_args(hasVarArgs); - // TODO: Comment field is missing? - // fn.setComment(comment) - return fn; - } - - // Child classes must override this function. - public String toSql(boolean ifNotExists) { - return ""; - } - - public static String getUdfTypeName(PrimitiveType t) { - switch (t) { - case BOOLEAN: - return "boolean_val"; - case TINYINT: - return "tiny_int_val"; - case SMALLINT: - return "small_int_val"; - case INT: - return "int_val"; - case BIGINT: - return "big_int_val"; - case LARGEINT: - return "large_int_val"; - case FLOAT: - return "float_val"; - case DOUBLE: - return "double_val"; - case VARCHAR: - case CHAR: - case HLL: - return "string_val"; - case DATE: - case DATETIME: - return "datetime_val"; - case DECIMAL: - return "decimal_val"; - default: - Preconditions.checkState(false, t.toString()); - return ""; - } - } - - public static String getUdfType(PrimitiveType t) { - switch (t) { - case NULL_TYPE: - return "AnyVal"; - case BOOLEAN: - return "BooleanVal"; - case TINYINT: - return "TinyIntVal"; - case SMALLINT: - return "SmallIntVal"; - case INT: - return "IntVal"; - case BIGINT: - return "BigIntVal"; - case LARGEINT: - return "LargeIntVal"; - case FLOAT: - return "FloatVal"; - case DOUBLE: - return "DoubleVal"; - case VARCHAR: - case CHAR: - case HLL: - return "StringVal"; - case DATE: - case DATETIME: - return "DateTimeVal"; - case DECIMAL: - return "DecimalVal"; - default: - Preconditions.checkState(false, t.toString()); - return ""; - } - } -} +import java.util.List; + + +/** + * Base class for all functions. + */ +public class Function { + private static final Logger LOG = LogManager.getLogger(Function.class); + + // Enum for how to compare function signatures. + // For decimal types, the type in the function can be a wildcard, i.e. decimal(*,*). + // The wildcard can *only* exist as function type, the caller will always be a + // fully specified decimal. + // For the purposes of function type resolution, decimal(*,*) will match exactly + // with any fully specified decimal (i.e. fn(decimal(*,*)) matches identically for + // the call to fn(decimal(1,0)). + public enum CompareMode { + // Two signatures are identical if the number of arguments and their types match + // exactly and either both signatures are varargs or neither. + IS_IDENTICAL, + + // Two signatures are indistinguishable if there is no way to tell them apart + // when matching a particular instantiation. That is, their fixed arguments + // match exactly and the remaining varargs have the same type. + // e.g. fn(int, int, int) and fn(int...) + // Argument types that are NULL are ignored when doing this comparison. + // e.g. fn(NULL, int) is indistinguishable from fn(int, int) + IS_INDISTINGUISHABLE, + + // X is a supertype of Y if Y.arg[i] can be strictly implicitly cast to X.arg[i]. If + /// X has vargs, the remaining arguments of Y must be strictly implicitly castable + // to the var arg type. The key property this provides is that X can be used in place + // of Y. e.g. fn(int, double, string...) is a supertype of fn(tinyint, float, string, + // string) + IS_SUPERTYPE_OF, + + // Nonstrict supertypes broaden the definition of supertype to accept implicit casts + // of arguments that may result in loss of precision - e.g. decimal to float. + IS_NONSTRICT_SUPERTYPE_OF, + } + + public static final long UNIQUE_FUNCTION_ID = 0; + // User specified function name e.g. "Add" + private FunctionName name; + private final Type retType; + // Array of parameter types. 
empty array if this function does not have parameters. + private Type[] argTypes; + // If true, this function has variable arguments. + // TODO: we don't currently support varargs with no fixed types. i.e. fn(...) + private boolean hasVarArgs; + + // If true (default), this function is called directly by the user. For operators, + // this is false. If false, it also means the function is not visible from + // 'show functions'. + private boolean userVisible; + + // Absolute path in HDFS for the binary that contains this function. + // e.g. /udfs/udfs.jar + private HdfsURI location; + private TFunctionBinaryType binaryType; + + public Function(FunctionName name, Type[] argTypes, Type retType, boolean varArgs) { + this.name = name; + this.hasVarArgs = varArgs; + if (argTypes == null) { + this.argTypes = new Type[0]; + } else { + this.argTypes = argTypes; + } + this.retType = retType; + } + + public Function(FunctionName name, List args, Type retType, boolean varArgs) { + this(name, (Type[]) null, retType, varArgs); + if (args.size() > 0) { + argTypes = args.toArray(new Type[args.size()]); + } else { + argTypes = new Type[0]; + } + } + + public FunctionName getFunctionName() { + return name; + } + + public String functionName() { + return name.getFunction(); + } + + public String dbName() { + return name.getDb(); + } + + public Type getReturnType() { + return retType; + } + + public Type[] getArgs() { + return argTypes; + } + + // Returns the number of arguments to this function. + public int getNumArgs() { + return argTypes.length; + } + + public HdfsURI getLocation() { + return location; + } + + public void setLocation(HdfsURI loc) { + location = loc; + } + + public TFunctionBinaryType getBinaryType() { + return binaryType; + } + + public void setBinaryType(TFunctionBinaryType type) { + binaryType = type; + } + + public boolean hasVarArgs() { + return hasVarArgs; + } + + public boolean isUserVisible() { + return userVisible; + } + + public void setUserVisible(boolean userVisible) { + this.userVisible = userVisible; + } + + public Type getVarArgsType() { + if (!hasVarArgs) { + return Type.INVALID; + } + Preconditions.checkState(argTypes.length > 0); + return argTypes[argTypes.length - 1]; + } + + public void setHasVarArgs(boolean v) { + hasVarArgs = v; + } + + // Returns a string with the signature in human readable format: + // FnName(argtype1, argtyp2). e.g. Add(int, int) + public String signatureString() { + StringBuilder sb = new StringBuilder(); + sb.append(name.getFunction()).append("(").append(Joiner.on(", ").join(argTypes)); + if (hasVarArgs) { + sb.append("..."); + } + sb.append(")"); + return sb.toString(); + } + + // Compares this to 'other' for mode. + public boolean compare(Function other, CompareMode mode) { + switch (mode) { + case IS_IDENTICAL: + return isIdentical(other); + case IS_INDISTINGUISHABLE: + return isIndistinguishable(other); + case IS_SUPERTYPE_OF: + return isSubtype(other); + case IS_NONSTRICT_SUPERTYPE_OF: + return isAssignCompatible(other); + default: + Preconditions.checkState(false); + return false; + } + } + + /** + * Returns true if 'this' is a supertype of 'other'. Each argument in other must + * be implicitly castable to the matching argument in this. + * TODO: look into how we resolve implicitly castable functions. Is there a rule + * for "most" compatible or maybe return an error if it is ambiguous? 
+ */ + private boolean isSubtype(Function other) { + if (!this.hasVarArgs && other.argTypes.length != this.argTypes.length) { + return false; + } + if (this.hasVarArgs && other.argTypes.length < this.argTypes.length) { + return false; + } + for (int i = 0; i < this.argTypes.length; ++i) { + if (!Type.isImplicitlyCastable(other.argTypes[i], this.argTypes[i], true)) { + return false; + } + } + // Check trailing varargs. + if (this.hasVarArgs) { + for (int i = this.argTypes.length; i < other.argTypes.length; ++i) { + if (!Type.isImplicitlyCastable(other.argTypes[i], getVarArgsType(), true)) { + return false; + } + } + } + return true; + } + + // return true if 'this' is assign-compatible from 'other'. + // Each argument in 'other' must be assign-compatible to the matching argument in 'this'. + private boolean isAssignCompatible(Function other) { + if (!this.hasVarArgs && other.argTypes.length != this.argTypes.length) { + return false; + } + if (this.hasVarArgs && other.argTypes.length < this.argTypes.length) { + return false; + } + for (int i = 0; i < this.argTypes.length; ++i) { + if (!Type.canCastTo(other.argTypes[i], argTypes[i])) { + return false; + } + } + // Check trailing varargs. + if (this.hasVarArgs) { + for (int i = this.argTypes.length; i < other.argTypes.length; ++i) { + if (!Type.canCastTo(other.argTypes[i], getVarArgsType())) { + return false; + } + } + } + return true; + } + + private boolean isIdentical(Function o) { + if (!o.name.equals(name)) { + return false; + } + if (o.argTypes.length != this.argTypes.length) { + return false; + } + if (o.hasVarArgs != this.hasVarArgs) { + return false; + } + for (int i = 0; i < this.argTypes.length; ++i) { + if (!o.argTypes[i].matchesType(this.argTypes[i])) { + return false; + } + } + return true; + } + + private boolean isIndistinguishable(Function o) { + if (!o.name.equals(name)) { + return false; + } + int minArgs = Math.min(o.argTypes.length, this.argTypes.length); + // The first fully specified args must be identical. 
+ for (int i = 0; i < minArgs; ++i) { + if (o.argTypes[i].isNull() || this.argTypes[i].isNull()) { + continue; + } + if (!o.argTypes[i].matchesType(this.argTypes[i])) { + return false; + } + } + if (o.argTypes.length == this.argTypes.length) { + return true; + } + + if (o.hasVarArgs && this.hasVarArgs) { + if (!o.getVarArgsType().matchesType(this.getVarArgsType())) { + return false; + } + if (this.getNumArgs() > o.getNumArgs()) { + for (int i = minArgs; i < this.getNumArgs(); ++i) { + if (this.argTypes[i].isNull()) { + continue; + } + if (!this.argTypes[i].matchesType(o.getVarArgsType())) { + return false; + } + } + } else { + for (int i = minArgs; i < o.getNumArgs(); ++i) { + if (o.argTypes[i].isNull()) { + continue; + } + if (!o.argTypes[i].matchesType(this.getVarArgsType())) { + return false; + } + } + } + return true; + } else if (o.hasVarArgs) { + // o has var args so check the remaining arguments from this + if (o.getNumArgs() > minArgs) { + return false; + } + for (int i = minArgs; i < this.getNumArgs(); ++i) { + if (this.argTypes[i].isNull()) { + continue; + } + if (!this.argTypes[i].matchesType(o.getVarArgsType())) { + return false; + } + } + return true; + } else if (this.hasVarArgs) { + // this has var args so check the remaining arguments from s + if (this.getNumArgs() > minArgs) { + return false; + } + for (int i = minArgs; i < o.getNumArgs(); ++i) { + if (o.argTypes[i].isNull()) { + continue; + } + if (!o.argTypes[i].matchesType(this.getVarArgsType())) { + return false; + } + } + return true; + } else { + // Neither has var args and the lengths don't match + return false; + } + } + + public TFunction toThrift() { + TFunction fn = new TFunction(); + fn.setSignature(signatureString()); + fn.setName(name.toThrift()); + fn.setBinary_type(binaryType); + if (location != null) { + fn.setHdfs_location(location.toString()); + } + fn.setArg_types(Type.toThrift(argTypes)); + fn.setRet_type(getReturnType().toThrift()); + fn.setHas_var_args(hasVarArgs); + // TODO: Comment field is missing? + // fn.setComment(comment) + return fn; + } + + // Child classes must override this function. 
+ public String toSql(boolean ifNotExists) { + return ""; + } + + public static String getUdfTypeName(PrimitiveType t) { + switch (t) { + case BOOLEAN: + return "boolean_val"; + case TINYINT: + return "tiny_int_val"; + case SMALLINT: + return "small_int_val"; + case INT: + return "int_val"; + case BIGINT: + return "big_int_val"; + case LARGEINT: + return "large_int_val"; + case FLOAT: + return "float_val"; + case DOUBLE: + return "double_val"; + case VARCHAR: + case CHAR: + case HLL: + return "string_val"; + case DATE: + case DATETIME: + return "datetime_val"; + case DECIMAL: + return "decimal_val"; + default: + Preconditions.checkState(false, t.toString()); + return ""; + } + } + + public static String getUdfType(PrimitiveType t) { + switch (t) { + case NULL_TYPE: + return "AnyVal"; + case BOOLEAN: + return "BooleanVal"; + case TINYINT: + return "TinyIntVal"; + case SMALLINT: + return "SmallIntVal"; + case INT: + return "IntVal"; + case BIGINT: + return "BigIntVal"; + case LARGEINT: + return "LargeIntVal"; + case FLOAT: + return "FloatVal"; + case DOUBLE: + return "DoubleVal"; + case VARCHAR: + case CHAR: + case HLL: + return "StringVal"; + case DATE: + case DATETIME: + return "DateTimeVal"; + case DECIMAL: + return "DecimalVal"; + default: + Preconditions.checkState(false, t.toString()); + return ""; + } + } +} diff --git a/fe/src/com/baidu/palo/catalog/HashDistributionInfo.java b/fe/src/com/baidu/palo/catalog/HashDistributionInfo.java index 7e10b354a6..ef68f40624 100644 --- a/fe/src/com/baidu/palo/catalog/HashDistributionInfo.java +++ b/fe/src/com/baidu/palo/catalog/HashDistributionInfo.java @@ -13,128 +13,128 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.DistributionDesc; -import com.baidu.palo.analysis.HashDistributionDesc; - -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Hash Distribution Info. 
- */ -public class HashDistributionInfo extends DistributionInfo { - private List distributionColumns; - private int bucketNum; - - public HashDistributionInfo() { - super(); - this.distributionColumns = new ArrayList(); - } - - public HashDistributionInfo(int bucketNum, List distributionColumns) { - super(DistributionInfoType.HASH); - this.distributionColumns = distributionColumns; - this.bucketNum = bucketNum; - } - - public List getDistributionColumns() { - return distributionColumns; - } - - @Override - public int getBucketNum() { - return bucketNum; - } - - public void write(DataOutput out) throws IOException { - super.write(out); - int columnCount = distributionColumns.size(); - out.writeInt(columnCount); - for (Column column : distributionColumns) { - column.write(out); - } - out.writeInt(bucketNum); - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - int columnCount = in.readInt(); - for (int i = 0; i < columnCount; i++) { - Column column = Column.read(in); - distributionColumns.add(column); - } - bucketNum = in.readInt(); - } - - public static DistributionInfo read(DataInput in) throws IOException { - DistributionInfo distributionInfo = new HashDistributionInfo(); - distributionInfo.readFields(in); - return distributionInfo; - } - - public boolean equals(DistributionInfo info) { - if (this == info) { - return true; - } - - if (!(info instanceof HashDistributionInfo)) { - return false; - } - - HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) info; - - return type == hashDistributionInfo.type - && bucketNum == hashDistributionInfo.bucketNum - && distributionColumns.equals(hashDistributionInfo.distributionColumns); - } - - @Override - public DistributionDesc toDistributionDesc() { - List distriColNames = Lists.newArrayList(); - for (Column col : distributionColumns) { - distriColNames.add(col.getName()); - } - DistributionDesc distributionDesc = new HashDistributionDesc(bucketNum, distriColNames); - return distributionDesc; - } - - @Override - public String toSql() { - StringBuilder builder = new StringBuilder(); - builder.append("DISTRIBUTED BY HASH("); - - List colNames = Lists.newArrayList(); - for (Column column : distributionColumns) { - colNames.add("`" + column.getName() + "`"); - } - String colList = Joiner.on(", ").join(colNames); - builder.append(colList); - - builder.append(") BUCKETS ").append(bucketNum); - return builder.toString(); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("type: ").append(type).append("; "); - - builder.append("distribution columns: ["); - for (Column column : distributionColumns) { - builder.append(column.getName()).append(","); - } - builder.append("]; "); - - builder.append("bucket num: ").append(bucketNum).append("; ");; - - return builder.toString(); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.DistributionDesc; +import com.baidu.palo.analysis.HashDistributionDesc; + +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Hash Distribution Info. 
+ */ +public class HashDistributionInfo extends DistributionInfo { + private List distributionColumns; + private int bucketNum; + + public HashDistributionInfo() { + super(); + this.distributionColumns = new ArrayList(); + } + + public HashDistributionInfo(int bucketNum, List distributionColumns) { + super(DistributionInfoType.HASH); + this.distributionColumns = distributionColumns; + this.bucketNum = bucketNum; + } + + public List getDistributionColumns() { + return distributionColumns; + } + + @Override + public int getBucketNum() { + return bucketNum; + } + + public void write(DataOutput out) throws IOException { + super.write(out); + int columnCount = distributionColumns.size(); + out.writeInt(columnCount); + for (Column column : distributionColumns) { + column.write(out); + } + out.writeInt(bucketNum); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + int columnCount = in.readInt(); + for (int i = 0; i < columnCount; i++) { + Column column = Column.read(in); + distributionColumns.add(column); + } + bucketNum = in.readInt(); + } + + public static DistributionInfo read(DataInput in) throws IOException { + DistributionInfo distributionInfo = new HashDistributionInfo(); + distributionInfo.readFields(in); + return distributionInfo; + } + + public boolean equals(DistributionInfo info) { + if (this == info) { + return true; + } + + if (!(info instanceof HashDistributionInfo)) { + return false; + } + + HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) info; + + return type == hashDistributionInfo.type + && bucketNum == hashDistributionInfo.bucketNum + && distributionColumns.equals(hashDistributionInfo.distributionColumns); + } + + @Override + public DistributionDesc toDistributionDesc() { + List distriColNames = Lists.newArrayList(); + for (Column col : distributionColumns) { + distriColNames.add(col.getName()); + } + DistributionDesc distributionDesc = new HashDistributionDesc(bucketNum, distriColNames); + return distributionDesc; + } + + @Override + public String toSql() { + StringBuilder builder = new StringBuilder(); + builder.append("DISTRIBUTED BY HASH("); + + List colNames = Lists.newArrayList(); + for (Column column : distributionColumns) { + colNames.add("`" + column.getName() + "`"); + } + String colList = Joiner.on(", ").join(colNames); + builder.append(colList); + + builder.append(") BUCKETS ").append(bucketNum); + return builder.toString(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("type: ").append(type).append("; "); + + builder.append("distribution columns: ["); + for (Column column : distributionColumns) { + builder.append(column.getName()).append(","); + } + builder.append("]; "); + + builder.append("bucket num: ").append(bucketNum).append("; ");; + + return builder.toString(); + } +} diff --git a/fe/src/com/baidu/palo/catalog/KeysType.java b/fe/src/com/baidu/palo/catalog/KeysType.java index e83773d887..3c91f67275 100644 --- a/fe/src/com/baidu/palo/catalog/KeysType.java +++ b/fe/src/com/baidu/palo/catalog/KeysType.java @@ -18,44 +18,44 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.thrift.TKeysType; - -public enum KeysType { - PRIMARY_KEYS, - DUP_KEYS, - UNIQUE_KEYS, - AGG_KEYS; - - public TKeysType toThrift() { - switch (this) { - case PRIMARY_KEYS: - return TKeysType.PRIMARY_KEYS; - case DUP_KEYS: - return TKeysType.DUP_KEYS; - case UNIQUE_KEYS: - return TKeysType.UNIQUE_KEYS; - case AGG_KEYS: - return TKeysType.AGG_KEYS; - default: - return null; - } - } - - public String toSql() { - switch (this) { - case PRIMARY_KEYS: - return "PRIMARY KEY"; - case DUP_KEYS: - return "DUPLICATE KEY"; - case UNIQUE_KEYS: - return "UNIQUE KEY"; - case AGG_KEYS: - return "AGGREGATE KEY"; - default: - return null; - } - } -} - +package com.baidu.palo.catalog; + +import com.baidu.palo.thrift.TKeysType; + +public enum KeysType { + PRIMARY_KEYS, + DUP_KEYS, + UNIQUE_KEYS, + AGG_KEYS; + + public TKeysType toThrift() { + switch (this) { + case PRIMARY_KEYS: + return TKeysType.PRIMARY_KEYS; + case DUP_KEYS: + return TKeysType.DUP_KEYS; + case UNIQUE_KEYS: + return TKeysType.UNIQUE_KEYS; + case AGG_KEYS: + return TKeysType.AGG_KEYS; + default: + return null; + } + } + + public String toSql() { + switch (this) { + case PRIMARY_KEYS: + return "PRIMARY KEY"; + case DUP_KEYS: + return "DUPLICATE KEY"; + case UNIQUE_KEYS: + return "UNIQUE KEY"; + case AGG_KEYS: + return "AGGREGATE KEY"; + default: + return null; + } + } +} + diff --git a/fe/src/com/baidu/palo/catalog/MaterializedIndex.java b/fe/src/com/baidu/palo/catalog/MaterializedIndex.java index e05666d496..8846f824ee 100644 --- a/fe/src/com/baidu/palo/catalog/MaterializedIndex.java +++ b/fe/src/com/baidu/palo/catalog/MaterializedIndex.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.common.io.Text; import com.baidu.palo.common.io.Writable; @@ -32,76 +32,76 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; - -/** - * The OlapTraditional table is a materialized table which stored as rowcolumnar file or columnar file - */ -public class MaterializedIndex extends MetaObject implements Writable { - public enum IndexState { - NORMAL, - ROLLUP, - SCHEMA_CHANGE - } - - private long id; - - private IndexState state; - private long rowCount; - - private Map idToTablets; - // this is for keeping tablet order - private List tablets; - - // for push after rollup index finished - private long rollupIndexId; - private long rollupFinishedVersion; - - public MaterializedIndex() { - this.state = IndexState.NORMAL; - this.idToTablets = new HashMap(); - this.tablets = new ArrayList(); - } - - public MaterializedIndex(long id, IndexState state) { - this.id = id; - - this.state = state; - if (this.state == null) { - this.state = IndexState.NORMAL; - } - - this.idToTablets = new HashMap(); - this.tablets = new ArrayList(); - - this.rowCount = 0; - - this.rollupIndexId = -1L; - this.rollupFinishedVersion = -1L; - } - - public List getTablets() { - return tablets; - } - - public List getTabletIdsInOrder() { - List tabletIds = Lists.newArrayList(); - for (Tablet tablet : tablets) { - tabletIds.add(tablet.getId()); - } - return tabletIds; - } - - public Tablet getTablet(long tabletId) { - return idToTablets.get(tabletId); +import java.util.Map.Entry; + +/** + * The OlapTraditional table is a materialized table which stored as rowcolumnar file or columnar file + */ +public class 
MaterializedIndex extends MetaObject implements Writable { + public enum IndexState { + NORMAL, + ROLLUP, + SCHEMA_CHANGE + } + + private long id; + + private IndexState state; + private long rowCount; + + private Map idToTablets; + // this is for keeping tablet order + private List tablets; + + // for push after rollup index finished + private long rollupIndexId; + private long rollupFinishedVersion; + + public MaterializedIndex() { + this.state = IndexState.NORMAL; + this.idToTablets = new HashMap(); + this.tablets = new ArrayList(); + } + + public MaterializedIndex(long id, IndexState state) { + this.id = id; + + this.state = state; + if (this.state == null) { + this.state = IndexState.NORMAL; + } + + this.idToTablets = new HashMap(); + this.tablets = new ArrayList(); + + this.rowCount = 0; + + this.rollupIndexId = -1L; + this.rollupFinishedVersion = -1L; + } + + public List getTablets() { + return tablets; + } + + public List getTabletIdsInOrder() { + List tabletIds = Lists.newArrayList(); + for (Tablet tablet : tablets) { + tabletIds.add(tablet.getId()); + } + return tabletIds; + } + + public Tablet getTablet(long tabletId) { + return idToTablets.get(tabletId); } public void clearTabletsForRestore() { idToTablets.clear(); tablets.clear(); - } - - public void addTablet(Tablet tablet, TabletMeta tabletMeta) { + } + + public void addTablet(Tablet tablet, TabletMeta tabletMeta) { addTablet(tablet, tabletMeta, false); } @@ -115,138 +115,138 @@ public class MaterializedIndex extends MetaObject implements Writable { public void setIdForRestore(long idxId) { this.id = idxId; - } - - public long getId() { - return id; - } - - public void setState(IndexState state) { - this.state = state; - } - - public IndexState getState() { - return this.state; - } - - public long getRowCount() { - return rowCount; - } - - public void setRowCount(long rowCount) { - this.rowCount = rowCount; - } - - public void setRollupIndexInfo(long rollupIndexId, long rollupFinishedVersion) { - this.rollupIndexId = rollupIndexId; - this.rollupFinishedVersion = rollupFinishedVersion; - } - - public long getRollupIndexId() { - return rollupIndexId; - } - - public long getRollupFinishedVersion() { - return rollupFinishedVersion; - } - - public void clearRollupIndexInfo() { - this.rollupIndexId = -1L; - this.rollupFinishedVersion = -1L; - } - - public void write(DataOutput out) throws IOException { - super.write(out); - - out.writeLong(id); - - Text.writeString(out, state.name()); - out.writeLong(rowCount); - - int tabletCount = tablets.size(); - out.writeInt(tabletCount); - for (Tablet tablet : tablets) { - tablet.write(out); - } - - out.writeLong(rollupIndexId); - out.writeLong(rollupFinishedVersion); - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - - id = in.readLong(); - - state = IndexState.valueOf(Text.readString(in)); - rowCount = in.readLong(); - - int tabletCount = in.readInt(); - for (int i = 0; i < tabletCount; ++i) { - Tablet tablet = Tablet.read(in); - tablets.add(tablet); - idToTablets.put(tablet.getId(), tablet); - } - - rollupIndexId = in.readLong(); - rollupFinishedVersion = in.readLong(); - } - - public static MaterializedIndex read(DataInput in) throws IOException { - MaterializedIndex materializedIndex = new MaterializedIndex(); - materializedIndex.readFields(in); - return materializedIndex; - } - - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof MaterializedIndex)) { - return false; - } - - MaterializedIndex table 
= (MaterializedIndex) obj; - - // Check idToTablets - if (table.idToTablets == null) { - return false; - } - if (idToTablets.size() != table.idToTablets.size()) { - return false; - } - for (Entry entry : idToTablets.entrySet()) { - long key = entry.getKey(); - if (!table.idToTablets.containsKey(key)) { - return false; - } - if (!entry.getValue().equals(table.idToTablets.get(key))) { - return false; - } - } - - return (state.equals(table.state)) - && (rowCount == table.rowCount); - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("index id: ").append(id).append("; "); - buffer.append("index state: ").append(state.name()).append("; "); - - buffer.append("row count: ").append(rowCount).append("; "); - buffer.append("tablets size: ").append(tablets.size()).append("; "); - // - buffer.append("tablets: ["); - for (Tablet tablet : tablets) { - buffer.append("tablet: ").append(tablet.toString()).append(", "); - } - buffer.append("]; "); - - buffer.append("rollup index id: ").append(rollupIndexId).append("; "); - buffer.append("rollup finished version: ").append(rollupFinishedVersion).append("; "); - - return buffer.toString(); - } -} + } + + public long getId() { + return id; + } + + public void setState(IndexState state) { + this.state = state; + } + + public IndexState getState() { + return this.state; + } + + public long getRowCount() { + return rowCount; + } + + public void setRowCount(long rowCount) { + this.rowCount = rowCount; + } + + public void setRollupIndexInfo(long rollupIndexId, long rollupFinishedVersion) { + this.rollupIndexId = rollupIndexId; + this.rollupFinishedVersion = rollupFinishedVersion; + } + + public long getRollupIndexId() { + return rollupIndexId; + } + + public long getRollupFinishedVersion() { + return rollupFinishedVersion; + } + + public void clearRollupIndexInfo() { + this.rollupIndexId = -1L; + this.rollupFinishedVersion = -1L; + } + + public void write(DataOutput out) throws IOException { + super.write(out); + + out.writeLong(id); + + Text.writeString(out, state.name()); + out.writeLong(rowCount); + + int tabletCount = tablets.size(); + out.writeInt(tabletCount); + for (Tablet tablet : tablets) { + tablet.write(out); + } + + out.writeLong(rollupIndexId); + out.writeLong(rollupFinishedVersion); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + id = in.readLong(); + + state = IndexState.valueOf(Text.readString(in)); + rowCount = in.readLong(); + + int tabletCount = in.readInt(); + for (int i = 0; i < tabletCount; ++i) { + Tablet tablet = Tablet.read(in); + tablets.add(tablet); + idToTablets.put(tablet.getId(), tablet); + } + + rollupIndexId = in.readLong(); + rollupFinishedVersion = in.readLong(); + } + + public static MaterializedIndex read(DataInput in) throws IOException { + MaterializedIndex materializedIndex = new MaterializedIndex(); + materializedIndex.readFields(in); + return materializedIndex; + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof MaterializedIndex)) { + return false; + } + + MaterializedIndex table = (MaterializedIndex) obj; + + // Check idToTablets + if (table.idToTablets == null) { + return false; + } + if (idToTablets.size() != table.idToTablets.size()) { + return false; + } + for (Entry entry : idToTablets.entrySet()) { + long key = entry.getKey(); + if (!table.idToTablets.containsKey(key)) { + return false; + } + if (!entry.getValue().equals(table.idToTablets.get(key))) { + return 
false; + } + } + + return (state.equals(table.state)) + && (rowCount == table.rowCount); + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("index id: ").append(id).append("; "); + buffer.append("index state: ").append(state.name()).append("; "); + + buffer.append("row count: ").append(rowCount).append("; "); + buffer.append("tablets size: ").append(tablets.size()).append("; "); + // + buffer.append("tablets: ["); + for (Tablet tablet : tablets) { + buffer.append("tablet: ").append(tablet.toString()).append(", "); + } + buffer.append("]; "); + + buffer.append("rollup index id: ").append(rollupIndexId).append("; "); + buffer.append("rollup finished version: ").append(rollupFinishedVersion).append("; "); + + return buffer.toString(); + } +} diff --git a/fe/src/com/baidu/palo/catalog/MysqlTable.java b/fe/src/com/baidu/palo/catalog/MysqlTable.java index 635715acf4..2afac5748f 100644 --- a/fe/src/com/baidu/palo/catalog/MysqlTable.java +++ b/fe/src/com/baidu/palo/catalog/MysqlTable.java @@ -13,213 +13,213 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.CreateTableStmt; -import com.baidu.palo.analysis.TableName; -import com.baidu.palo.common.DdlException; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.thrift.TMySQLTable; -import com.baidu.palo.thrift.TTableDescriptor; -import com.baidu.palo.thrift.TTableType; - -import com.google.common.base.Strings; -import com.google.common.collect.Maps; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.util.List; -import java.util.Map; -import java.util.zip.Adler32; - -public class MysqlTable extends Table { - private static final Logger LOG = LogManager.getLogger(OlapTable.class); - - private static final String MYSQL_HOST = "host"; - private static final String MYSQL_PORT = "port"; - private static final String MYSQL_USER = "user"; - private static final String MYSQL_PASSWORD = "password"; - private static final String MYSQL_DATABASE = "database"; - private static final String MYSQL_TABLE = "table"; - - private String host; - private String port; - private String userName; - private String passwd; - private String mysqlDatabaseName; - private String mysqlTableName; - - public MysqlTable() { - super(TableType.MYSQL); - } - - public MysqlTable(long id, String name, List schema, Map properties) - throws DdlException { - super(id, name, TableType.MYSQL, schema); - validate(properties); - } - - private void validate(Map properties) throws DdlException { - if (properties == null) { - throw new DdlException("Please set properties of mysql table, " - + "they are: host, port, user, password, database and table"); - } - - // Set up - host = properties.get(MYSQL_HOST); - if (Strings.isNullOrEmpty(host)) { - throw new DdlException("Host of MySQL table is null. " - + "Please add properties('host'='xxx.xxx.xxx.xxx') when create table"); - } - - port = properties.get(MYSQL_PORT); - if (Strings.isNullOrEmpty(port)) { - // Maybe null pointer or number convert - throw new DdlException("Port of MySQL table is null. " - + "Please add properties('port'='3306') when create table"); - } else { - try { - Integer.valueOf(port); - } catch (Exception e) { - throw new DdlException("Port of MySQL table must be a number." 
- + "Please add properties('port'='3306') when create table"); - - } - } - - userName = properties.get(MYSQL_USER); - if (Strings.isNullOrEmpty(userName)) { - throw new DdlException("User of MySQL table is null. " - + "Please add properties('user'='root') when create table"); - } - - passwd = properties.get(MYSQL_PASSWORD); - if (passwd == null) { - throw new DdlException("Password of MySQL table is null. " - + "Please add properties('password'='xxxx') when create table"); - } - - mysqlDatabaseName = properties.get(MYSQL_DATABASE); - if (Strings.isNullOrEmpty(mysqlDatabaseName)) { - throw new DdlException("Database of MySQL table is null. " - + "Please add properties('database'='xxxx') when create table"); - } - - mysqlTableName = properties.get(MYSQL_TABLE); - if (Strings.isNullOrEmpty(mysqlTableName)) { - throw new DdlException("Database of MySQL table is null. " - + "Please add properties('table'='xxxx') when create table"); - } - } - - public String getHost() { - return host; - } - - public String getPort() { - return port; - } - - public String getUserName() { - return userName; - } - - public String getPasswd() { - return passwd; - } - - public String getMysqlDatabaseName() { - return mysqlDatabaseName; - } - - public String getMysqlTableName() { - return mysqlTableName; - } - - public TTableDescriptor toThrift() { - TMySQLTable tMySQLTable = - new TMySQLTable(host, port, userName, passwd, mysqlDatabaseName, mysqlTableName); - TTableDescriptor tTableDescriptor = new TTableDescriptor(getId(), TTableType.MYSQL_TABLE, - baseSchema.size(), 0, getName(), ""); - tTableDescriptor.setMysqlTable(tMySQLTable); - return tTableDescriptor; - } - - @Override - public CreateTableStmt toCreateTableStmt(String dbName) { - Map properties = Maps.newHashMap(); - properties.put(MYSQL_HOST, host); - properties.put(MYSQL_PORT, port); - properties.put(MYSQL_USER, userName); - properties.put(MYSQL_PASSWORD, passwd); - properties.put(MYSQL_DATABASE, mysqlDatabaseName); - properties.put(MYSQL_TABLE, mysqlTableName); - - CreateTableStmt stmt = new CreateTableStmt(false, true, new TableName(dbName, name), baseSchema, - type.name(), null, null, null, properties, null); - - return stmt; - } - - @Override - public int getSignature(int signatureVersion) { - Adler32 adler32 = new Adler32(); - adler32.update(signatureVersion); - String charsetName = "UTF-8"; - - try { - // name - adler32.update(name.getBytes(charsetName)); - // type - adler32.update(type.name().getBytes(charsetName)); - // host - adler32.update(host.getBytes(charsetName)); - // port - adler32.update(port.getBytes(charsetName)); - // username - adler32.update(userName.getBytes(charsetName)); - // passwd - adler32.update(passwd.getBytes(charsetName)); - // mysql db - adler32.update(mysqlDatabaseName.getBytes(charsetName)); - // mysql table - adler32.update(mysqlTableName.getBytes(charsetName)); - - } catch (UnsupportedEncodingException e) { - LOG.error("encoding error", e); - return -1; - } - - return Math.abs((int) adler32.getValue()); - } - - @Override - public void write(DataOutput out) throws IOException { - super.write(out); - - Text.writeString(out, host); - Text.writeString(out, port); - Text.writeString(out, userName); - Text.writeString(out, passwd); - Text.writeString(out, mysqlDatabaseName); - Text.writeString(out, mysqlTableName); - } - - @Override - public void readFields(DataInput in) throws IOException { - super.readFields(in); - - // Read MySQL meta - host = Text.readString(in); - port = Text.readString(in); - userName = 
Text.readString(in); - passwd = Text.readString(in); - mysqlDatabaseName = Text.readString(in); - mysqlTableName = Text.readString(in); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.CreateTableStmt; +import com.baidu.palo.analysis.TableName; +import com.baidu.palo.common.DdlException; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.thrift.TMySQLTable; +import com.baidu.palo.thrift.TTableDescriptor; +import com.baidu.palo.thrift.TTableType; + +import com.google.common.base.Strings; +import com.google.common.collect.Maps; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.util.List; +import java.util.Map; +import java.util.zip.Adler32; + +public class MysqlTable extends Table { + private static final Logger LOG = LogManager.getLogger(OlapTable.class); + + private static final String MYSQL_HOST = "host"; + private static final String MYSQL_PORT = "port"; + private static final String MYSQL_USER = "user"; + private static final String MYSQL_PASSWORD = "password"; + private static final String MYSQL_DATABASE = "database"; + private static final String MYSQL_TABLE = "table"; + + private String host; + private String port; + private String userName; + private String passwd; + private String mysqlDatabaseName; + private String mysqlTableName; + + public MysqlTable() { + super(TableType.MYSQL); + } + + public MysqlTable(long id, String name, List schema, Map properties) + throws DdlException { + super(id, name, TableType.MYSQL, schema); + validate(properties); + } + + private void validate(Map properties) throws DdlException { + if (properties == null) { + throw new DdlException("Please set properties of mysql table, " + + "they are: host, port, user, password, database and table"); + } + + // Set up + host = properties.get(MYSQL_HOST); + if (Strings.isNullOrEmpty(host)) { + throw new DdlException("Host of MySQL table is null. " + + "Please add properties('host'='xxx.xxx.xxx.xxx') when create table"); + } + + port = properties.get(MYSQL_PORT); + if (Strings.isNullOrEmpty(port)) { + // Maybe null pointer or number convert + throw new DdlException("Port of MySQL table is null. " + + "Please add properties('port'='3306') when create table"); + } else { + try { + Integer.valueOf(port); + } catch (Exception e) { + throw new DdlException("Port of MySQL table must be a number." + + "Please add properties('port'='3306') when create table"); + + } + } + + userName = properties.get(MYSQL_USER); + if (Strings.isNullOrEmpty(userName)) { + throw new DdlException("User of MySQL table is null. " + + "Please add properties('user'='root') when create table"); + } + + passwd = properties.get(MYSQL_PASSWORD); + if (passwd == null) { + throw new DdlException("Password of MySQL table is null. " + + "Please add properties('password'='xxxx') when create table"); + } + + mysqlDatabaseName = properties.get(MYSQL_DATABASE); + if (Strings.isNullOrEmpty(mysqlDatabaseName)) { + throw new DdlException("Database of MySQL table is null. " + + "Please add properties('database'='xxxx') when create table"); + } + + mysqlTableName = properties.get(MYSQL_TABLE); + if (Strings.isNullOrEmpty(mysqlTableName)) { + throw new DdlException("Database of MySQL table is null. 
" + + "Please add properties('table'='xxxx') when create table"); + } + } + + public String getHost() { + return host; + } + + public String getPort() { + return port; + } + + public String getUserName() { + return userName; + } + + public String getPasswd() { + return passwd; + } + + public String getMysqlDatabaseName() { + return mysqlDatabaseName; + } + + public String getMysqlTableName() { + return mysqlTableName; + } + + public TTableDescriptor toThrift() { + TMySQLTable tMySQLTable = + new TMySQLTable(host, port, userName, passwd, mysqlDatabaseName, mysqlTableName); + TTableDescriptor tTableDescriptor = new TTableDescriptor(getId(), TTableType.MYSQL_TABLE, + baseSchema.size(), 0, getName(), ""); + tTableDescriptor.setMysqlTable(tMySQLTable); + return tTableDescriptor; + } + + @Override + public CreateTableStmt toCreateTableStmt(String dbName) { + Map properties = Maps.newHashMap(); + properties.put(MYSQL_HOST, host); + properties.put(MYSQL_PORT, port); + properties.put(MYSQL_USER, userName); + properties.put(MYSQL_PASSWORD, passwd); + properties.put(MYSQL_DATABASE, mysqlDatabaseName); + properties.put(MYSQL_TABLE, mysqlTableName); + + CreateTableStmt stmt = new CreateTableStmt(false, true, new TableName(dbName, name), baseSchema, + type.name(), null, null, null, properties, null); + + return stmt; + } + + @Override + public int getSignature(int signatureVersion) { + Adler32 adler32 = new Adler32(); + adler32.update(signatureVersion); + String charsetName = "UTF-8"; + + try { + // name + adler32.update(name.getBytes(charsetName)); + // type + adler32.update(type.name().getBytes(charsetName)); + // host + adler32.update(host.getBytes(charsetName)); + // port + adler32.update(port.getBytes(charsetName)); + // username + adler32.update(userName.getBytes(charsetName)); + // passwd + adler32.update(passwd.getBytes(charsetName)); + // mysql db + adler32.update(mysqlDatabaseName.getBytes(charsetName)); + // mysql table + adler32.update(mysqlTableName.getBytes(charsetName)); + + } catch (UnsupportedEncodingException e) { + LOG.error("encoding error", e); + return -1; + } + + return Math.abs((int) adler32.getValue()); + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + + Text.writeString(out, host); + Text.writeString(out, port); + Text.writeString(out, userName); + Text.writeString(out, passwd); + Text.writeString(out, mysqlDatabaseName); + Text.writeString(out, mysqlTableName); + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + // Read MySQL meta + host = Text.readString(in); + port = Text.readString(in); + userName = Text.readString(in); + passwd = Text.readString(in); + mysqlDatabaseName = Text.readString(in); + mysqlTableName = Text.readString(in); + } +} diff --git a/fe/src/com/baidu/palo/catalog/Partition.java b/fe/src/com/baidu/palo/catalog/Partition.java index 6384ff5db9..a0c6c84c46 100644 --- a/fe/src/com/baidu/palo/catalog/Partition.java +++ b/fe/src/com/baidu/palo/catalog/Partition.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.catalog.DistributionInfo.DistributionInfoType; import com.baidu.palo.common.io.Text; import com.baidu.palo.common.io.Writable; @@ -26,242 +26,242 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Map.Entry; - -/** - * Internal representation of partition-related metadata. - */ -public class Partition extends MetaObject implements Writable { - public static final long PARTITION_INIT_VERSION = 1L; - public static final long PARTITION_INIT_VERSION_HASH = 0L; - - public enum PartitionState { - NORMAL, - ROLLUP, - SCHEMA_CHANGE - } - - private long id; - private String name; - private PartitionState state; - - private MaterializedIndex baseIndex; - private Map idToRollupIndex; - private long committedVersion; - private long committedVersionHash; - - private DistributionInfo distributionInfo; - - public Partition() { - this.idToRollupIndex = new HashMap(); - } - - public Partition(long id, String name, MaterializedIndex baseIndex, DistributionInfo distributionInfo) { - this.id = id; - this.name = name; - this.state = PartitionState.NORMAL; - - this.baseIndex = baseIndex; - this.idToRollupIndex = new HashMap(); - - this.committedVersion = PARTITION_INIT_VERSION; - this.committedVersionHash = PARTITION_INIT_VERSION_HASH; - this.distributionInfo = distributionInfo; +import java.util.Map.Entry; + +/** + * Internal representation of partition-related metadata. + */ +public class Partition extends MetaObject implements Writable { + public static final long PARTITION_INIT_VERSION = 1L; + public static final long PARTITION_INIT_VERSION_HASH = 0L; + + public enum PartitionState { + NORMAL, + ROLLUP, + SCHEMA_CHANGE + } + + private long id; + private String name; + private PartitionState state; + + private MaterializedIndex baseIndex; + private Map idToRollupIndex; + private long committedVersion; + private long committedVersionHash; + + private DistributionInfo distributionInfo; + + public Partition() { + this.idToRollupIndex = new HashMap(); + } + + public Partition(long id, String name, MaterializedIndex baseIndex, DistributionInfo distributionInfo) { + this.id = id; + this.name = name; + this.state = PartitionState.NORMAL; + + this.baseIndex = baseIndex; + this.idToRollupIndex = new HashMap(); + + this.committedVersion = PARTITION_INIT_VERSION; + this.committedVersionHash = PARTITION_INIT_VERSION_HASH; + this.distributionInfo = distributionInfo; } public void setIdForRestore(long id) { this.id = id; - } - - public long getId() { - return this.id; - } - - public void setName(String newName) { - this.name = newName; - } - - public String getName() { - return this.name; - } - - public void setState(PartitionState state) { - this.state = state; - } - - public long getCommittedVersion() { - return committedVersion; - } - - public void setCommittedVersion(long committedVersion) { - this.committedVersion = committedVersion; - } - - public long getCommittedVersionHash() { - return committedVersionHash; - } - - public void setCommittedVersionHash(long committedVersionHash) { - this.committedVersionHash = committedVersionHash; - } - - public PartitionState getState() { - return this.state; - } - - public DistributionInfo getDistributionInfo() { - return distributionInfo; - } - - public void createRollupIndex(MaterializedIndex mIndex) { - this.idToRollupIndex.put(mIndex.getId(), mIndex); - } - - public MaterializedIndex deleteRollupIndex(long indexId) { 
- return this.idToRollupIndex.remove(indexId); - } - - public MaterializedIndex getBaseIndex() { - return baseIndex; - } - - public List getRollupIndices() { - List rollupIndices = new ArrayList(idToRollupIndex.size()); - for (Map.Entry entry : idToRollupIndex.entrySet()) { - rollupIndices.add(entry.getValue()); - } - return rollupIndices; - } - - public MaterializedIndex getIndex(long indexId) { - if (baseIndex.getId() == indexId) { - return baseIndex; - } - if (idToRollupIndex.containsKey(indexId)) { - return idToRollupIndex.get(indexId); - } - return null; - } - - public List getMaterializedIndices() { - List indices = new ArrayList(); - indices.add(baseIndex); - for (MaterializedIndex rollupIndex : idToRollupIndex.values()) { - indices.add(rollupIndex); - } - return indices; - } - - public static Partition read(DataInput in) throws IOException { - Partition partition = new Partition(); - partition.readFields(in); - return partition; - } - - public void write(DataOutput out) throws IOException { - super.write(out); - - out.writeLong(id); - Text.writeString(out, name); - Text.writeString(out, state.name()); - baseIndex.write(out); - - int rollupCount = (idToRollupIndex != null) ? idToRollupIndex.size() : 0; - out.writeInt(rollupCount); - if (idToRollupIndex != null) { - for (Map.Entry entry : idToRollupIndex.entrySet()) { - entry.getValue().write(out); - } - } - - out.writeLong(committedVersion); - out.writeLong(committedVersionHash); - - Text.writeString(out, distributionInfo.getType().name()); - distributionInfo.write(out); - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - - id = in.readLong(); - name = Text.readString(in); - state = PartitionState.valueOf(Text.readString(in)); - baseIndex = MaterializedIndex.read(in); - - int rollupCount = in.readInt(); - for (int i = 0; i < rollupCount; ++i) { - MaterializedIndex rollupTable = MaterializedIndex.read(in); - idToRollupIndex.put(rollupTable.getId(), rollupTable); - } - - committedVersion = in.readLong(); - committedVersionHash = in.readLong(); - - DistributionInfoType distriType = DistributionInfoType.valueOf(Text.readString(in)); - if (distriType == DistributionInfoType.HASH) { - distributionInfo = HashDistributionInfo.read(in); - } else if (distriType == DistributionInfoType.RANDOM) { - distributionInfo = RandomDistributionInfo.read(in); - } else { - throw new IOException("invalid distribution type: " + distriType); - } - } - - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof Partition)) { - return false; - } - - Partition partition = (Partition) obj; - if (idToRollupIndex != partition.idToRollupIndex) { - if (idToRollupIndex.size() != partition.idToRollupIndex.size()) { - return false; - } - for (Entry entry : idToRollupIndex.entrySet()) { - long key = entry.getKey(); - if (!partition.idToRollupIndex.containsKey(key)) { - return false; - } - if (!entry.getValue().equals(partition.idToRollupIndex.get(key))) { - return false; - } - } - } - - return (committedVersion == partition.committedVersion) - && (committedVersionHash == partition.committedVersionHash) - && (baseIndex.equals(partition.baseIndex) - && distributionInfo.eqauls(partition.distributionInfo)); - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("partition_id: ").append(id).append("; "); - buffer.append("name: ").append(name).append("; "); - buffer.append("partition_state.name: ").append(state.name()).append("; "); - - 
buffer.append("base_index: ").append(baseIndex.toString()).append("; "); - - int rollupCount = (idToRollupIndex != null) ? idToRollupIndex.size() : 0; - buffer.append("rollup count: ").append(rollupCount).append("; "); - - if (idToRollupIndex != null) { - for (Map.Entry entry : idToRollupIndex.entrySet()) { - buffer.append("rollup_index: ").append(entry.getValue().toString()).append("; "); - } - } - - buffer.append("committedVersion: ").append(committedVersion).append("; "); - buffer.append("committedVersionHash: ").append(committedVersionHash).append("; "); - - buffer.append("distribution_info.type: ").append(distributionInfo.getType().name()).append("; "); - buffer.append("distribution_info: ").append(distributionInfo.toString()); - - return buffer.toString(); - } -} + } + + public long getId() { + return this.id; + } + + public void setName(String newName) { + this.name = newName; + } + + public String getName() { + return this.name; + } + + public void setState(PartitionState state) { + this.state = state; + } + + public long getCommittedVersion() { + return committedVersion; + } + + public void setCommittedVersion(long committedVersion) { + this.committedVersion = committedVersion; + } + + public long getCommittedVersionHash() { + return committedVersionHash; + } + + public void setCommittedVersionHash(long committedVersionHash) { + this.committedVersionHash = committedVersionHash; + } + + public PartitionState getState() { + return this.state; + } + + public DistributionInfo getDistributionInfo() { + return distributionInfo; + } + + public void createRollupIndex(MaterializedIndex mIndex) { + this.idToRollupIndex.put(mIndex.getId(), mIndex); + } + + public MaterializedIndex deleteRollupIndex(long indexId) { + return this.idToRollupIndex.remove(indexId); + } + + public MaterializedIndex getBaseIndex() { + return baseIndex; + } + + public List getRollupIndices() { + List rollupIndices = new ArrayList(idToRollupIndex.size()); + for (Map.Entry entry : idToRollupIndex.entrySet()) { + rollupIndices.add(entry.getValue()); + } + return rollupIndices; + } + + public MaterializedIndex getIndex(long indexId) { + if (baseIndex.getId() == indexId) { + return baseIndex; + } + if (idToRollupIndex.containsKey(indexId)) { + return idToRollupIndex.get(indexId); + } + return null; + } + + public List getMaterializedIndices() { + List indices = new ArrayList(); + indices.add(baseIndex); + for (MaterializedIndex rollupIndex : idToRollupIndex.values()) { + indices.add(rollupIndex); + } + return indices; + } + + public static Partition read(DataInput in) throws IOException { + Partition partition = new Partition(); + partition.readFields(in); + return partition; + } + + public void write(DataOutput out) throws IOException { + super.write(out); + + out.writeLong(id); + Text.writeString(out, name); + Text.writeString(out, state.name()); + baseIndex.write(out); + + int rollupCount = (idToRollupIndex != null) ? 
idToRollupIndex.size() : 0; + out.writeInt(rollupCount); + if (idToRollupIndex != null) { + for (Map.Entry entry : idToRollupIndex.entrySet()) { + entry.getValue().write(out); + } + } + + out.writeLong(committedVersion); + out.writeLong(committedVersionHash); + + Text.writeString(out, distributionInfo.getType().name()); + distributionInfo.write(out); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + id = in.readLong(); + name = Text.readString(in); + state = PartitionState.valueOf(Text.readString(in)); + baseIndex = MaterializedIndex.read(in); + + int rollupCount = in.readInt(); + for (int i = 0; i < rollupCount; ++i) { + MaterializedIndex rollupTable = MaterializedIndex.read(in); + idToRollupIndex.put(rollupTable.getId(), rollupTable); + } + + committedVersion = in.readLong(); + committedVersionHash = in.readLong(); + + DistributionInfoType distriType = DistributionInfoType.valueOf(Text.readString(in)); + if (distriType == DistributionInfoType.HASH) { + distributionInfo = HashDistributionInfo.read(in); + } else if (distriType == DistributionInfoType.RANDOM) { + distributionInfo = RandomDistributionInfo.read(in); + } else { + throw new IOException("invalid distribution type: " + distriType); + } + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Partition)) { + return false; + } + + Partition partition = (Partition) obj; + if (idToRollupIndex != partition.idToRollupIndex) { + if (idToRollupIndex.size() != partition.idToRollupIndex.size()) { + return false; + } + for (Entry entry : idToRollupIndex.entrySet()) { + long key = entry.getKey(); + if (!partition.idToRollupIndex.containsKey(key)) { + return false; + } + if (!entry.getValue().equals(partition.idToRollupIndex.get(key))) { + return false; + } + } + } + + return (committedVersion == partition.committedVersion) + && (committedVersionHash == partition.committedVersionHash) + && (baseIndex.equals(partition.baseIndex) + && distributionInfo.eqauls(partition.distributionInfo)); + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("partition_id: ").append(id).append("; "); + buffer.append("name: ").append(name).append("; "); + buffer.append("partition_state.name: ").append(state.name()).append("; "); + + buffer.append("base_index: ").append(baseIndex.toString()).append("; "); + + int rollupCount = (idToRollupIndex != null) ? idToRollupIndex.size() : 0; + buffer.append("rollup count: ").append(rollupCount).append("; "); + + if (idToRollupIndex != null) { + for (Map.Entry entry : idToRollupIndex.entrySet()) { + buffer.append("rollup_index: ").append(entry.getValue().toString()).append("; "); + } + } + + buffer.append("committedVersion: ").append(committedVersion).append("; "); + buffer.append("committedVersionHash: ").append(committedVersionHash).append("; "); + + buffer.append("distribution_info.type: ").append(distributionInfo.getType().name()).append("; "); + buffer.append("distribution_info: ").append(distributionInfo.toString()); + + return buffer.toString(); + } +} diff --git a/fe/src/com/baidu/palo/catalog/PartitionInfo.java b/fe/src/com/baidu/palo/catalog/PartitionInfo.java index 941b97af45..83983dad40 100644 --- a/fe/src/com/baidu/palo/catalog/PartitionInfo.java +++ b/fe/src/com/baidu/palo/catalog/PartitionInfo.java @@ -13,127 +13,127 @@ // specific language governing permissions and limitations // under the License. 
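The catalog classes in this patch (Partition, MaterializedIndex, PartitionInfo and so on) all persist themselves through the same write()/readFields() convention: a count is written first, followed by each element, and readFields() reads them back in the same order. A minimal standalone sketch of that symmetry, using a plain list of ids as a simplified stand-in for the real element types:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative only: shows the count-then-elements layout used by the Writable
// implementations above, not the exact field layout of any Palo class.
public class WritableSymmetryDemo {
    static byte[] write(List<Long> ids) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bos);
        out.writeInt(ids.size());              // element count first
        for (long id : ids) {
            out.writeLong(id);                 // then each element
        }
        out.flush();
        return bos.toByteArray();
    }

    static List<Long> readFields(byte[] bytes) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
        int count = in.readInt();              // read back the count written above
        List<Long> ids = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            ids.add(in.readLong());
        }
        return ids;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(readFields(write(Arrays.asList(10001L, 10002L))));  // [10001, 10002]
    }
}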
-package com.baidu.palo.catalog; - -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import com.google.common.base.Preconditions; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/* - * Repository of a partition's related infos - */ -public class PartitionInfo implements Writable { - protected PartitionType type; - // partition id -> data property - protected Map idToDataProperty; - // partition id -> replication num - protected Map idToReplicationNum; - - public PartitionInfo() { - // for persist - this.idToDataProperty = new HashMap(); - this.idToReplicationNum = new HashMap(); - } - - public PartitionInfo(PartitionType type) { - this.type = type; - this.idToDataProperty = new HashMap(); - this.idToReplicationNum = new HashMap(); - } - - public PartitionType getType() { - return type; - } - - public DataProperty getDataProperty(long partitionId) { - return idToDataProperty.get(partitionId); - } - - public void setDataProperty(long partitionId, DataProperty newDataProperty) { - idToDataProperty.put(partitionId, newDataProperty); - } - - public short getReplicationNum(long partitionId) { - return idToReplicationNum.get(partitionId); - } - - public void setReplicationNum(long partitionId, short replicationNum) { - idToReplicationNum.put(partitionId, replicationNum); - } - - public static PartitionInfo read(DataInput in) throws IOException { - PartitionInfo partitionInfo = new PartitionInfo(); - partitionInfo.readFields(in); - return partitionInfo; - } - - public String toSql(OlapTable table, List partitionId) { - return ""; - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, type.name()); - - Preconditions.checkState(idToDataProperty.size() == idToReplicationNum.size()); - out.writeInt(idToDataProperty.size()); - for (Map.Entry entry : idToDataProperty.entrySet()) { - out.writeLong(entry.getKey()); - if (entry.getValue() == DataProperty.DEFAULT_HDD_DATA_PROPERTY) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - entry.getValue().write(out); - } - - out.writeShort(idToReplicationNum.get(entry.getKey())); - } - } - - @Override - public void readFields(DataInput in) throws IOException { - type = PartitionType.valueOf(Text.readString(in)); - - int counter = in.readInt(); - for (int i = 0; i < counter; i++) { - long partitionId = in.readLong(); - boolean isDefaultDataProperty = in.readBoolean(); - if (isDefaultDataProperty) { - idToDataProperty.put(partitionId, DataProperty.DEFAULT_HDD_DATA_PROPERTY); - } else { - idToDataProperty.put(partitionId, DataProperty.read(in)); - } - - short replicationNum = in.readShort(); - idToReplicationNum.put(partitionId, replicationNum); - } - } - - @Override - public String toString() { - StringBuilder buff = new StringBuilder(); - buff.append("type: ").append(type.typeString).append("; "); - - for (Map.Entry entry : idToDataProperty.entrySet()) { - buff.append(entry.getKey()).append("is HDD: ");; - if (entry.getValue() == DataProperty.DEFAULT_HDD_DATA_PROPERTY) { - buff.append(true); - } else { - buff.append(false); - - } - buff.append("data_property: ").append(entry.getValue().toString()); - buff.append("replica number: ").append(idToReplicationNum.get(entry.getKey())); - } - - return buff.toString(); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import 
com.google.common.base.Preconditions; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/* + * Repository of a partition's related infos + */ +public class PartitionInfo implements Writable { + protected PartitionType type; + // partition id -> data property + protected Map idToDataProperty; + // partition id -> replication num + protected Map idToReplicationNum; + + public PartitionInfo() { + // for persist + this.idToDataProperty = new HashMap(); + this.idToReplicationNum = new HashMap(); + } + + public PartitionInfo(PartitionType type) { + this.type = type; + this.idToDataProperty = new HashMap(); + this.idToReplicationNum = new HashMap(); + } + + public PartitionType getType() { + return type; + } + + public DataProperty getDataProperty(long partitionId) { + return idToDataProperty.get(partitionId); + } + + public void setDataProperty(long partitionId, DataProperty newDataProperty) { + idToDataProperty.put(partitionId, newDataProperty); + } + + public short getReplicationNum(long partitionId) { + return idToReplicationNum.get(partitionId); + } + + public void setReplicationNum(long partitionId, short replicationNum) { + idToReplicationNum.put(partitionId, replicationNum); + } + + public static PartitionInfo read(DataInput in) throws IOException { + PartitionInfo partitionInfo = new PartitionInfo(); + partitionInfo.readFields(in); + return partitionInfo; + } + + public String toSql(OlapTable table, List partitionId) { + return ""; + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, type.name()); + + Preconditions.checkState(idToDataProperty.size() == idToReplicationNum.size()); + out.writeInt(idToDataProperty.size()); + for (Map.Entry entry : idToDataProperty.entrySet()) { + out.writeLong(entry.getKey()); + if (entry.getValue() == DataProperty.DEFAULT_HDD_DATA_PROPERTY) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + entry.getValue().write(out); + } + + out.writeShort(idToReplicationNum.get(entry.getKey())); + } + } + + @Override + public void readFields(DataInput in) throws IOException { + type = PartitionType.valueOf(Text.readString(in)); + + int counter = in.readInt(); + for (int i = 0; i < counter; i++) { + long partitionId = in.readLong(); + boolean isDefaultDataProperty = in.readBoolean(); + if (isDefaultDataProperty) { + idToDataProperty.put(partitionId, DataProperty.DEFAULT_HDD_DATA_PROPERTY); + } else { + idToDataProperty.put(partitionId, DataProperty.read(in)); + } + + short replicationNum = in.readShort(); + idToReplicationNum.put(partitionId, replicationNum); + } + } + + @Override + public String toString() { + StringBuilder buff = new StringBuilder(); + buff.append("type: ").append(type.typeString).append("; "); + + for (Map.Entry entry : idToDataProperty.entrySet()) { + buff.append(entry.getKey()).append("is HDD: ");; + if (entry.getValue() == DataProperty.DEFAULT_HDD_DATA_PROPERTY) { + buff.append(true); + } else { + buff.append(false); + + } + buff.append("data_property: ").append(entry.getValue().toString()); + buff.append("replica number: ").append(idToReplicationNum.get(entry.getKey())); + } + + return buff.toString(); + } +} diff --git a/fe/src/com/baidu/palo/catalog/PartitionKey.java b/fe/src/com/baidu/palo/catalog/PartitionKey.java index 9d36f9d15c..a950bd71c3 100644 --- a/fe/src/com/baidu/palo/catalog/PartitionKey.java +++ b/fe/src/com/baidu/palo/catalog/PartitionKey.java @@ -13,304 
+13,304 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.DateLiteral; -import com.baidu.palo.analysis.IntLiteral; -import com.baidu.palo.analysis.LargeIntLiteral; -import com.baidu.palo.analysis.LiteralExpr; -import com.baidu.palo.analysis.MaxLiteral; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.zip.CRC32; - -public class PartitionKey implements Comparable, Writable { - private static final Logger LOG = LogManager.getLogger(PartitionKey.class); - private List keys; - private List types; - - // constuct for partition prune - public PartitionKey() { - keys = Lists.newArrayList(); - types = Lists.newArrayList(); - } - - // Factory methods - public static PartitionKey createInfinityPartitionKey(List columns, boolean isMax) - throws AnalysisException { - PartitionKey partitionKey = new PartitionKey(); - for (Column column : columns) { - partitionKey.keys.add(LiteralExpr.createInfinity(Type.fromPrimitiveType(column.getDataType()), isMax)); - partitionKey.types.add(column.getDataType()); - } - return partitionKey; - } - - public static PartitionKey createPartitionKey(List keys, List columns) - throws AnalysisException { - PartitionKey partitionKey = new PartitionKey(); - Preconditions.checkArgument(keys.size() <= columns.size()); - int i; - for (i = 0; i < keys.size(); ++i) { - partitionKey.keys.add(LiteralExpr.create(keys.get(i), - Type.fromPrimitiveType(columns.get(i).getDataType()))); - partitionKey.types.add(columns.get(i).getDataType()); - } - - // fill the vacancy with MIN - for (; i < columns.size(); ++i) { - Type type = Type.fromPrimitiveType(columns.get(i).getDataType()); - partitionKey.keys.add( - LiteralExpr.createInfinity(type, false)); - partitionKey.types.add(columns.get(i).getDataType()); - } - - Preconditions.checkState(partitionKey.keys.size() == columns.size()); - return partitionKey; - } - - public void pushColumn(LiteralExpr keyValue, PrimitiveType keyType) { - keys.add(keyValue); - types.add(keyType); - } - - public void popColumn() { - keys.remove(keys.size() - 1); - types.remove(types.size() - 1); - } - - public List getKeys() { - return keys; - } - - public long getHashValue() { - CRC32 hashValue = new CRC32(); - int i = 0; - for (LiteralExpr expr : keys) { - ByteBuffer buffer = expr.getHashValue(types.get(i)); - hashValue.update(buffer.array(), 0, buffer.limit()); - i++; - } - return hashValue.getValue(); - } - - public boolean isMinValue() { - for (LiteralExpr literalExpr : keys) { - if (!literalExpr.isMinValue()) { - return false; - } - } - return true; - } - - public boolean isMaxValue() { - for (LiteralExpr literalExpr : keys) { - if (literalExpr != MaxLiteral.MAX_VALUE) { - return false; - } - } - return true; - } - - @Override - // compare with other PartitionKey. 
used for partition prune - public int compareTo(PartitionKey other) { - int this_key_len = this.keys.size(); - int other_key_len = other.keys.size(); - int min_len = Math.min(this_key_len, other_key_len); - for (int i = 0; i < min_len; ++i) { - int ret = keys.get(i).compareLiteral(other.keys.get(i)); - if (0 != ret) { - return ret; - } - } - if (this_key_len < other_key_len) { - return -1; - } else if (this_key_len > other_key_len) { - return 1; - } else { - return 0; - } - } - - public String toSql() { - StringBuilder strBuilder = new StringBuilder(); - int i = 0; - for (LiteralExpr expr : keys) { - Object value = null; - if (expr == MaxLiteral.MAX_VALUE) { - value = expr.toSql(); - strBuilder.append(value); - continue; - } else { - value = "\"" + expr.getRealValue() + "\""; - if (expr instanceof DateLiteral) { - DateLiteral dateLiteral = (DateLiteral) expr; - value = dateLiteral.toSql(); - } - } - if (keys.size() - 1 == i) { - strBuilder.append("(").append(value).append(")"); - } else { - strBuilder.append("(").append(value).append("), "); - } - i++; - } - return strBuilder.toString(); - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("types: ["); - for (PrimitiveType type : types) { - builder.append(type.toString()); - } - builder.append("]; "); - - builder.append("keys: ["); - int i = 0; - for (LiteralExpr expr : keys) { - Object value = null; - if (expr == MaxLiteral.MAX_VALUE) { - value = expr.toSql(); - } else { - value = expr.getRealValue(); - if (expr instanceof DateLiteral) { - DateLiteral dateLiteral = (DateLiteral) expr; - value = dateLiteral.getStringValue(); - } - } - if (keys.size() - 1 == i) { - builder.append(value); - } else { - builder.append(value + ", "); - } - ++i; - } - builder.append("]; "); - - return builder.toString(); - } - - @Override - public void write(DataOutput out) throws IOException { - int count = keys.size(); - if (count != types.size()) { - throw new IOException("Size of keys and types are not equal"); - } - - out.writeInt(count); - for (int i = 0; i < count; i++) { - PrimitiveType type = types.get(i); - Text.writeString(out, type.toString()); - if (keys.get(i) == MaxLiteral.MAX_VALUE) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - keys.get(i).write(out); - } - } - } - - @Override - public void readFields(DataInput in) throws IOException { - int count = in.readInt(); - for (int i = 0; i < count; i++) { - PrimitiveType type = PrimitiveType.valueOf(Text.readString(in)); - types.add(type); - - LiteralExpr literal = null; - boolean isMax = in.readBoolean(); - if (isMax) { - literal = MaxLiteral.MAX_VALUE; - } else { - switch (type) { - case TINYINT: - case SMALLINT: - case INT: - case BIGINT: - literal = IntLiteral.read(in); - break; - case LARGEINT: - literal = LargeIntLiteral.read(in); - break; - case DATE: - case DATETIME: - literal = DateLiteral.read(in); - break; - default: - throw new IOException("type[" + type.name() + "] not supported: "); - } - } - literal.setType(Type.fromPrimitiveType(type)); - keys.add(literal); - } - } - - public static PartitionKey read(DataInput in) throws IOException { - PartitionKey key = new PartitionKey(); - key.readFields(in); - return key; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof PartitionKey)) { - return false; - } - - PartitionKey partitionKey = (PartitionKey) obj; - - // Check keys - if (keys != partitionKey.keys) { - if (keys.size() != 
partitionKey.keys.size()) { - return false; - } - for (int i = 0; i < keys.size(); i++) { - if (!keys.get(i).equals(partitionKey.keys.get(i))) { - return false; - } - } - } - - // Check types - if (types != partitionKey.types) { - if (types.size() != partitionKey.types.size()) { - return false; - } - for (int i = 0; i < types.size(); i++) { - if (!types.get(i).equals(partitionKey.types.get(i))) { - return false; - } - } - } - - return true; - } - - @Override - public int hashCode() { - int ret = types.size() * 1000; - for (int i = 0; i < types.size(); i++) { - ret += types.get(i).ordinal(); - } - return ret; - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.DateLiteral; +import com.baidu.palo.analysis.IntLiteral; +import com.baidu.palo.analysis.LargeIntLiteral; +import com.baidu.palo.analysis.LiteralExpr; +import com.baidu.palo.analysis.MaxLiteral; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Lists; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.zip.CRC32; + +public class PartitionKey implements Comparable, Writable { + private static final Logger LOG = LogManager.getLogger(PartitionKey.class); + private List keys; + private List types; + + // constuct for partition prune + public PartitionKey() { + keys = Lists.newArrayList(); + types = Lists.newArrayList(); + } + + // Factory methods + public static PartitionKey createInfinityPartitionKey(List columns, boolean isMax) + throws AnalysisException { + PartitionKey partitionKey = new PartitionKey(); + for (Column column : columns) { + partitionKey.keys.add(LiteralExpr.createInfinity(Type.fromPrimitiveType(column.getDataType()), isMax)); + partitionKey.types.add(column.getDataType()); + } + return partitionKey; + } + + public static PartitionKey createPartitionKey(List keys, List columns) + throws AnalysisException { + PartitionKey partitionKey = new PartitionKey(); + Preconditions.checkArgument(keys.size() <= columns.size()); + int i; + for (i = 0; i < keys.size(); ++i) { + partitionKey.keys.add(LiteralExpr.create(keys.get(i), + Type.fromPrimitiveType(columns.get(i).getDataType()))); + partitionKey.types.add(columns.get(i).getDataType()); + } + + // fill the vacancy with MIN + for (; i < columns.size(); ++i) { + Type type = Type.fromPrimitiveType(columns.get(i).getDataType()); + partitionKey.keys.add( + LiteralExpr.createInfinity(type, false)); + partitionKey.types.add(columns.get(i).getDataType()); + } + + Preconditions.checkState(partitionKey.keys.size() == columns.size()); + return partitionKey; + } + + public void pushColumn(LiteralExpr keyValue, PrimitiveType keyType) { + keys.add(keyValue); + types.add(keyType); + } + + public void popColumn() { + keys.remove(keys.size() - 1); + types.remove(types.size() - 1); + } + + public List getKeys() { + return keys; + } + + public long getHashValue() { + CRC32 hashValue = new CRC32(); + int i = 0; + for (LiteralExpr expr : keys) { + ByteBuffer buffer = expr.getHashValue(types.get(i)); + hashValue.update(buffer.array(), 0, buffer.limit()); + i++; + } + return hashValue.getValue(); + } + + public boolean isMinValue() { + for (LiteralExpr literalExpr : keys) { + if (!literalExpr.isMinValue()) { + return false; + } + 
} + return true; + } + + public boolean isMaxValue() { + for (LiteralExpr literalExpr : keys) { + if (literalExpr != MaxLiteral.MAX_VALUE) { + return false; + } + } + return true; + } + + @Override + // compare with other PartitionKey. used for partition prune + public int compareTo(PartitionKey other) { + int this_key_len = this.keys.size(); + int other_key_len = other.keys.size(); + int min_len = Math.min(this_key_len, other_key_len); + for (int i = 0; i < min_len; ++i) { + int ret = keys.get(i).compareLiteral(other.keys.get(i)); + if (0 != ret) { + return ret; + } + } + if (this_key_len < other_key_len) { + return -1; + } else if (this_key_len > other_key_len) { + return 1; + } else { + return 0; + } + } + + public String toSql() { + StringBuilder strBuilder = new StringBuilder(); + int i = 0; + for (LiteralExpr expr : keys) { + Object value = null; + if (expr == MaxLiteral.MAX_VALUE) { + value = expr.toSql(); + strBuilder.append(value); + continue; + } else { + value = "\"" + expr.getRealValue() + "\""; + if (expr instanceof DateLiteral) { + DateLiteral dateLiteral = (DateLiteral) expr; + value = dateLiteral.toSql(); + } + } + if (keys.size() - 1 == i) { + strBuilder.append("(").append(value).append(")"); + } else { + strBuilder.append("(").append(value).append("), "); + } + i++; + } + return strBuilder.toString(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("types: ["); + for (PrimitiveType type : types) { + builder.append(type.toString()); + } + builder.append("]; "); + + builder.append("keys: ["); + int i = 0; + for (LiteralExpr expr : keys) { + Object value = null; + if (expr == MaxLiteral.MAX_VALUE) { + value = expr.toSql(); + } else { + value = expr.getRealValue(); + if (expr instanceof DateLiteral) { + DateLiteral dateLiteral = (DateLiteral) expr; + value = dateLiteral.getStringValue(); + } + } + if (keys.size() - 1 == i) { + builder.append(value); + } else { + builder.append(value + ", "); + } + ++i; + } + builder.append("]; "); + + return builder.toString(); + } + + @Override + public void write(DataOutput out) throws IOException { + int count = keys.size(); + if (count != types.size()) { + throw new IOException("Size of keys and types are not equal"); + } + + out.writeInt(count); + for (int i = 0; i < count; i++) { + PrimitiveType type = types.get(i); + Text.writeString(out, type.toString()); + if (keys.get(i) == MaxLiteral.MAX_VALUE) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + keys.get(i).write(out); + } + } + } + + @Override + public void readFields(DataInput in) throws IOException { + int count = in.readInt(); + for (int i = 0; i < count; i++) { + PrimitiveType type = PrimitiveType.valueOf(Text.readString(in)); + types.add(type); + + LiteralExpr literal = null; + boolean isMax = in.readBoolean(); + if (isMax) { + literal = MaxLiteral.MAX_VALUE; + } else { + switch (type) { + case TINYINT: + case SMALLINT: + case INT: + case BIGINT: + literal = IntLiteral.read(in); + break; + case LARGEINT: + literal = LargeIntLiteral.read(in); + break; + case DATE: + case DATETIME: + literal = DateLiteral.read(in); + break; + default: + throw new IOException("type[" + type.name() + "] not supported: "); + } + } + literal.setType(Type.fromPrimitiveType(type)); + keys.add(literal); + } + } + + public static PartitionKey read(DataInput in) throws IOException { + PartitionKey key = new PartitionKey(); + key.readFields(in); + return key; + } + + @Override + public boolean equals(Object obj) { + if (this 
== obj) { + return true; + } + if (!(obj instanceof PartitionKey)) { + return false; + } + + PartitionKey partitionKey = (PartitionKey) obj; + + // Check keys + if (keys != partitionKey.keys) { + if (keys.size() != partitionKey.keys.size()) { + return false; + } + for (int i = 0; i < keys.size(); i++) { + if (!keys.get(i).equals(partitionKey.keys.get(i))) { + return false; + } + } + } + + // Check types + if (types != partitionKey.types) { + if (types.size() != partitionKey.types.size()) { + return false; + } + for (int i = 0; i < types.size(); i++) { + if (!types.get(i).equals(partitionKey.types.get(i))) { + return false; + } + } + } + + return true; + } + + @Override + public int hashCode() { + int ret = types.size() * 1000; + for (int i = 0; i < types.size(); i++) { + ret += types.get(i).ordinal(); + } + return ret; + } +} diff --git a/fe/src/com/baidu/palo/catalog/RandomDistributionInfo.java b/fe/src/com/baidu/palo/catalog/RandomDistributionInfo.java index e1d16b668a..d51336415d 100644 --- a/fe/src/com/baidu/palo/catalog/RandomDistributionInfo.java +++ b/fe/src/com/baidu/palo/catalog/RandomDistributionInfo.java @@ -13,78 +13,78 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.DistributionDesc; -import com.baidu.palo.analysis.RandomDistributionDesc; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -/** - * Random partition. - */ -public class RandomDistributionInfo extends DistributionInfo { - - private int bucketNum; - - public RandomDistributionInfo() { - super(); - } - - public RandomDistributionInfo(int bucketNum) { - super(DistributionInfoType.RANDOM); - this.bucketNum = bucketNum; - } - - @Override - public DistributionDesc toDistributionDesc() { - DistributionDesc distributionDesc = new RandomDistributionDesc(bucketNum); - return distributionDesc; - } - - @Override - public int getBucketNum() { - return bucketNum; - } - - @Override - public String toSql() { - StringBuilder builder = new StringBuilder(); - builder.append("DISTRIBUTED BY RANDOM BUCKETS ").append(bucketNum); - return builder.toString(); - } - - public void write(DataOutput out) throws IOException { - super.write(out); - out.writeInt(bucketNum); - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - bucketNum = in.readInt(); - } - - public static DistributionInfo read(DataInput in) throws IOException { - DistributionInfo distributionInfo = new RandomDistributionInfo(); - distributionInfo.readFields(in); - return distributionInfo; - } - - public boolean equals(DistributionInfo info) { - if (this == info) { - return true; - } - - if (!(info instanceof RandomDistributionInfo)) { - return false; - } - - RandomDistributionInfo randomDistributionInfo = (RandomDistributionInfo) info; - - return type == randomDistributionInfo.type - && bucketNum == randomDistributionInfo.bucketNum; - } - -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.DistributionDesc; +import com.baidu.palo.analysis.RandomDistributionDesc; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +/** + * Random partition. 
+ */ +public class RandomDistributionInfo extends DistributionInfo { + + private int bucketNum; + + public RandomDistributionInfo() { + super(); + } + + public RandomDistributionInfo(int bucketNum) { + super(DistributionInfoType.RANDOM); + this.bucketNum = bucketNum; + } + + @Override + public DistributionDesc toDistributionDesc() { + DistributionDesc distributionDesc = new RandomDistributionDesc(bucketNum); + return distributionDesc; + } + + @Override + public int getBucketNum() { + return bucketNum; + } + + @Override + public String toSql() { + StringBuilder builder = new StringBuilder(); + builder.append("DISTRIBUTED BY RANDOM BUCKETS ").append(bucketNum); + return builder.toString(); + } + + public void write(DataOutput out) throws IOException { + super.write(out); + out.writeInt(bucketNum); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + bucketNum = in.readInt(); + } + + public static DistributionInfo read(DataInput in) throws IOException { + DistributionInfo distributionInfo = new RandomDistributionInfo(); + distributionInfo.readFields(in); + return distributionInfo; + } + + public boolean equals(DistributionInfo info) { + if (this == info) { + return true; + } + + if (!(info instanceof RandomDistributionInfo)) { + return false; + } + + RandomDistributionInfo randomDistributionInfo = (RandomDistributionInfo) info; + + return type == randomDistributionInfo.type + && bucketNum == randomDistributionInfo.bucketNum; + } + +} diff --git a/fe/src/com/baidu/palo/catalog/Tablet.java b/fe/src/com/baidu/palo/catalog/Tablet.java index 0eab63c7ea..37080eacfb 100644 --- a/fe/src/com/baidu/palo/catalog/Tablet.java +++ b/fe/src/com/baidu/palo/catalog/Tablet.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.catalog.Replica.ReplicaState; import com.baidu.palo.common.io.Writable; @@ -31,266 +31,266 @@ import java.util.Collections; import java.util.Iterator; import java.util.LinkedList; import java.util.List; -import java.util.Set; - -/** - * This class represents the olap tablet related metadata. - */ -public class Tablet extends MetaObject implements Writable { - private static final Logger LOG = LogManager.getLogger(Tablet.class); - - private long id; - private List replicas; - - private long checkedVersion; - private long checkedVersionHash; - - private boolean isConsistent; - - public Tablet() { - this(0L, new ArrayList()); - } - - public Tablet(long tabletId) { - this(tabletId, new ArrayList()); - } - - public Tablet(long tabletId, List replicas) { - this.id = tabletId; - this.replicas = replicas; - if (this.replicas == null) { - this.replicas = new ArrayList(); - } - - checkedVersion = -1L; - checkedVersionHash = -1L; - - isConsistent = true; +import java.util.Set; + +/** + * This class represents the olap tablet related metadata. 
+ */ +public class Tablet extends MetaObject implements Writable { + private static final Logger LOG = LogManager.getLogger(Tablet.class); + + private long id; + private List replicas; + + private long checkedVersion; + private long checkedVersionHash; + + private boolean isConsistent; + + public Tablet() { + this(0L, new ArrayList()); + } + + public Tablet(long tabletId) { + this(tabletId, new ArrayList()); + } + + public Tablet(long tabletId, List replicas) { + this.id = tabletId; + this.replicas = replicas; + if (this.replicas == null) { + this.replicas = new ArrayList(); + } + + checkedVersion = -1L; + checkedVersionHash = -1L; + + isConsistent = true; } public void setIdForRestore(long tabletId) { this.id = tabletId; } - - public long getId() { - return this.id; - } - - public long getCheckedVersion() { - return this.checkedVersion; - } - - public long getCheckedVersionHash() { - return this.checkedVersionHash; - } - - public void setCheckedVersion(long checkedVersion, long checkedVersionHash) { - this.checkedVersion = checkedVersion; - this.checkedVersionHash = checkedVersionHash; - } - - public void setIsConsistent(boolean good) { - this.isConsistent = good; - } - - public boolean isConsistent() { - return isConsistent; - } - - private boolean deleteRedundantReplica(long backendId, long version) { - boolean delete = false; - boolean hasBackend = false; - Iterator iterator = replicas.iterator(); - while (iterator.hasNext()) { - Replica replica = iterator.next(); - if (replica.getBackendId() == backendId) { - hasBackend = true; - if (replica.getVersion() <= version) { - iterator.remove(); - delete = true; - } - } - } - - return delete || !hasBackend; - } - - public void addReplica(Replica replica, boolean isRestore) { - if (deleteRedundantReplica(replica.getBackendId(), replica.getVersion())) { + + public long getId() { + return this.id; + } + + public long getCheckedVersion() { + return this.checkedVersion; + } + + public long getCheckedVersionHash() { + return this.checkedVersionHash; + } + + public void setCheckedVersion(long checkedVersion, long checkedVersionHash) { + this.checkedVersion = checkedVersion; + this.checkedVersionHash = checkedVersionHash; + } + + public void setIsConsistent(boolean good) { + this.isConsistent = good; + } + + public boolean isConsistent() { + return isConsistent; + } + + private boolean deleteRedundantReplica(long backendId, long version) { + boolean delete = false; + boolean hasBackend = false; + Iterator iterator = replicas.iterator(); + while (iterator.hasNext()) { + Replica replica = iterator.next(); + if (replica.getBackendId() == backendId) { + hasBackend = true; + if (replica.getVersion() <= version) { + iterator.remove(); + delete = true; + } + } + } + + return delete || !hasBackend; + } + + public void addReplica(Replica replica, boolean isRestore) { + if (deleteRedundantReplica(replica.getBackendId(), replica.getVersion())) { replicas.add(replica); if (!isRestore) { Catalog.getCurrentInvertedIndex().addReplica(id, replica); - } - } + } + } } public void addReplica(Replica replica) { addReplica(replica, false); } - - public List getReplicas() { - return this.replicas; - } - - public Set getBackendIds() { - Set beIds = Sets.newHashSet(); - for (Replica replica : replicas) { - beIds.add(replica.getBackendId()); - } - return beIds; - } - - // for query + + public List getReplicas() { + return this.replicas; + } + + public Set getBackendIds() { + Set beIds = Sets.newHashSet(); + for (Replica replica : replicas) { + 
beIds.add(replica.getBackendId()); + } + return beIds; + } + + // for query public void getQueryableReplicas(List allQuerableReplica, List localReplicas, - long committedVersion, long committedVersionHash, long localBeId) { - for (Replica replica : replicas) { - ReplicaState state = replica.getState(); - if (state == ReplicaState.NORMAL || state == ReplicaState.SCHEMA_CHANGE) { - if (replica.getVersion() > committedVersion - || (replica.getVersion() == committedVersion - && replica.getVersionHash() == committedVersionHash)) { + long committedVersion, long committedVersionHash, long localBeId) { + for (Replica replica : replicas) { + ReplicaState state = replica.getState(); + if (state == ReplicaState.NORMAL || state == ReplicaState.SCHEMA_CHANGE) { + if (replica.getVersion() > committedVersion + || (replica.getVersion() == committedVersion + && replica.getVersionHash() == committedVersionHash)) { allQuerableReplica.add(replica); if (localBeId != -1 && replica.getBackendId() == localBeId) { localReplicas.add(replica); - } - } - } - } - } - - public Replica getReplicaById(long replicaId) { - for (Replica replica : replicas) { - if (replica.getId() == replicaId) { - return replica; - } - } - return null; - } - - public Replica getReplicaByBackendId(long backendId) { - for (Replica replica : replicas) { - if (replica.getBackendId() == backendId) { - return replica; - } - } - return null; - } - - public boolean deleteReplica(Replica replica) { - if (replicas.contains(replica)) { - replicas.remove(replica); - Catalog.getCurrentInvertedIndex().deleteReplica(id, replica.getBackendId()); - return true; - } - return false; - } - - public boolean deleteReplicaByBackendId(long backendId) { - Iterator iterator = replicas.iterator(); - while (iterator.hasNext()) { - Replica replica = iterator.next(); - if (replica.getBackendId() == backendId) { - iterator.remove(); - Catalog.getCurrentInvertedIndex().deleteReplica(id, backendId); - return true; - } - } - return false; - } - - @Deprecated - public Replica deleteReplicaById(long replicaId) { - Iterator iterator = replicas.iterator(); - while (iterator.hasNext()) { - Replica replica = iterator.next(); - if (replica.getId() == replicaId) { - LOG.info("delete replica[" + replica.getId() + "]"); - iterator.remove(); - return replica; - } - } - return null; - } - - // for test only - public void clearReplica() { - this.replicas.clear(); - } - - public void setTabletId(long tabletId) { - this.id = tabletId; - } - - public static void sortReplicaByVersionDesc(List replicas) { - // sort replicas by version. 
higher version in the tops - Collections.sort(replicas, Replica.VERSION_DESC_COMPARATOR); - } - - public String toString() { - return "tabletId=" + this.id; - } - - public void write(DataOutput out) throws IOException { - super.write(out); - - out.writeLong(id); - int replicaCount = replicas.size(); - out.writeInt(replicaCount); - for (int i = 0; i < replicaCount; ++i) { - replicas.get(i).write(out); - } - - out.writeLong(checkedVersion); - out.writeLong(checkedVersionHash); - out.writeBoolean(isConsistent); - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - - id = in.readLong(); - int replicaCount = in.readInt(); - for (int i = 0; i < replicaCount; ++i) { - Replica replica = Replica.read(in); - if (deleteRedundantReplica(replica.getBackendId(), replica.getVersion())) { - replicas.add(replica); - } - } - - if (Catalog.getCurrentCatalogJournalVersion() >= 6) { - checkedVersion = in.readLong(); - checkedVersionHash = in.readLong(); - isConsistent = in.readBoolean(); - } - } - - public static Tablet read(DataInput in) throws IOException { - Tablet tablet = new Tablet(); - tablet.readFields(in); - return tablet; - } - - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof Tablet)) { - return false; - } - - Tablet tablet = (Tablet) obj; - - if (replicas != tablet.replicas) { - if (replicas.size() != tablet.replicas.size()) { - return false; - } - int size = replicas.size(); - for (int i = 0; i < size; i++) { - if (!tablet.replicas.contains(replicas.get(i))) { - return false; - } - } - } - return id == tablet.id; - } -} + } + } + } + } + } + + public Replica getReplicaById(long replicaId) { + for (Replica replica : replicas) { + if (replica.getId() == replicaId) { + return replica; + } + } + return null; + } + + public Replica getReplicaByBackendId(long backendId) { + for (Replica replica : replicas) { + if (replica.getBackendId() == backendId) { + return replica; + } + } + return null; + } + + public boolean deleteReplica(Replica replica) { + if (replicas.contains(replica)) { + replicas.remove(replica); + Catalog.getCurrentInvertedIndex().deleteReplica(id, replica.getBackendId()); + return true; + } + return false; + } + + public boolean deleteReplicaByBackendId(long backendId) { + Iterator iterator = replicas.iterator(); + while (iterator.hasNext()) { + Replica replica = iterator.next(); + if (replica.getBackendId() == backendId) { + iterator.remove(); + Catalog.getCurrentInvertedIndex().deleteReplica(id, backendId); + return true; + } + } + return false; + } + + @Deprecated + public Replica deleteReplicaById(long replicaId) { + Iterator iterator = replicas.iterator(); + while (iterator.hasNext()) { + Replica replica = iterator.next(); + if (replica.getId() == replicaId) { + LOG.info("delete replica[" + replica.getId() + "]"); + iterator.remove(); + return replica; + } + } + return null; + } + + // for test only + public void clearReplica() { + this.replicas.clear(); + } + + public void setTabletId(long tabletId) { + this.id = tabletId; + } + + public static void sortReplicaByVersionDesc(List replicas) { + // sort replicas by version. 
higher version in the tops + Collections.sort(replicas, Replica.VERSION_DESC_COMPARATOR); + } + + public String toString() { + return "tabletId=" + this.id; + } + + public void write(DataOutput out) throws IOException { + super.write(out); + + out.writeLong(id); + int replicaCount = replicas.size(); + out.writeInt(replicaCount); + for (int i = 0; i < replicaCount; ++i) { + replicas.get(i).write(out); + } + + out.writeLong(checkedVersion); + out.writeLong(checkedVersionHash); + out.writeBoolean(isConsistent); + } + + public void readFields(DataInput in) throws IOException { + super.readFields(in); + + id = in.readLong(); + int replicaCount = in.readInt(); + for (int i = 0; i < replicaCount; ++i) { + Replica replica = Replica.read(in); + if (deleteRedundantReplica(replica.getBackendId(), replica.getVersion())) { + replicas.add(replica); + } + } + + if (Catalog.getCurrentCatalogJournalVersion() >= 6) { + checkedVersion = in.readLong(); + checkedVersionHash = in.readLong(); + isConsistent = in.readBoolean(); + } + } + + public static Tablet read(DataInput in) throws IOException { + Tablet tablet = new Tablet(); + tablet.readFields(in); + return tablet; + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Tablet)) { + return false; + } + + Tablet tablet = (Tablet) obj; + + if (replicas != tablet.replicas) { + if (replicas.size() != tablet.replicas.size()) { + return false; + } + int size = replicas.size(); + for (int i = 0; i < size; i++) { + if (!tablet.replicas.contains(replicas.get(i))) { + return false; + } + } + } + return id == tablet.id; + } +} diff --git a/fe/src/com/baidu/palo/catalog/Uda.java b/fe/src/com/baidu/palo/catalog/Uda.java index db671ab4b4..d403ae16c7 100755 --- a/fe/src/com/baidu/palo/catalog/Uda.java +++ b/fe/src/com/baidu/palo/catalog/Uda.java @@ -18,108 +18,108 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.FunctionArgs; -import com.baidu.palo.analysis.FunctionName; -import com.baidu.palo.analysis.HdfsURI; -import com.baidu.palo.thrift.TAggregateFunction; -import com.baidu.palo.thrift.TFunction; - -import java.util.List; - -/** - * Internal representation of a UDA. - */ -public class Uda extends Function { - private Type intermediateType_; - - // The symbol inside the binary at location_ that contains this particular. - // They can be null if it is not required. 
- private String updateFnSymbol_; - private String initFnSymbol_; - private String serializeFnSymbol_; - private String mergeFnSymbol_; - private String finalizeFnSymbol_; - - public Uda(long id, FunctionName fnName, FunctionArgs args, Type retType) { - super(fnName, args.argTypes, retType, args.hasVarArgs); - } - - public Uda(long id, FunctionName fnName, List argTypes, Type retType, - Type intermediateType, HdfsURI location, String updateFnSymbol, String initFnSymbol, - String serializeFnSymbol, String mergeFnSymbol, String finalizeFnSymbol) { - super(fnName, argTypes, retType, false); - setLocation(location); - intermediateType_ = intermediateType; - updateFnSymbol_ = updateFnSymbol; - initFnSymbol_ = initFnSymbol; - serializeFnSymbol_ = serializeFnSymbol; - mergeFnSymbol_ = mergeFnSymbol; - finalizeFnSymbol_ = finalizeFnSymbol; - } - - public String getUpdateFnSymbol() { - return updateFnSymbol_; - } - - public void setUpdateFnSymbol(String fn) { - updateFnSymbol_ = fn; - } - - public String getInitFnSymbol() { - return initFnSymbol_; - } - - public void setInitFnSymbol(String fn) { - initFnSymbol_ = fn; - } - - public String getSerializeFnSymbol() { - return serializeFnSymbol_; - } - - public void setSerializeFnSymbol(String fn) { - serializeFnSymbol_ = fn; - } - - public String getMergeFnSymbol() { - return mergeFnSymbol_; - } - - public void setMergeFnSymbol(String fn) { - mergeFnSymbol_ = fn; - } - - public String getFinalizeFnSymbol() { - return finalizeFnSymbol_; - } - - public void setFinalizeFnSymbol(String fn) { - finalizeFnSymbol_ = fn; - } - - public Type getIntermediateType() { - return intermediateType_; - } - - public void setIntermediateType(Type t) { - intermediateType_ = t; - } - - @Override - public TFunction toThrift() { - TFunction fn = super.toThrift(); - TAggregateFunction uda = new TAggregateFunction(); - uda.setUpdate_fn_symbol(updateFnSymbol_); - uda.setInit_fn_symbol(initFnSymbol_); - if (serializeFnSymbol_ == null) { - uda.setSerialize_fn_symbol(serializeFnSymbol_); - } - uda.setMerge_fn_symbol(mergeFnSymbol_); - uda.setFinalize_fn_symbol(finalizeFnSymbol_); - uda.setIntermediate_type(intermediateType_.toThrift()); - fn.setAggregate_fn(uda); - return fn; - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.FunctionArgs; +import com.baidu.palo.analysis.FunctionName; +import com.baidu.palo.analysis.HdfsURI; +import com.baidu.palo.thrift.TAggregateFunction; +import com.baidu.palo.thrift.TFunction; + +import java.util.List; + +/** + * Internal representation of a UDA. + */ +public class Uda extends Function { + private Type intermediateType_; + + // The symbol inside the binary at location_ that contains this particular. + // They can be null if it is not required. 
+ private String updateFnSymbol_; + private String initFnSymbol_; + private String serializeFnSymbol_; + private String mergeFnSymbol_; + private String finalizeFnSymbol_; + + public Uda(long id, FunctionName fnName, FunctionArgs args, Type retType) { + super(fnName, args.argTypes, retType, args.hasVarArgs); + } + + public Uda(long id, FunctionName fnName, List argTypes, Type retType, + Type intermediateType, HdfsURI location, String updateFnSymbol, String initFnSymbol, + String serializeFnSymbol, String mergeFnSymbol, String finalizeFnSymbol) { + super(fnName, argTypes, retType, false); + setLocation(location); + intermediateType_ = intermediateType; + updateFnSymbol_ = updateFnSymbol; + initFnSymbol_ = initFnSymbol; + serializeFnSymbol_ = serializeFnSymbol; + mergeFnSymbol_ = mergeFnSymbol; + finalizeFnSymbol_ = finalizeFnSymbol; + } + + public String getUpdateFnSymbol() { + return updateFnSymbol_; + } + + public void setUpdateFnSymbol(String fn) { + updateFnSymbol_ = fn; + } + + public String getInitFnSymbol() { + return initFnSymbol_; + } + + public void setInitFnSymbol(String fn) { + initFnSymbol_ = fn; + } + + public String getSerializeFnSymbol() { + return serializeFnSymbol_; + } + + public void setSerializeFnSymbol(String fn) { + serializeFnSymbol_ = fn; + } + + public String getMergeFnSymbol() { + return mergeFnSymbol_; + } + + public void setMergeFnSymbol(String fn) { + mergeFnSymbol_ = fn; + } + + public String getFinalizeFnSymbol() { + return finalizeFnSymbol_; + } + + public void setFinalizeFnSymbol(String fn) { + finalizeFnSymbol_ = fn; + } + + public Type getIntermediateType() { + return intermediateType_; + } + + public void setIntermediateType(Type t) { + intermediateType_ = t; + } + + @Override + public TFunction toThrift() { + TFunction fn = super.toThrift(); + TAggregateFunction uda = new TAggregateFunction(); + uda.setUpdate_fn_symbol(updateFnSymbol_); + uda.setInit_fn_symbol(initFnSymbol_); + if (serializeFnSymbol_ == null) { + uda.setSerialize_fn_symbol(serializeFnSymbol_); + } + uda.setMerge_fn_symbol(mergeFnSymbol_); + uda.setFinalize_fn_symbol(finalizeFnSymbol_); + uda.setIntermediate_type(intermediateType_.toThrift()); + fn.setAggregate_fn(uda); + return fn; + } +} diff --git a/fe/src/com/baidu/palo/catalog/Udf.java b/fe/src/com/baidu/palo/catalog/Udf.java index c907b5ba87..4b8dad8c2b 100755 --- a/fe/src/com/baidu/palo/catalog/Udf.java +++ b/fe/src/com/baidu/palo/catalog/Udf.java @@ -18,51 +18,51 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.FunctionArgs; -import com.baidu.palo.analysis.FunctionName; -import com.baidu.palo.analysis.HdfsURI; -import com.baidu.palo.thrift.TFunction; -import com.baidu.palo.thrift.TScalarFunction; - -import java.util.List; - - -/** - * Internal representation of a UDF. - * TODO: unify this with builtins. - */ - -public class Udf extends Function { - // The name inside the binary at location_ that contains this particular - // UDF. e.g. org.example.MyUdf.class. 
- private String symbolName_; - - public Udf(long id, FunctionName fnName, FunctionArgs args, Type retType) { - super(fnName, args.argTypes, retType, args.hasVarArgs); - } - - public Udf(long id, FunctionName fnName, List argTypes, Type retType, - HdfsURI location, String symbolName) { - super(fnName, argTypes, retType, false); - setLocation(location); - setSymbolName(symbolName); - } - - public String getSymbolName() { - return symbolName_; - } - - public void setSymbolName(String s) { - symbolName_ = s; - } - - @Override - public TFunction toThrift() { - TFunction fn = super.toThrift(); - fn.setScalar_fn(new TScalarFunction()); - fn.getScalar_fn().setSymbol(symbolName_); - return fn; - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.FunctionArgs; +import com.baidu.palo.analysis.FunctionName; +import com.baidu.palo.analysis.HdfsURI; +import com.baidu.palo.thrift.TFunction; +import com.baidu.palo.thrift.TScalarFunction; + +import java.util.List; + + +/** + * Internal representation of a UDF. + * TODO: unify this with builtins. + */ + +public class Udf extends Function { + // The name inside the binary at location_ that contains this particular + // UDF. e.g. org.example.MyUdf.class. + private String symbolName_; + + public Udf(long id, FunctionName fnName, FunctionArgs args, Type retType) { + super(fnName, args.argTypes, retType, args.hasVarArgs); + } + + public Udf(long id, FunctionName fnName, List argTypes, Type retType, + HdfsURI location, String symbolName) { + super(fnName, argTypes, retType, false); + setLocation(location); + setSymbolName(symbolName); + } + + public String getSymbolName() { + return symbolName_; + } + + public void setSymbolName(String s) { + symbolName_ = s; + } + + @Override + public TFunction toThrift() { + TFunction fn = super.toThrift(); + fn.setScalar_fn(new TScalarFunction()); + fn.getScalar_fn().setSymbol(symbolName_); + return fn; + } +} diff --git a/fe/src/com/baidu/palo/common/AliasGenerator.java b/fe/src/com/baidu/palo/common/AliasGenerator.java index 019f9433ea..1dd6ea9c4d 100644 --- a/fe/src/com/baidu/palo/common/AliasGenerator.java +++ b/fe/src/com/baidu/palo/common/AliasGenerator.java @@ -18,38 +18,38 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -import java.util.Set; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Sets; - -/** - * Abstract class representing an alias generator. It uses a prefix and a - * monotonically increasing counter to generate new aliases. Classes extending - * this class are responsible for initializing the prefix. - */ -public abstract class AliasGenerator { - private int numGeneratedAliases = 1; - protected String aliasPrefix = null; - protected Set usedAliases = Sets.newHashSet(); - - /** - * Return the next available alias. - */ - public String getNextAlias() { - Preconditions.checkNotNull(aliasPrefix); - while (true) { - String candidateAlias = aliasPrefix + Integer.toString(numGeneratedAliases++); - if (usedAliases.add(candidateAlias)) { - // add success - return candidateAlias; - } - if (numGeneratedAliases < 0) { - throw new IllegalStateException("Overflow occured during alias generation."); - } - } - } -} - +package com.baidu.palo.common; + +import java.util.Set; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Sets; + +/** + * Abstract class representing an alias generator. 
It uses a prefix and a + * monotonically increasing counter to generate new aliases. Classes extending + * this class are responsible for initializing the prefix. + */ +public abstract class AliasGenerator { + private int numGeneratedAliases = 1; + protected String aliasPrefix = null; + protected Set usedAliases = Sets.newHashSet(); + + /** + * Return the next available alias. + */ + public String getNextAlias() { + Preconditions.checkNotNull(aliasPrefix); + while (true) { + String candidateAlias = aliasPrefix + Integer.toString(numGeneratedAliases++); + if (usedAliases.add(candidateAlias)) { + // add success + return candidateAlias; + } + if (numGeneratedAliases < 0) { + throw new IllegalStateException("Overflow occured during alias generation."); + } + } + } +} + diff --git a/fe/src/com/baidu/palo/common/AuthorizationException.java b/fe/src/com/baidu/palo/common/AuthorizationException.java index 1fec44cf5c..89e551f698 100755 --- a/fe/src/com/baidu/palo/common/AuthorizationException.java +++ b/fe/src/com/baidu/palo/common/AuthorizationException.java @@ -18,17 +18,17 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -/** - * Thrown for authorization errors encountered when accessing Catalog objects. - */ -public class AuthorizationException extends Exception { - public AuthorizationException(String msg, Throwable cause) { - super(msg, cause); - } - - public AuthorizationException(String msg) { - super(msg); - } -} +package com.baidu.palo.common; + +/** + * Thrown for authorization errors encountered when accessing Catalog objects. + */ +public class AuthorizationException extends Exception { + public AuthorizationException(String msg, Throwable cause) { + super(msg, cause); + } + + public AuthorizationException(String msg) { + super(msg); + } +} diff --git a/fe/src/com/baidu/palo/common/ColumnAliasGenerator.java b/fe/src/com/baidu/palo/common/ColumnAliasGenerator.java index 16410b38be..1e88b0cb6a 100644 --- a/fe/src/com/baidu/palo/common/ColumnAliasGenerator.java +++ b/fe/src/com/baidu/palo/common/ColumnAliasGenerator.java @@ -18,19 +18,19 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -import java.util.List; - -import com.google.common.base.Preconditions; - -public class ColumnAliasGenerator extends AliasGenerator { - private static final String DEFAULT_COL_ALIAS_PREFIX = "$c$"; - - public ColumnAliasGenerator(List existingLabels, String prefix) { - Preconditions.checkNotNull(existingLabels); - aliasPrefix = prefix != null ? prefix : DEFAULT_COL_ALIAS_PREFIX; - usedAliases.addAll(existingLabels); - } -} - +package com.baidu.palo.common; + +import java.util.List; + +import com.google.common.base.Preconditions; + +public class ColumnAliasGenerator extends AliasGenerator { + private static final String DEFAULT_COL_ALIAS_PREFIX = "$c$"; + + public ColumnAliasGenerator(List existingLabels, String prefix) { + Preconditions.checkNotNull(existingLabels); + aliasPrefix = prefix != null ? prefix : DEFAULT_COL_ALIAS_PREFIX; + usedAliases.addAll(existingLabels); + } +} + diff --git a/fe/src/com/baidu/palo/common/Config.java b/fe/src/com/baidu/palo/common/Config.java index 3d27d3806e..6567eb0e02 100644 --- a/fe/src/com/baidu/palo/common/Config.java +++ b/fe/src/com/baidu/palo/common/Config.java @@ -570,4 +570,24 @@ public class Config extends ConfigBase { * You may reduce this number to void Avalanche disaster. 
*/ @ConfField public static int max_query_retry_time = 3; + + /* + * The tryLock timeout configuration of catalog lock. + * Normally it does not need to change, unless you need to test something. + */ + @ConfField public static long catalog_try_lock_timeout_ms = 5000; // 5 sec + + /* + * if this is set to true + * all pending load job will failed when call begin txn api + * all prepare load job will failed when call commit txn api + * all committed load job will waiting to be published + */ + @ConfField public static boolean disable_load_job = false; + + /* + * Load using hadoop cluster will be deprecated in future. + * Set to true to disable this kind of load. + */ + @ConfField public static boolean disable_hadoop_load = false; } diff --git a/fe/src/com/baidu/palo/common/ConfigWatcher.java b/fe/src/com/baidu/palo/common/ConfigWatcher.java index 48d2c238ef..d6dd4c79cf 100644 --- a/fe/src/com/baidu/palo/common/ConfigWatcher.java +++ b/fe/src/com/baidu/palo/common/ConfigWatcher.java @@ -18,101 +18,101 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -import com.baidu.palo.common.util.Daemon; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import java.nio.file.FileSystems; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.StandardWatchEventKinds; -import java.nio.file.WatchEvent; -import java.nio.file.WatchKey; -import java.nio.file.WatchService; -import java.nio.file.WatchEvent.Kind; - -/* - * used for watch config changed - */ -public class ConfigWatcher extends Daemon { - private static final Logger LOG = LogManager.getLogger(ConfigWatcher.class); - - public final Path configPath; - - public ConfigWatcher(String configPathStr) { - super("config watcher"); - Preconditions.checkState(!Strings.isNullOrEmpty(configPathStr)); - configPath = Paths.get(configPathStr); - } - - @Override - protected void runOneCycle() { - LOG.debug("start config watcher loop"); - try { - WatchService watchService = FileSystems.getDefault().newWatchService(); - configPath.register(watchService, StandardWatchEventKinds.ENTRY_CREATE, - StandardWatchEventKinds.ENTRY_MODIFY, - StandardWatchEventKinds.ENTRY_DELETE); - // start an infinite loop - while (true) { - // retrieve and remove the next watch key - final WatchKey key = watchService.take(); - // get list of pending events for the watch key - for (WatchEvent watchEvent : key.pollEvents()) { - // get the kind of event (create, modify, delete) - final Kind kind = watchEvent.kind(); - if (kind == StandardWatchEventKinds.OVERFLOW) { - continue; - } - - final WatchEvent watchEventPath = (WatchEvent) watchEvent; - final Path filePath = watchEventPath.context(); - LOG.info("config watcher [" + kind + " -> " + filePath + "]"); - - if (kind == StandardWatchEventKinds.ENTRY_CREATE) { - handleCreate(filePath); - } else if (kind == StandardWatchEventKinds.ENTRY_MODIFY) { - handleModify(filePath); - } else if (kind == StandardWatchEventKinds.ENTRY_DELETE) { - handleDelete(filePath); - } - } - - // reset the key - boolean valid = key.reset(); - // exit loop if the key is not valid - if (!valid) { - LOG.warn("config watch key is not valid"); - break; - } - } // end while - } catch (Exception e) { - LOG.warn("config watcher got exception", e); - } - } - - private void handleCreate(Path filePath) { - // TODO(cmy): implement if needed - } - - private void 
handleDelete(Path filePath) { - // TODO(cmy): implement if needed - } - - private void handleModify(Path filePath) { - // TODO(cmy): implement if needed - } - - // for test - public static void main(String[] args) throws InterruptedException { - ConfigWatcher wathcer = new ConfigWatcher("./"); - wathcer.start(); - Thread.sleep(500000); - } -} +package com.baidu.palo.common; + +import com.baidu.palo.common.util.Daemon; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.nio.file.FileSystems; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardWatchEventKinds; +import java.nio.file.WatchEvent; +import java.nio.file.WatchKey; +import java.nio.file.WatchService; +import java.nio.file.WatchEvent.Kind; + +/* + * used for watch config changed + */ +public class ConfigWatcher extends Daemon { + private static final Logger LOG = LogManager.getLogger(ConfigWatcher.class); + + public final Path configPath; + + public ConfigWatcher(String configPathStr) { + super("config watcher"); + Preconditions.checkState(!Strings.isNullOrEmpty(configPathStr)); + configPath = Paths.get(configPathStr); + } + + @Override + protected void runOneCycle() { + LOG.debug("start config watcher loop"); + try { + WatchService watchService = FileSystems.getDefault().newWatchService(); + configPath.register(watchService, StandardWatchEventKinds.ENTRY_CREATE, + StandardWatchEventKinds.ENTRY_MODIFY, + StandardWatchEventKinds.ENTRY_DELETE); + // start an infinite loop + while (true) { + // retrieve and remove the next watch key + final WatchKey key = watchService.take(); + // get list of pending events for the watch key + for (WatchEvent watchEvent : key.pollEvents()) { + // get the kind of event (create, modify, delete) + final Kind kind = watchEvent.kind(); + if (kind == StandardWatchEventKinds.OVERFLOW) { + continue; + } + + final WatchEvent watchEventPath = (WatchEvent) watchEvent; + final Path filePath = watchEventPath.context(); + LOG.info("config watcher [" + kind + " -> " + filePath + "]"); + + if (kind == StandardWatchEventKinds.ENTRY_CREATE) { + handleCreate(filePath); + } else if (kind == StandardWatchEventKinds.ENTRY_MODIFY) { + handleModify(filePath); + } else if (kind == StandardWatchEventKinds.ENTRY_DELETE) { + handleDelete(filePath); + } + } + + // reset the key + boolean valid = key.reset(); + // exit loop if the key is not valid + if (!valid) { + LOG.warn("config watch key is not valid"); + break; + } + } // end while + } catch (Exception e) { + LOG.warn("config watcher got exception", e); + } + } + + private void handleCreate(Path filePath) { + // TODO(cmy): implement if needed + } + + private void handleDelete(Path filePath) { + // TODO(cmy): implement if needed + } + + private void handleModify(Path filePath) { + // TODO(cmy): implement if needed + } + + // for test + public static void main(String[] args) throws InterruptedException { + ConfigWatcher wathcer = new ConfigWatcher("./"); + wathcer.start(); + Thread.sleep(500000); + } +} diff --git a/fe/src/com/baidu/palo/common/FeNameFormat.java b/fe/src/com/baidu/palo/common/FeNameFormat.java index d446e90bd7..205242722e 100644 --- a/fe/src/com/baidu/palo/common/FeNameFormat.java +++ b/fe/src/com/baidu/palo/common/FeNameFormat.java @@ -80,21 +80,21 @@ public class FeNameFormat { } } - public static void checkRoleName(String role, boolean canBeSuperuser) throws AnalysisException { + public 
static void checkRoleName(String role, boolean canBeAdmin, String errMsg) throws AnalysisException { if (Strings.isNullOrEmpty(role) || !role.matches(COMMON_NAME_REGEX)) { throw new AnalysisException("invalid role format: " + role); } boolean res = false; if (CaseSensibility.ROLE.getCaseSensibility()) { - res = role.equals(PaloRole.OPERATOR_ROLE) || (!canBeSuperuser && role.equals(PaloRole.ADMIN_ROLE)); + res = role.equals(PaloRole.OPERATOR_ROLE) || (!canBeAdmin && role.equals(PaloRole.ADMIN_ROLE)); } else { res = role.equalsIgnoreCase(PaloRole.OPERATOR_ROLE) - || (!canBeSuperuser && role.equalsIgnoreCase(PaloRole.ADMIN_ROLE)); + || (!canBeAdmin && role.equalsIgnoreCase(PaloRole.ADMIN_ROLE)); } if (res) { - throw new AnalysisException("Can not create role with name: " + role); + throw new AnalysisException(errMsg + ": " + role); } } diff --git a/fe/src/com/baidu/palo/common/GenericPool.java b/fe/src/com/baidu/palo/common/GenericPool.java index dd369a7f2f..d88ebcf00f 100644 --- a/fe/src/com/baidu/palo/common/GenericPool.java +++ b/fe/src/com/baidu/palo/common/GenericPool.java @@ -18,136 +18,136 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -import java.lang.reflect.Constructor; - -import com.baidu.palo.thrift.TNetworkAddress; - -import org.apache.commons.pool2.BaseKeyedPooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.commons.pool2.impl.GenericKeyedObjectPool; -import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public class GenericPool { - private static final Logger LOG = LogManager.getLogger(GenericPool.class); - private GenericKeyedObjectPool pool; - private String className; - private int timeoutMs; - - public GenericPool(String className, GenericKeyedObjectPoolConfig config, int timeoutMs) { - this.className = "com.baidu.palo.thrift." + className + "$Client"; - ThriftClientFactory factory = new ThriftClientFactory(); - pool = new GenericKeyedObjectPool(factory, config); - this.timeoutMs = timeoutMs; - } - - public boolean reopen(VALUE object, int timeoutMs) { - boolean ok = true; - object.getOutputProtocol().getTransport().close(); - try { - object.getOutputProtocol().getTransport().open(); - // transport.open() doesn't set timeout, Maybe the timeoutMs change. 
- TSocket socket = (TSocket) object.getOutputProtocol().getTransport(); - socket.setTimeout(timeoutMs); - } catch (TTransportException e) { - ok = false; - } - return ok; - } - - public boolean reopen(VALUE object) { - boolean ok = true; - object.getOutputProtocol().getTransport().close(); - try { - object.getOutputProtocol().getTransport().open(); - } catch (TTransportException e) { - LOG.warn("reopen error", e); - ok = false; - } - return ok; - } - - public boolean peak(VALUE object) { - return object.getOutputProtocol().getTransport().peek(); - } - - public VALUE borrowObject(TNetworkAddress address) throws Exception { - return pool.borrowObject(address); - } - - public VALUE borrowObject(TNetworkAddress address, int timeoutMs) throws Exception { - VALUE value = pool.borrowObject(address); - TSocket socket = (TSocket) (value.getOutputProtocol().getTransport()); - socket.setTimeout(timeoutMs); - return value; - } - - public void returnObject(TNetworkAddress address, VALUE object) { - if (address == null || object == null) { - return; - } - pool.returnObject(address, object); - } - - public void invalidateObject(TNetworkAddress address, VALUE object) { - if (address == null || object == null) { - return; - } - try { - pool.invalidateObject(address, object); - } catch (Exception e) { - e.printStackTrace(); - } - } - - private class ThriftClientFactory extends BaseKeyedPooledObjectFactory { - - private Object newInstance(String className, TProtocol protocol) throws Exception { - Class newoneClass = Class.forName(className); - Constructor cons = newoneClass.getConstructor(TProtocol.class); - return cons.newInstance(protocol); - } - - @Override - public VALUE create(TNetworkAddress key) throws Exception { - if (LOG.isDebugEnabled()) { - LOG.debug("before create socket hostname={} key.port={} timeoutMs={}", - key.hostname, key.port, timeoutMs); - } - TTransport transport = new TSocket(key.hostname, key.port, timeoutMs); - transport.open(); - TProtocol protocol = new TBinaryProtocol(transport); - VALUE client = (VALUE) newInstance(className, protocol); - return client; - } - - @Override - public PooledObject wrap(VALUE client) { - return new DefaultPooledObject(client); - } - - @Override - public boolean validateObject(TNetworkAddress key, PooledObject p) { - boolean isOpen = p.getObject().getOutputProtocol().getTransport().isOpen(); - LOG.debug("isOpen={}", isOpen); - return isOpen; - } - - @Override - public void destroyObject(TNetworkAddress key, PooledObject p) { - // InputProtocol and OutputProtocol have the same reference in OurCondition - if (p.getObject().getOutputProtocol().getTransport().isOpen()) { - p.getObject().getOutputProtocol().getTransport().close(); - } - } - } -} +package com.baidu.palo.common; + +import java.lang.reflect.Constructor; + +import com.baidu.palo.thrift.TNetworkAddress; + +import org.apache.commons.pool2.BaseKeyedPooledObjectFactory; +import org.apache.commons.pool2.PooledObject; +import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.commons.pool2.impl.GenericKeyedObjectPool; +import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +public class GenericPool { + private static final Logger LOG = 
LogManager.getLogger(GenericPool.class); + private GenericKeyedObjectPool pool; + private String className; + private int timeoutMs; + + public GenericPool(String className, GenericKeyedObjectPoolConfig config, int timeoutMs) { + this.className = "com.baidu.palo.thrift." + className + "$Client"; + ThriftClientFactory factory = new ThriftClientFactory(); + pool = new GenericKeyedObjectPool(factory, config); + this.timeoutMs = timeoutMs; + } + + public boolean reopen(VALUE object, int timeoutMs) { + boolean ok = true; + object.getOutputProtocol().getTransport().close(); + try { + object.getOutputProtocol().getTransport().open(); + // transport.open() doesn't set timeout, Maybe the timeoutMs change. + TSocket socket = (TSocket) object.getOutputProtocol().getTransport(); + socket.setTimeout(timeoutMs); + } catch (TTransportException e) { + ok = false; + } + return ok; + } + + public boolean reopen(VALUE object) { + boolean ok = true; + object.getOutputProtocol().getTransport().close(); + try { + object.getOutputProtocol().getTransport().open(); + } catch (TTransportException e) { + LOG.warn("reopen error", e); + ok = false; + } + return ok; + } + + public boolean peak(VALUE object) { + return object.getOutputProtocol().getTransport().peek(); + } + + public VALUE borrowObject(TNetworkAddress address) throws Exception { + return pool.borrowObject(address); + } + + public VALUE borrowObject(TNetworkAddress address, int timeoutMs) throws Exception { + VALUE value = pool.borrowObject(address); + TSocket socket = (TSocket) (value.getOutputProtocol().getTransport()); + socket.setTimeout(timeoutMs); + return value; + } + + public void returnObject(TNetworkAddress address, VALUE object) { + if (address == null || object == null) { + return; + } + pool.returnObject(address, object); + } + + public void invalidateObject(TNetworkAddress address, VALUE object) { + if (address == null || object == null) { + return; + } + try { + pool.invalidateObject(address, object); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private class ThriftClientFactory extends BaseKeyedPooledObjectFactory { + + private Object newInstance(String className, TProtocol protocol) throws Exception { + Class newoneClass = Class.forName(className); + Constructor cons = newoneClass.getConstructor(TProtocol.class); + return cons.newInstance(protocol); + } + + @Override + public VALUE create(TNetworkAddress key) throws Exception { + if (LOG.isDebugEnabled()) { + LOG.debug("before create socket hostname={} key.port={} timeoutMs={}", + key.hostname, key.port, timeoutMs); + } + TTransport transport = new TSocket(key.hostname, key.port, timeoutMs); + transport.open(); + TProtocol protocol = new TBinaryProtocol(transport); + VALUE client = (VALUE) newInstance(className, protocol); + return client; + } + + @Override + public PooledObject wrap(VALUE client) { + return new DefaultPooledObject(client); + } + + @Override + public boolean validateObject(TNetworkAddress key, PooledObject p) { + boolean isOpen = p.getObject().getOutputProtocol().getTransport().isOpen(); + LOG.debug("isOpen={}", isOpen); + return isOpen; + } + + @Override + public void destroyObject(TNetworkAddress key, PooledObject p) { + // InputProtocol and OutputProtocol have the same reference in OurCondition + if (p.getObject().getOutputProtocol().getTransport().isOpen()) { + p.getObject().getOutputProtocol().getTransport().close(); + } + } + } +} diff --git a/fe/src/com/baidu/palo/common/Status.java b/fe/src/com/baidu/palo/common/Status.java index 
e07faf800a..7fd9ecc4de 100644 --- a/fe/src/com/baidu/palo/common/Status.java +++ b/fe/src/com/baidu/palo/common/Status.java @@ -26,7 +26,7 @@ import com.baidu.palo.thrift.TStatusCode; public class Status { public static final Status OK = new Status(); - public static final Status CANCELLED = new Status(TStatusCode.CANCELLED, "Canelled"); + public static final Status CANCELLED = new Status(TStatusCode.CANCELLED, "Cancelled"); public static final Status THRIFT_RPC_ERROR = new Status(TStatusCode.THRIFT_RPC_ERROR, "Thrift RPC failed"); diff --git a/fe/src/com/baidu/palo/common/TableAliasGenerator.java b/fe/src/com/baidu/palo/common/TableAliasGenerator.java index 522ec7e29e..473a993888 100644 --- a/fe/src/com/baidu/palo/common/TableAliasGenerator.java +++ b/fe/src/com/baidu/palo/common/TableAliasGenerator.java @@ -18,25 +18,25 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common; - -import com.baidu.palo.analysis.Analyzer; - -import com.google.common.base.Preconditions; - -public class TableAliasGenerator extends AliasGenerator { - private static final String DEFAULT_TBL_ALIAS_PREFIX = "$a$"; - - public TableAliasGenerator(Analyzer analyzer, String prefix) { - Preconditions.checkNotNull(analyzer); - aliasPrefix = prefix != null ? prefix : DEFAULT_TBL_ALIAS_PREFIX; - usedAliases.addAll(analyzer.getAliases()); - // Analyzer currentAnalyzer = analyzer; - // do { - // usedAliases.addAll(currentAnalyzer.getAliases()); - // usedAliases.addAll(currentAnalyzer.getLocalViews().keySet()); - // currentAnalyzer = currentAnalyzer.getParentAnalyzer(); - // } while (currentAnalyzer != null); - } -} - +package com.baidu.palo.common; + +import com.baidu.palo.analysis.Analyzer; + +import com.google.common.base.Preconditions; + +public class TableAliasGenerator extends AliasGenerator { + private static final String DEFAULT_TBL_ALIAS_PREFIX = "$a$"; + + public TableAliasGenerator(Analyzer analyzer, String prefix) { + Preconditions.checkNotNull(analyzer); + aliasPrefix = prefix != null ? prefix : DEFAULT_TBL_ALIAS_PREFIX; + usedAliases.addAll(analyzer.getAliases()); + // Analyzer currentAnalyzer = analyzer; + // do { + // usedAliases.addAll(currentAnalyzer.getAliases()); + // usedAliases.addAll(currentAnalyzer.getLocalViews().keySet()); + // currentAnalyzer = currentAnalyzer.getParentAnalyzer(); + // } while (currentAnalyzer != null); + } +} + diff --git a/fe/src/com/baidu/palo/common/proc/BackendProcNode.java b/fe/src/com/baidu/palo/common/proc/BackendProcNode.java index 3a4dd6c0c5..917c8d7250 100644 --- a/fe/src/com/baidu/palo/common/proc/BackendProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/BackendProcNode.java @@ -18,71 +18,71 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.common.proc; - -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.Pair; -import com.baidu.palo.common.util.DebugUtil; -import com.baidu.palo.system.Backend; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -public class BackendProcNode implements ProcNodeInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("RootPath").add("TotalCapacity").add("DataUsedCapacity").add("DiskAvailableCapacity").add("State") - .build(); - - private Backend backend; - - public BackendProcNode(Backend backend) { - this.backend = backend; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(backend); - - BaseProcResult result = new BaseProcResult(); - - result.setNames(TITLE_NAMES); - - for (String infoString : backend.getDiskInfosAsString()) { - String[] infos = infoString.split("\\|"); - Preconditions.checkState(infos.length == 5); - - Pair totalUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[1])); - Pair dataUsedUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[2])); - Pair diskAvailableUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[3])); - - String readableTotalCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(totalUnitPair.first) + " " - + totalUnitPair.second; - String readableDataUsedCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(dataUsedUnitPair.first) + " " - + dataUsedUnitPair.second; - String readableDiskAvailableCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format( - diskAvailableUnitPair.first) + " " + diskAvailableUnitPair.second; - - result.addRow(Lists.newArrayList(infos[0], readableTotalCapacity, readableDataUsedCapacity, - readableDiskAvailableCapacity, infos[4])); - } - - long totalCapacityB = backend.getTotalCapacityB(); - Pair unitPair = DebugUtil.getByteUint(totalCapacityB); - String readableTotalCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " + unitPair.second; - - long dataUsedCapacityB = backend.getDataUsedCapacityB(); - unitPair = DebugUtil.getByteUint(dataUsedCapacityB); - String readableDataUsedCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " - + unitPair.second; +package com.baidu.palo.common.proc; - long diskAvailableCapacityB = backend.getAvailableCapacityB(); - unitPair = DebugUtil.getByteUint(diskAvailableCapacityB); - String readableDiskAvailableCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " - + unitPair.second; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.Pair; +import com.baidu.palo.common.util.DebugUtil; +import com.baidu.palo.system.Backend; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +public class BackendProcNode implements ProcNodeInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("RootPath").add("TotalCapacity").add("DataUsedCapacity").add("DiskAvailableCapacity").add("State") + .build(); + + private Backend backend; + + public BackendProcNode(Backend backend) { + this.backend = backend; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(backend); + + BaseProcResult result = new BaseProcResult(); + + result.setNames(TITLE_NAMES); + + for (String infoString : backend.getDiskInfosAsString()) { + String[] infos = 
infoString.split("\\|"); + Preconditions.checkState(infos.length == 5); + + Pair totalUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[1])); + Pair dataUsedUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[2])); + Pair diskAvailableUnitPair = DebugUtil.getByteUint(Long.valueOf(infos[3])); + + String readableTotalCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(totalUnitPair.first) + " " + + totalUnitPair.second; + String readableDataUsedCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(dataUsedUnitPair.first) + " " + + dataUsedUnitPair.second; + String readableDiskAvailableCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format( + diskAvailableUnitPair.first) + " " + diskAvailableUnitPair.second; + + result.addRow(Lists.newArrayList(infos[0], readableTotalCapacity, readableDataUsedCapacity, + readableDiskAvailableCapacity, infos[4])); + } + + long totalCapacityB = backend.getTotalCapacityB(); + Pair unitPair = DebugUtil.getByteUint(totalCapacityB); + String readableTotalCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " + unitPair.second; + + long dataUsedCapacityB = backend.getDataUsedCapacityB(); + unitPair = DebugUtil.getByteUint(dataUsedCapacityB); + String readableDataUsedCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " + + unitPair.second; + + long diskAvailableCapacityB = backend.getAvailableCapacityB(); + unitPair = DebugUtil.getByteUint(diskAvailableCapacityB); + String readableDiskAvailableCapacity = DebugUtil.DECIMAL_FORMAT_SCALE_3.format(unitPair.first) + " " + + unitPair.second; result.addRow(Lists.newArrayList("Total", readableTotalCapacity, readableDataUsedCapacity, - readableDiskAvailableCapacity, "")); - - return result; - } - -} + readableDiskAvailableCapacity, "")); + + return result; + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/CloneProcNode.java b/fe/src/com/baidu/palo/common/proc/CloneProcNode.java index a32eb49aef..765b7880ef 100644 --- a/fe/src/com/baidu/palo/common/proc/CloneProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/CloneProcNode.java @@ -18,51 +18,51 @@ // specific language governing permissions and limitations // under the License. 
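Note on the BackendProcNode hunk above: it builds the readable capacity columns by converting raw byte counts with DebugUtil.getByteUint and formatting the result with DECIMAL_FORMAT_SCALE_3. The following is only a rough, self-contained sketch of that idea; the ReadableCapacity class, unit table, and format pattern are illustrative stand-ins and not the project's DebugUtil API.

    import java.text.DecimalFormat;

    // Illustrative only: converts a raw byte count into a readable string,
    // similar in spirit to the capacity columns assembled in BackendProcNode.
    public class ReadableCapacity {
        private static final String[] UNITS = {"B", "KB", "MB", "GB", "TB", "PB"};
        private static final DecimalFormat FORMAT = new DecimalFormat("#,##0.000");

        public static String format(long bytes) {
            double value = bytes;
            int unit = 0;
            // Step up one unit at a time until the value is below 1024.
            while (value >= 1024 && unit < UNITS.length - 1) {
                value /= 1024;
                unit++;
            }
            return FORMAT.format(value) + " " + UNITS[unit];
        }

        public static void main(String[] args) {
            // 5368709120 bytes prints as "5.000 GB"
            System.out.println(format(5368709120L));
        }
    }
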
-package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Database; -import com.baidu.palo.clone.Clone; -import com.baidu.palo.common.AnalysisException; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; - -import java.util.ArrayList; -import java.util.List; - -public class CloneProcNode implements ProcNodeInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("DbId").add("TableId").add("PartitionId").add("IndexId") - .add("TabletId").add("BackendId").add("State").add("Type") - .add("Priority").add("CreateTime").add("StartTime").add("FinishTime") - .add("Timeout(s)").add("FailMsg") - .build(); - - private Clone clone; - private Database db; - - public CloneProcNode(Clone clone, Database db) { - this.clone = clone; - this.db = db; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(clone); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - - List> cloneJobInfos = clone.getCloneJobInfosByDb(db); - for (List infoStr : cloneJobInfos) { - List oneInfo = new ArrayList(TITLE_NAMES.size()); - for (Comparable element : infoStr) { - oneInfo.add(element.toString()); - } - result.addRow(oneInfo); - } - return result; - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Database; +import com.baidu.palo.clone.Clone; +import com.baidu.palo.common.AnalysisException; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; + +import java.util.ArrayList; +import java.util.List; + +public class CloneProcNode implements ProcNodeInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("DbId").add("TableId").add("PartitionId").add("IndexId") + .add("TabletId").add("BackendId").add("State").add("Type") + .add("Priority").add("CreateTime").add("StartTime").add("FinishTime") + .add("Timeout(s)").add("FailMsg") + .build(); + + private Clone clone; + private Database db; + + public CloneProcNode(Clone clone, Database db) { + this.clone = clone; + this.db = db; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(clone); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + + List> cloneJobInfos = clone.getCloneJobInfosByDb(db); + for (List infoStr : cloneJobInfos) { + List oneInfo = new ArrayList(TITLE_NAMES.size()); + for (Comparable element : infoStr) { + oneInfo.add(element.toString()); + } + result.addRow(oneInfo); + } + return result; + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/CurrentQueryFragmentProcNode.java b/fe/src/com/baidu/palo/common/proc/CurrentQueryFragmentProcNode.java index 6b95f3677c..2ff86caf18 100644 --- a/fe/src/com/baidu/palo/common/proc/CurrentQueryFragmentProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/CurrentQueryFragmentProcNode.java @@ -169,9 +169,9 @@ public class CurrentQueryFragmentProcNode implements ProcNodeInterface { sortedRowDatas.sort(new Comparator>() { @Override public int compare(List l1, List l2) { - final int fragmentId1 = Integer.valueOf(l1.get(0)); - final int fragmentId2 = Integer.valueOf(l2.get(0)); - return fragmentId1 >= fragmentId2 ? 
1 : -1; + final Integer fragmentId1 = Integer.valueOf(l1.get(0)); + final Integer fragmentId2 = Integer.valueOf(l2.get(0)); + return fragmentId1.compareTo(fragmentId2); } }); final BaseProcResult result = new BaseProcResult(); diff --git a/fe/src/com/baidu/palo/common/proc/IndexInfoProcDir.java b/fe/src/com/baidu/palo/common/proc/IndexInfoProcDir.java index b454c7e47c..ef58f8f3f9 100644 --- a/fe/src/com/baidu/palo/common/proc/IndexInfoProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/IndexInfoProcDir.java @@ -18,130 +18,130 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Column; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.Table; -import com.baidu.palo.catalog.Table.TableType; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.thrift.TStorageType; - -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -import java.util.List; -import java.util.Set; - -/* - * SHOW PROC /dbs/dbId/tableId/index_schema - * show indexNames(to schema) - */ -public class IndexInfoProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("IndexName").add("SchemaVersion").add("SchemaHash").add("ShortKeyColumnCount") - .add("StorageType").add("Keys") - .build(); - - private Database db; - private Table table; - - public IndexInfoProcDir(Database db, Table table) { - this.db = db; - this.table = table; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(table); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - db.readLock(); - try { - if (table.getType() == TableType.OLAP) { - OlapTable olapTable = (OlapTable) table; - - // indices order - List indices = Lists.newArrayList(); - indices.add(olapTable.getId()); - for (Long indexId : olapTable.getIndexIdToSchema().keySet()) { - if (indexId != olapTable.getId()) { - indices.add(indexId); - } - } - - for (long indexId : indices) { - int schemaVersion = olapTable.getSchemaVersionByIndexId(indexId); - int schemaHash = olapTable.getSchemaHashByIndexId(indexId); - short shortKeyColumnCount = olapTable.getShortKeyColumnCountByIndexId(indexId); - TStorageType storageType = olapTable.getStorageTypeByIndexId(indexId); - String indexName = olapTable.getIndexNameById(indexId); - - String type = olapTable.getKeysType().name(); - StringBuilder builder = new StringBuilder(); - builder.append(type).append("("); - List columnNames = Lists.newArrayList(); - List columns = olapTable.getSchemaByIndexId(indexId); - for (Column column : columns) { - if (column.isKey()) { - columnNames.add(column.getName()); - } - } - builder.append(Joiner.on(", ").join(columnNames)).append(")"); - - result.addRow(Lists.newArrayList(indexName, - String.valueOf(schemaVersion), - String.valueOf(schemaHash), - String.valueOf(shortKeyColumnCount), - storageType.name(), - builder.toString())); - } - } else { - result.addRow(Lists.newArrayList(table.getName(), "", "", "", "", "")); - } - - return result; - } finally { - db.readUnlock(); - } - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String indexName) throws 
AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(table); - - db.readLock(); - try { - List schema = null; - Set bfColumns = null; - if (table.getType() == TableType.OLAP) { - OlapTable olapTable = (OlapTable) table; - Long indexId = olapTable.getIndexIdByName(indexName); - if (indexId == null) { - throw new AnalysisException("Index[" + indexName + "] does not exist in table[" - + table.getName() + "]"); - } - schema = olapTable.getSchemaByIndexId(indexId); - - bfColumns = olapTable.getCopiedBfColumns(); - } else { - schema = table.getBaseSchema(); - } - return new IndexSchemaProcNode(schema, bfColumns); - } finally { - db.readUnlock(); - } - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Column; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.Table; +import com.baidu.palo.catalog.Table.TableType; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.thrift.TStorageType; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.util.List; +import java.util.Set; + +/* + * SHOW PROC /dbs/dbId/tableId/index_schema + * show indexNames(to schema) + */ +public class IndexInfoProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("IndexName").add("SchemaVersion").add("SchemaHash").add("ShortKeyColumnCount") + .add("StorageType").add("Keys") + .build(); + + private Database db; + private Table table; + + public IndexInfoProcDir(Database db, Table table) { + this.db = db; + this.table = table; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(table); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + db.readLock(); + try { + if (table.getType() == TableType.OLAP) { + OlapTable olapTable = (OlapTable) table; + + // indices order + List indices = Lists.newArrayList(); + indices.add(olapTable.getId()); + for (Long indexId : olapTable.getIndexIdToSchema().keySet()) { + if (indexId != olapTable.getId()) { + indices.add(indexId); + } + } + + for (long indexId : indices) { + int schemaVersion = olapTable.getSchemaVersionByIndexId(indexId); + int schemaHash = olapTable.getSchemaHashByIndexId(indexId); + short shortKeyColumnCount = olapTable.getShortKeyColumnCountByIndexId(indexId); + TStorageType storageType = olapTable.getStorageTypeByIndexId(indexId); + String indexName = olapTable.getIndexNameById(indexId); + + String type = olapTable.getKeysType().name(); + StringBuilder builder = new StringBuilder(); + builder.append(type).append("("); + List columnNames = Lists.newArrayList(); + List columns = olapTable.getSchemaByIndexId(indexId); + for (Column column : columns) { + if (column.isKey()) { + columnNames.add(column.getName()); + } + } + builder.append(Joiner.on(", ").join(columnNames)).append(")"); + + result.addRow(Lists.newArrayList(indexName, + String.valueOf(schemaVersion), + String.valueOf(schemaHash), + String.valueOf(shortKeyColumnCount), + storageType.name(), + builder.toString())); + } + } else { + result.addRow(Lists.newArrayList(table.getName(), "", "", "", "", "")); + } + + return result; + } finally { + db.readUnlock(); + } + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } 
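For context on the Keys column assembled above: it is the key type name followed by the index's key column names, joined with ", ". A minimal standalone sketch of that formatting, assuming Guava is on the classpath (as it is in this repo) and using made-up key type and column names:

    import com.google.common.base.Joiner;
    import java.util.Arrays;
    import java.util.List;

    public class KeysColumnSketch {
        public static void main(String[] args) {
            // Hypothetical inputs; in IndexInfoProcDir.fetchResult() these come from
            // olapTable.getKeysType().name() and the key columns of getSchemaByIndexId(indexId).
            String keysType = "AGG_KEYS";
            List<String> keyColumnNames = Arrays.asList("user_id", "date");

            StringBuilder builder = new StringBuilder();
            builder.append(keysType).append("(");
            builder.append(Joiner.on(", ").join(keyColumnNames)).append(")");

            // Prints: AGG_KEYS(user_id, date)
            System.out.println(builder.toString());
        }
    }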
+ + @Override + public ProcNodeInterface lookup(String indexName) throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(table); + + db.readLock(); + try { + List schema = null; + Set bfColumns = null; + if (table.getType() == TableType.OLAP) { + OlapTable olapTable = (OlapTable) table; + Long indexId = olapTable.getIndexIdByName(indexName); + if (indexId == null) { + throw new AnalysisException("Index[" + indexName + "] does not exist in table[" + + table.getName() + "]"); + } + schema = olapTable.getSchemaByIndexId(indexId); + + bfColumns = olapTable.getCopiedBfColumns(); + } else { + schema = table.getBaseSchema(); + } + return new IndexSchemaProcNode(schema, bfColumns); + } finally { + db.readUnlock(); + } + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/IndexSchemaProcNode.java b/fe/src/com/baidu/palo/common/proc/IndexSchemaProcNode.java index 2914706379..178bc486ac 100644 --- a/fe/src/com/baidu/palo/common/proc/IndexSchemaProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/IndexSchemaProcNode.java @@ -18,67 +18,67 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Column; -import com.baidu.palo.common.AnalysisException; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -import org.apache.commons.lang.StringUtils; - -import java.util.Arrays; -import java.util.List; -import java.util.Set; - -/* - * SHOW PROC /dbs/dbId/tableId/index_schema/"index name" - * show index schema - */ -public class IndexSchemaProcNode implements ProcNodeInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("Field").add("Type").add("Null").add("Key") - .add("Default").add("Extra") - .build(); - - private final List schema; - private final Set bfColumns; - - public IndexSchemaProcNode(List schema, Set bfColumns) { - this.schema = schema; - this.bfColumns = bfColumns; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(schema); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - - for (Column column : schema) { - // Extra string (aggregation and bloom filter) - List extras = Lists.newArrayList(); - if (column.getAggregationType() != null) { - extras.add(column.getAggregationType().name()); - } - if (bfColumns != null && bfColumns.contains(column.getName())) { - extras.add("BLOOM_FILTER"); - } - String extraStr = StringUtils.join(extras, ","); - - List rowList = Arrays.asList(column.getName(), - column.getColumnType().toString(), - column.isAllowNull() ? "Yes" : "No", - ((Boolean) column.isKey()).toString(), - column.getDefaultValue() == null - ? 
"N/A" : column.getDefaultValue(), - extraStr); - result.addRow(rowList); - } - return result; - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Column; +import com.baidu.palo.common.AnalysisException; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.apache.commons.lang.StringUtils; + +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +/* + * SHOW PROC /dbs/dbId/tableId/index_schema/"index name" + * show index schema + */ +public class IndexSchemaProcNode implements ProcNodeInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("Field").add("Type").add("Null").add("Key") + .add("Default").add("Extra") + .build(); + + private final List schema; + private final Set bfColumns; + + public IndexSchemaProcNode(List schema, Set bfColumns) { + this.schema = schema; + this.bfColumns = bfColumns; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(schema); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + + for (Column column : schema) { + // Extra string (aggregation and bloom filter) + List extras = Lists.newArrayList(); + if (column.getAggregationType() != null) { + extras.add(column.getAggregationType().name()); + } + if (bfColumns != null && bfColumns.contains(column.getName())) { + extras.add("BLOOM_FILTER"); + } + String extraStr = StringUtils.join(extras, ","); + + List rowList = Arrays.asList(column.getName(), + column.getColumnType().toString(), + column.isAllowNull() ? "Yes" : "No", + ((Boolean) column.isKey()).toString(), + column.getDefaultValue() == null + ? "N/A" : column.getDefaultValue(), + extraStr); + result.addRow(rowList); + } + return result; + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/IndicesProcDir.java b/fe/src/com/baidu/palo/common/proc/IndicesProcDir.java index 9a69f7b26d..08e357cd31 100644 --- a/fe/src/com/baidu/palo/common/proc/IndicesProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/IndicesProcDir.java @@ -18,113 +18,113 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Database; -import com.baidu.palo.catalog.MaterializedIndex; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.Partition; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.util.ListComparator; -import com.baidu.palo.common.util.TimeUtils; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableList; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/* - * SHOW PROC /dbs/dbId/tableId/partitions/partitionId - * show index's detail info within a partition - */ -public class IndicesProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("IndexId").add("IndexName").add("State").add("LastConsistencyCheckTime") - .build(); - - private Database db; - private OlapTable olapTable; - private Partition partition; - - public IndicesProcDir(Database db, OlapTable olapTable, Partition partition) { - this.db = db; - this.olapTable = olapTable; - this.partition = partition; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(partition); - - BaseProcResult result = new BaseProcResult(); - // get info - List> indexInfos = new ArrayList>(); - db.readLock(); - try { - result.setNames(TITLE_NAMES); - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { - List indexInfo = new ArrayList(); - indexInfo.add(materializedIndex.getId()); - indexInfo.add(olapTable.getIndexNameById(materializedIndex.getId())); - indexInfo.add(materializedIndex.getState()); - indexInfo.add(TimeUtils.longToTimeString(materializedIndex.getLastCheckTime())); - - indexInfos.add(indexInfo); - } - - } finally { - db.readUnlock(); - } - - // sort by index id - ListComparator> comparator = new ListComparator>(0); - Collections.sort(indexInfos, comparator); - - // set result - for (List info : indexInfos) { - List row = new ArrayList(info.size()); - for (Comparable comparable : info) { - row.add(comparable.toString()); - } - result.addRow(row); - } - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String indexIdStr) throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(partition); - if (Strings.isNullOrEmpty(indexIdStr)) { - throw new AnalysisException("Index id is null"); - } - - long indexId; - try { - indexId = Long.valueOf(indexIdStr); - } catch (NumberFormatException e) { - throw new AnalysisException("Invalid index id format: " + indexIdStr); - } - - db.readLock(); - try { - MaterializedIndex materializedIndex = partition.getIndex(indexId); - if (materializedIndex == null) { - throw new AnalysisException("Index[" + indexId + "] does not exist."); - } - return new TabletsProcDir(db, materializedIndex); - } finally { - db.readUnlock(); - } - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Database; +import com.baidu.palo.catalog.MaterializedIndex; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.Partition; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.util.ListComparator; +import com.baidu.palo.common.util.TimeUtils; + +import com.google.common.base.Preconditions; +import 
com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/* + * SHOW PROC /dbs/dbId/tableId/partitions/partitionId + * show index's detail info within a partition + */ +public class IndicesProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("IndexId").add("IndexName").add("State").add("LastConsistencyCheckTime") + .build(); + + private Database db; + private OlapTable olapTable; + private Partition partition; + + public IndicesProcDir(Database db, OlapTable olapTable, Partition partition) { + this.db = db; + this.olapTable = olapTable; + this.partition = partition; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(partition); + + BaseProcResult result = new BaseProcResult(); + // get info + List> indexInfos = new ArrayList>(); + db.readLock(); + try { + result.setNames(TITLE_NAMES); + for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { + List indexInfo = new ArrayList(); + indexInfo.add(materializedIndex.getId()); + indexInfo.add(olapTable.getIndexNameById(materializedIndex.getId())); + indexInfo.add(materializedIndex.getState()); + indexInfo.add(TimeUtils.longToTimeString(materializedIndex.getLastCheckTime())); + + indexInfos.add(indexInfo); + } + + } finally { + db.readUnlock(); + } + + // sort by index id + ListComparator> comparator = new ListComparator>(0); + Collections.sort(indexInfos, comparator); + + // set result + for (List info : indexInfos) { + List row = new ArrayList(info.size()); + for (Comparable comparable : info) { + row.add(comparable.toString()); + } + result.addRow(row); + } + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String indexIdStr) throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(partition); + if (Strings.isNullOrEmpty(indexIdStr)) { + throw new AnalysisException("Index id is null"); + } + + long indexId; + try { + indexId = Long.valueOf(indexIdStr); + } catch (NumberFormatException e) { + throw new AnalysisException("Invalid index id format: " + indexIdStr); + } + + db.readLock(); + try { + MaterializedIndex materializedIndex = partition.getIndex(indexId); + if (materializedIndex == null) { + throw new AnalysisException("Index[" + indexId + "] does not exist."); + } + return new TabletsProcDir(db, materializedIndex); + } finally { + db.readUnlock(); + } + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/JobsDbProcDir.java b/fe/src/com/baidu/palo/common/proc/JobsDbProcDir.java index 90968ce08e..c03d358dc9 100644 --- a/fe/src/com/baidu/palo/common/proc/JobsDbProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/JobsDbProcDir.java @@ -18,78 +18,78 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.common.AnalysisException; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -import java.util.List; - -/* - * SHOW PROC '/jobs/' - */ -public class JobsDbProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("DbId").add("DbName") - .build(); - - private Catalog catalog; - - public JobsDbProcDir(Catalog catalog) { - this.catalog = catalog; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String dbIdStr) throws AnalysisException { - if (Strings.isNullOrEmpty(dbIdStr)) { - throw new AnalysisException("Db id is null"); - } - - long dbId; - try { - dbId = Long.valueOf(dbIdStr); - } catch (NumberFormatException e) { - throw new AnalysisException("Invalid db id format: " + dbIdStr); - } - - Database db = catalog.getDb(dbId); - if (db == null) { - throw new AnalysisException("Database[" + dbId + "] does not exist."); - } - - return new JobsProcDir(catalog, db); - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(catalog); - - BaseProcResult result = new BaseProcResult(); - - result.setNames(TITLE_NAMES); - List names = catalog.getDbNames(); - if (names == null || names.isEmpty()) { - // empty - return result; - } - - for (String name : names) { - Database db = catalog.getDb(name); - result.addRow(Lists.newArrayList(String.valueOf(db.getId()), name)); - } - - return result; - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.common.AnalysisException; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.util.List; + +/* + * SHOW PROC '/jobs/' + */ +public class JobsDbProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("DbId").add("DbName") + .build(); + + private Catalog catalog; + + public JobsDbProcDir(Catalog catalog) { + this.catalog = catalog; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String dbIdStr) throws AnalysisException { + if (Strings.isNullOrEmpty(dbIdStr)) { + throw new AnalysisException("Db id is null"); + } + + long dbId; + try { + dbId = Long.valueOf(dbIdStr); + } catch (NumberFormatException e) { + throw new AnalysisException("Invalid db id format: " + dbIdStr); + } + + Database db = catalog.getDb(dbId); + if (db == null) { + throw new AnalysisException("Database[" + dbId + "] does not exist."); + } + + return new JobsProcDir(catalog, db); + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(catalog); + + BaseProcResult result = new BaseProcResult(); + + result.setNames(TITLE_NAMES); + List names = catalog.getDbNames(); + if (names == null || names.isEmpty()) { + // empty + return result; + } + + for (String name : names) { + Database db = catalog.getDb(name); + result.addRow(Lists.newArrayList(String.valueOf(db.getId()), 
name)); + } + + return result; + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/LoadProcDir.java b/fe/src/com/baidu/palo/common/proc/LoadProcDir.java index dca0d330ca..840c1f6984 100644 --- a/fe/src/com/baidu/palo/common/proc/LoadProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/LoadProcDir.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.catalog.Database; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.load.Load; @@ -30,80 +30,80 @@ import com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedList; -import java.util.List; - -public class LoadProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("JobId").add("Label").add("State").add("Progress") - .add("EtlInfo").add("TaskInfo").add("ErrorMsg").add("CreateTime") - .add("EtlStartTime").add("EtlFinishTime").add("LoadStartTime").add("LoadFinishTime") - .add("URL") - .build(); - - // label and state column index of result - public static final int LABEL_INDEX = 1; - public static final int STATE_INDEX = 2; - - private static final int LIMIT = 2000; - - private Load load; - private Database db; - - public LoadProcDir(Load load, Database db) { - this.load = load; - this.db = db; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(load); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - +import java.util.List; + +public class LoadProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("JobId").add("Label").add("State").add("Progress") + .add("EtlInfo").add("TaskInfo").add("ErrorMsg").add("CreateTime") + .add("EtlStartTime").add("EtlFinishTime").add("LoadStartTime").add("LoadFinishTime") + .add("URL") + .build(); + + // label and state column index of result + public static final int LABEL_INDEX = 1; + public static final int STATE_INDEX = 2; + + private static final int LIMIT = 2000; + + private Load load; + private Database db; + + public LoadProcDir(Load load, Database db) { + this.load = load; + this.db = db; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(load); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + LinkedList> loadJobInfos = load.getLoadJobInfosByDb(db.getId(), db.getFullName(), - null, false, null, null); - int counter = 0; - Iterator> iterator = loadJobInfos.descendingIterator(); - while (iterator.hasNext()) { - List infoStr = iterator.next(); - List oneInfo = new ArrayList(TITLE_NAMES.size()); - for (Comparable element : infoStr) { - oneInfo.add(element.toString()); - } - result.addRow(oneInfo); - if (++counter >= LIMIT) { - break; - } - } - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String jobIdStr) throws AnalysisException { - long jobId = -1L; - try { - jobId = Long.valueOf(jobIdStr); - } catch (NumberFormatException e) { - throw new AnalysisException("Invalid job id format: " + jobIdStr); - } - - return new LoadJobProcNode(load, jobId); - } - - public static int 
analyzeColumn(String columnName) throws AnalysisException { - for (String title : TITLE_NAMES) { - if (title.equalsIgnoreCase(columnName)) { - return TITLE_NAMES.indexOf(title); - } - } - - throw new AnalysisException("Title name[" + columnName + "] does not exist"); - } -} + null, false, null, null); + int counter = 0; + Iterator> iterator = loadJobInfos.descendingIterator(); + while (iterator.hasNext()) { + List infoStr = iterator.next(); + List oneInfo = new ArrayList(TITLE_NAMES.size()); + for (Comparable element : infoStr) { + oneInfo.add(element.toString()); + } + result.addRow(oneInfo); + if (++counter >= LIMIT) { + break; + } + } + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String jobIdStr) throws AnalysisException { + long jobId = -1L; + try { + jobId = Long.valueOf(jobIdStr); + } catch (NumberFormatException e) { + throw new AnalysisException("Invalid job id format: " + jobIdStr); + } + + return new LoadJobProcNode(load, jobId); + } + + public static int analyzeColumn(String columnName) throws AnalysisException { + for (String title : TITLE_NAMES) { + if (title.equalsIgnoreCase(columnName)) { + return TITLE_NAMES.indexOf(title); + } + } + + throw new AnalysisException("Title name[" + columnName + "] does not exist"); + } +} diff --git a/fe/src/com/baidu/palo/common/proc/PartitionsProcDir.java b/fe/src/com/baidu/palo/common/proc/PartitionsProcDir.java index 62d1c6dcf7..6c8e3cd412 100644 --- a/fe/src/com/baidu/palo/common/proc/PartitionsProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/PartitionsProcDir.java @@ -18,209 +18,209 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Column; -import com.baidu.palo.catalog.DataProperty; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.catalog.DistributionInfo; -import com.baidu.palo.catalog.HashDistributionInfo; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.PartitionKey; -import com.baidu.palo.catalog.PartitionType; -import com.baidu.palo.catalog.RangePartitionInfo; -import com.baidu.palo.catalog.Partition; -import com.baidu.palo.catalog.DistributionInfo.DistributionInfoType; -import com.baidu.palo.catalog.Table.TableType; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.util.TimeUtils; - -import com.google.common.base.Joiner; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Range; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -/* - * SHOW PROC /dbs/dbId/tableId/partitions - * show partitions' detail info within a table - */ -public class PartitionsProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("PartitionId").add("PartitionName").add("CommittedVersion").add("CommittedVersionHash") - .add("State").add("PartitionKey").add("Range").add("DistributionKey") - .add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime") - .add("LastConsistencyCheckTime") - .build(); - - public static final int PARTITION_NAME_INDEX = 1; - - private Database db; - private OlapTable olapTable; - - public PartitionsProcDir(Database db, OlapTable olapTable) { - this.db = db; - this.olapTable = olapTable; - } - - @Override - public ProcResult fetchResult() throws 
AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(olapTable); - Preconditions.checkState(olapTable.getType() == TableType.OLAP); - - // get info - List> partitionInfos = new ArrayList>(); - db.readLock(); - try { - RangePartitionInfo rangePartitionInfo = null; - Joiner joiner = Joiner.on(", "); - if (olapTable.getPartitionInfo().getType() == PartitionType.RANGE) { - rangePartitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo(); - List>> sortedRange = rangePartitionInfo.getSortedRangeMap(); - for (Map.Entry> entry : sortedRange) { - long partitionId = entry.getKey(); - Partition partition = olapTable.getPartition(partitionId); - List partitionInfo = new ArrayList(); - String partitionName = partition.getName(); - partitionInfo.add(partitionId); - partitionInfo.add(partitionName); - partitionInfo.add(partition.getCommittedVersion()); - partitionInfo.add(partition.getCommittedVersionHash()); - partitionInfo.add(partition.getState()); - - // partition - List partitionColumns = rangePartitionInfo.getPartitionColumns(); - List colNames = new ArrayList(); - for (Column column : partitionColumns) { - colNames.add(column.getName()); - } - partitionInfo.add(joiner.join(colNames)); - - partitionInfo.add(entry.getValue().toString()); - - // distribution - DistributionInfo distributionInfo = partition.getDistributionInfo(); - if (distributionInfo.getType() == DistributionInfoType.HASH) { - HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; - List distributionColumns = hashDistributionInfo.getDistributionColumns(); - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < distributionColumns.size(); i++) { - if (i != 0) { - sb.append(", "); - } - sb.append(distributionColumns.get(i).getName()); - } - partitionInfo.add(sb.toString()); - } else { - partitionInfo.add("ALL KEY"); - } - - partitionInfo.add(distributionInfo.getBucketNum()); - - short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId); - partitionInfo.add(String.valueOf(replicationNum)); - - DataProperty dataProperty = rangePartitionInfo.getDataProperty(partitionId); - partitionInfo.add(dataProperty.getStorageMedium().name()); - partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs())); - - partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime())); - - partitionInfos.add(partitionInfo); - } - } else { - for (Partition partition : olapTable.getPartitions()) { - List partitionInfo = new ArrayList(); - String partitionName = partition.getName(); - long partitionId = partition.getId(); - partitionInfo.add(partitionId); - partitionInfo.add(partitionName); - partitionInfo.add(partition.getCommittedVersion()); - partitionInfo.add(partition.getCommittedVersionHash()); - partitionInfo.add(partition.getState()); - - // partition - partitionInfo.add(""); - partitionInfo.add(""); - - // distribution - DistributionInfo distributionInfo = partition.getDistributionInfo(); - if (distributionInfo.getType() == DistributionInfoType.HASH) { - HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; - List distributionColumns = hashDistributionInfo.getDistributionColumns(); - StringBuilder sb = new StringBuilder(); - for (int i = 0; i < distributionColumns.size(); i++) { - if (i != 0) { - sb.append(", "); - } - sb.append(distributionColumns.get(i).getName()); - } - partitionInfo.add(sb.toString()); - } else { - partitionInfo.add("ALL KEY"); - } - - 
partitionInfo.add(distributionInfo.getBucketNum()); - - short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId); - partitionInfo.add(String.valueOf(replicationNum)); - - DataProperty dataProperty = olapTable.getPartitionInfo().getDataProperty(partitionId); - partitionInfo.add(dataProperty.getStorageMedium().name()); - partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs())); - - partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime())); - - partitionInfos.add(partitionInfo); - } - } - } finally { - db.readUnlock(); - } - - // set result - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - for (List info : partitionInfos) { - List row = new ArrayList(info.size()); - for (Comparable comparable : info) { - row.add(comparable.toString()); - } - result.addRow(row); - } - - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String partitionIdStr) throws AnalysisException { - long partitionId = -1L; - try { - partitionId = Long.valueOf(partitionIdStr); - } catch (NumberFormatException e) { - throw new AnalysisException("Invalid partition id format: " + partitionIdStr); - } - - db.readLock(); - try { - Partition partition = olapTable.getPartition(partitionId); - if (partition == null) { - throw new AnalysisException("Partition[" + partitionId + "] does not exist"); - } - - return new IndicesProcDir(db, olapTable, partition); - } finally { - db.readUnlock(); - } - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Column; +import com.baidu.palo.catalog.DataProperty; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.catalog.DistributionInfo; +import com.baidu.palo.catalog.HashDistributionInfo; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.PartitionKey; +import com.baidu.palo.catalog.PartitionType; +import com.baidu.palo.catalog.RangePartitionInfo; +import com.baidu.palo.catalog.Partition; +import com.baidu.palo.catalog.DistributionInfo.DistributionInfoType; +import com.baidu.palo.catalog.Table.TableType; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.util.TimeUtils; + +import com.google.common.base.Joiner; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Range; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/* + * SHOW PROC /dbs/dbId/tableId/partitions + * show partitions' detail info within a table + */ +public class PartitionsProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("PartitionId").add("PartitionName").add("CommittedVersion").add("CommittedVersionHash") + .add("State").add("PartitionKey").add("Range").add("DistributionKey") + .add("Buckets").add("ReplicationNum").add("StorageMedium").add("CooldownTime") + .add("LastConsistencyCheckTime") + .build(); + + public static final int PARTITION_NAME_INDEX = 1; + + private Database db; + private OlapTable olapTable; + + public PartitionsProcDir(Database db, OlapTable olapTable) { + this.db = db; + this.olapTable = olapTable; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(olapTable); + Preconditions.checkState(olapTable.getType() == TableType.OLAP); + + // get info + 
List> partitionInfos = new ArrayList>(); + db.readLock(); + try { + RangePartitionInfo rangePartitionInfo = null; + Joiner joiner = Joiner.on(", "); + if (olapTable.getPartitionInfo().getType() == PartitionType.RANGE) { + rangePartitionInfo = (RangePartitionInfo) olapTable.getPartitionInfo(); + List>> sortedRange = rangePartitionInfo.getSortedRangeMap(); + for (Map.Entry> entry : sortedRange) { + long partitionId = entry.getKey(); + Partition partition = olapTable.getPartition(partitionId); + List partitionInfo = new ArrayList(); + String partitionName = partition.getName(); + partitionInfo.add(partitionId); + partitionInfo.add(partitionName); + partitionInfo.add(partition.getCommittedVersion()); + partitionInfo.add(partition.getCommittedVersionHash()); + partitionInfo.add(partition.getState()); + + // partition + List partitionColumns = rangePartitionInfo.getPartitionColumns(); + List colNames = new ArrayList(); + for (Column column : partitionColumns) { + colNames.add(column.getName()); + } + partitionInfo.add(joiner.join(colNames)); + + partitionInfo.add(entry.getValue().toString()); + + // distribution + DistributionInfo distributionInfo = partition.getDistributionInfo(); + if (distributionInfo.getType() == DistributionInfoType.HASH) { + HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; + List distributionColumns = hashDistributionInfo.getDistributionColumns(); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < distributionColumns.size(); i++) { + if (i != 0) { + sb.append(", "); + } + sb.append(distributionColumns.get(i).getName()); + } + partitionInfo.add(sb.toString()); + } else { + partitionInfo.add("ALL KEY"); + } + + partitionInfo.add(distributionInfo.getBucketNum()); + + short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId); + partitionInfo.add(String.valueOf(replicationNum)); + + DataProperty dataProperty = rangePartitionInfo.getDataProperty(partitionId); + partitionInfo.add(dataProperty.getStorageMedium().name()); + partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs())); + + partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime())); + + partitionInfos.add(partitionInfo); + } + } else { + for (Partition partition : olapTable.getPartitions()) { + List partitionInfo = new ArrayList(); + String partitionName = partition.getName(); + long partitionId = partition.getId(); + partitionInfo.add(partitionId); + partitionInfo.add(partitionName); + partitionInfo.add(partition.getCommittedVersion()); + partitionInfo.add(partition.getCommittedVersionHash()); + partitionInfo.add(partition.getState()); + + // partition + partitionInfo.add(""); + partitionInfo.add(""); + + // distribution + DistributionInfo distributionInfo = partition.getDistributionInfo(); + if (distributionInfo.getType() == DistributionInfoType.HASH) { + HashDistributionInfo hashDistributionInfo = (HashDistributionInfo) distributionInfo; + List distributionColumns = hashDistributionInfo.getDistributionColumns(); + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < distributionColumns.size(); i++) { + if (i != 0) { + sb.append(", "); + } + sb.append(distributionColumns.get(i).getName()); + } + partitionInfo.add(sb.toString()); + } else { + partitionInfo.add("ALL KEY"); + } + + partitionInfo.add(distributionInfo.getBucketNum()); + + short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partitionId); + partitionInfo.add(String.valueOf(replicationNum)); + + DataProperty dataProperty 
= olapTable.getPartitionInfo().getDataProperty(partitionId); + partitionInfo.add(dataProperty.getStorageMedium().name()); + partitionInfo.add(TimeUtils.longToTimeString(dataProperty.getCooldownTimeMs())); + + partitionInfo.add(TimeUtils.longToTimeString(partition.getLastCheckTime())); + + partitionInfos.add(partitionInfo); + } + } + } finally { + db.readUnlock(); + } + + // set result + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + for (List info : partitionInfos) { + List row = new ArrayList(info.size()); + for (Comparable comparable : info) { + row.add(comparable.toString()); + } + result.addRow(row); + } + + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String partitionIdStr) throws AnalysisException { + long partitionId = -1L; + try { + partitionId = Long.valueOf(partitionIdStr); + } catch (NumberFormatException e) { + throw new AnalysisException("Invalid partition id format: " + partitionIdStr); + } + + db.readLock(); + try { + Partition partition = olapTable.getPartition(partitionId); + if (partition == null) { + throw new AnalysisException("Partition[" + partitionId + "] does not exist"); + } + + return new IndicesProcDir(db, olapTable, partition); + } finally { + db.readUnlock(); + } + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/RollupProcDir.java b/fe/src/com/baidu/palo/common/proc/RollupProcDir.java index ccc511ff22..1b2a7c9b67 100644 --- a/fe/src/com/baidu/palo/common/proc/RollupProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/RollupProcDir.java @@ -18,81 +18,81 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - -import com.baidu.palo.alter.AlterJob; -import com.baidu.palo.alter.RollupHandler; -import com.baidu.palo.alter.RollupJob; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.common.AnalysisException; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableList; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -public class RollupProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("JobId").add("TableName").add("CreateTime").add("FinishedTime") - .add("BaseIndexName").add("RollupIndexName").add("State").add("Msg") - .add("Progress") - .build(); - - private RollupHandler rollupHandler; - private Database db; - - public RollupProcDir(RollupHandler rollupHandler, Database db) { - this.rollupHandler = rollupHandler; - this.db = db; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(rollupHandler); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - - List> rollupJobInfos = rollupHandler.getAlterJobInfosByDb(db); - for (List infoStr : rollupJobInfos) { - List oneInfo = new ArrayList(TITLE_NAMES.size()); - for (Comparable element : infoStr) { - oneInfo.add(element.toString()); - } - result.addRow(oneInfo); - } - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String tableIdStr) throws AnalysisException { - if (Strings.isNullOrEmpty(tableIdStr)) { - throw new AnalysisException("Table id is null"); - } - - long tableId 
= -1L; - try { - tableId = Long.valueOf(tableIdStr); - } catch (Exception e) { - throw new AnalysisException("Table id is invalid"); - } - - Preconditions.checkState(tableId != -1L); - AlterJob job = rollupHandler.getAlterJob(tableId); - if (job == null) { - return null; - } - - return new RollupJobProcDir((RollupJob) job); - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.alter.AlterJob; +import com.baidu.palo.alter.RollupHandler; +import com.baidu.palo.alter.RollupJob; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.common.AnalysisException; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +public class RollupProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("JobId").add("TableName").add("CreateTime").add("FinishedTime") + .add("BaseIndexName").add("RollupIndexName").add("State").add("Msg") + .add("Progress") + .build(); + + private RollupHandler rollupHandler; + private Database db; + + public RollupProcDir(RollupHandler rollupHandler, Database db) { + this.rollupHandler = rollupHandler; + this.db = db; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(rollupHandler); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + + List> rollupJobInfos = rollupHandler.getAlterJobInfosByDb(db); + for (List infoStr : rollupJobInfos) { + List oneInfo = new ArrayList(TITLE_NAMES.size()); + for (Comparable element : infoStr) { + oneInfo.add(element.toString()); + } + result.addRow(oneInfo); + } + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String tableIdStr) throws AnalysisException { + if (Strings.isNullOrEmpty(tableIdStr)) { + throw new AnalysisException("Table id is null"); + } + + long tableId = -1L; + try { + tableId = Long.valueOf(tableIdStr); + } catch (Exception e) { + throw new AnalysisException("Table id is invalid"); + } + + Preconditions.checkState(tableId != -1L); + AlterJob job = rollupHandler.getAlterJob(tableId); + if (job == null) { + return null; + } + + return new RollupJobProcDir((RollupJob) job); + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/SchemaChangeProcNode.java b/fe/src/com/baidu/palo/common/proc/SchemaChangeProcNode.java index 8ae23ba3db..e9a3ecd60b 100644 --- a/fe/src/com/baidu/palo/common/proc/SchemaChangeProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/SchemaChangeProcNode.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
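The lookup() methods in IndicesProcDir, JobsDbProcDir, LoadProcDir, PartitionsProcDir, RollupProcDir and StatisticProcDir all follow the same shape: parse the path component as a numeric id and turn a NumberFormatException into an AnalysisException. A reduced standalone sketch of that pattern (the exception class is a stand-in for com.baidu.palo.common.AnalysisException, and Long.parseLong stands in for the Long.valueOf calls used in the patch):

    public class IdParsingSketch {
        // Stand-in for com.baidu.palo.common.AnalysisException.
        static class AnalysisException extends Exception {
            AnalysisException(String msg) { super(msg); }
        }

        static long parseId(String idStr, String what) throws AnalysisException {
            try {
                return Long.parseLong(idStr);
            } catch (NumberFormatException e) {
                throw new AnalysisException("Invalid " + what + " id format: " + idStr);
            }
        }

        public static void main(String[] args) throws AnalysisException {
            System.out.println(parseId("12345", "table")); // 12345
            // parseId("abc", "table") would throw AnalysisException instead of
            // letting the NumberFormatException escape to the caller.
        }
    }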
-package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.alter.SchemaChangeHandler; import com.baidu.palo.catalog.Database; import com.baidu.palo.common.AnalysisException; @@ -28,40 +28,40 @@ import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import java.util.ArrayList; -import java.util.List; - -public class SchemaChangeProcNode implements ProcNodeInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("JobId").add("TableName").add("CreateTime").add("FinishTime") - .add("IndexName").add("IndexState").add("State").add("Msg") - .add("Progress") - .build(); - - private SchemaChangeHandler schemaChangeHandler; - private Database db; - - public SchemaChangeProcNode(SchemaChangeHandler schemaChangeHandler, Database db) { - this.schemaChangeHandler = schemaChangeHandler; - this.db = db; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(schemaChangeHandler); - - BaseProcResult result = new BaseProcResult(); - result.setNames(TITLE_NAMES); - - List> schemaChangeJobInfos = schemaChangeHandler.getAlterJobInfosByDb(db); - for (List infoStr : schemaChangeJobInfos) { - List oneInfo = new ArrayList(TITLE_NAMES.size()); - for (Comparable element : infoStr) { - oneInfo.add(element.toString()); - } - result.addRow(oneInfo); - } - return result; - } - -} +import java.util.List; + +public class SchemaChangeProcNode implements ProcNodeInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("JobId").add("TableName").add("CreateTime").add("FinishTime") + .add("IndexName").add("IndexState").add("State").add("Msg") + .add("Progress") + .build(); + + private SchemaChangeHandler schemaChangeHandler; + private Database db; + + public SchemaChangeProcNode(SchemaChangeHandler schemaChangeHandler, Database db) { + this.schemaChangeHandler = schemaChangeHandler; + this.db = db; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(schemaChangeHandler); + + BaseProcResult result = new BaseProcResult(); + result.setNames(TITLE_NAMES); + + List> schemaChangeJobInfos = schemaChangeHandler.getAlterJobInfosByDb(db); + for (List infoStr : schemaChangeJobInfos) { + List oneInfo = new ArrayList(TITLE_NAMES.size()); + for (Comparable element : infoStr) { + oneInfo.add(element.toString()); + } + result.addRow(oneInfo); + } + return result; + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/StatisticProcDir.java b/fe/src/com/baidu/palo/common/proc/StatisticProcDir.java index dd092420d2..68a8dcd040 100644 --- a/fe/src/com/baidu/palo/common/proc/StatisticProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/StatisticProcDir.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
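Taken together, these proc classes form a small tree: a ProcDirInterface implementation resolves one path component through lookup() and hands back the next node, and a leaf ProcNodeInterface produces rows through fetchResult(). The sketch below is a heavily reduced illustration of that walk, not the real framework; the interfaces are cut down to the two methods used here and the result type is simplified to String:

    import java.util.Arrays;
    import java.util.List;

    public class ProcWalkSketch {
        // Reduced stand-ins for ProcNodeInterface / ProcDirInterface.
        interface ProcNode { String fetchResult(); }
        interface ProcDir extends ProcNode { ProcNode lookup(String name); }

        public static void main(String[] args) {
            // Leaf node: produces the final rows.
            final ProcNode partitionNode = new ProcNode() {
                public String fetchResult() { return "rows for partition 2001"; }
            };
            // Directory node: resolves one path component.
            ProcDir partitionsDir = new ProcDir() {
                public ProcNode lookup(String name) {
                    return "2001".equals(name) ? partitionNode : null;
                }
                public String fetchResult() { return "one row per partition"; }
            };

            // Walking the tail of a path such as '/dbs/<dbId>/<tableId>/partitions/2001'.
            List<String> remaining = Arrays.asList("2001");
            ProcNode node = partitionsDir;
            for (String part : remaining) {
                node = ((ProcDir) node).lookup(part);
            }
            System.out.println(node.fetchResult()); // rows for partition 2001
        }
    }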
-package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.catalog.Database; import com.baidu.palo.catalog.MaterializedIndex; @@ -43,182 +43,182 @@ import com.google.common.collect.Sets; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.Set; - -public class StatisticProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("DbId").add("DbName").add("TableNum").add("PartitionNum") - .add("IndexNum").add("TabletNum").add("ReplicaNum").add("IncompleteTabletNum") - .add("InconsistentTabletNum") - .build(); - - private Catalog catalog; - - // db id -> set(tablet id) - Multimap incompleteTabletIds; - // db id -> set(tablet id) - Multimap inconsistentTabletIds; - - public StatisticProcDir(Catalog catalog) { - this.catalog = catalog; - incompleteTabletIds = HashMultimap.create(); - inconsistentTabletIds = HashMultimap.create(); - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - Preconditions.checkNotNull(catalog); - - BaseProcResult result = new BaseProcResult(); - - result.setNames(TITLE_NAMES); - List dbIds = catalog.getDbIds(); - if (dbIds == null || dbIds.isEmpty()) { - // empty - return result; - } - - // get alive backends - Set aliveBackendIds = Sets.newHashSet(Catalog.getCurrentSystemInfo().getBackendIds(true)); - - int totalDbNum = 0; - int totalTableNum = 0; - int totalPartitionNum = 0; - int totalIndexNum = 0; - int totalTabletNum = 0; - int totalReplicaNum = 0; - - incompleteTabletIds.clear(); - inconsistentTabletIds.clear(); - List> lines = new ArrayList>(); - for (Long dbId : dbIds) { - if (dbId == 0) { - // skip information_schema database - continue; - } - Database db = catalog.getDb(dbId); - if (db == null) { - continue; - } - - ++totalDbNum; - db.readLock(); - try { - int dbTableNum = 0; - int dbPartitionNum = 0; - int dbIndexNum = 0; - int dbTabletNum = 0; - int dbReplicaNum = 0; - - for (Table table : db.getTables()) { - if (table.getType() != TableType.OLAP) { - continue; - } - - ++dbTableNum; - OlapTable olapTable = (OlapTable) table; - - for (Partition partition : olapTable.getPartitions()) { - short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partition.getId()); - ++dbPartitionNum; - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { - ++dbIndexNum; - for (Tablet tablet : materializedIndex.getTablets()) { - int onlineReplicaNum = 0; - ++dbTabletNum; - List replicas = tablet.getReplicas(); - dbReplicaNum += replicas.size(); - - for (Replica replica : tablet.getReplicas()) { - ReplicaState state = replica.getState(); - if (state != ReplicaState.NORMAL && state != ReplicaState.SCHEMA_CHANGE) { - continue; - } - if (!aliveBackendIds.contains(replica.getBackendId())) { - continue; - } - ++onlineReplicaNum; - } - - if (onlineReplicaNum < replicationNum) { - incompleteTabletIds.put(dbId, tablet.getId()); - } - - if (!tablet.isConsistent()) { - inconsistentTabletIds.put(dbId, tablet.getId()); - } - } // end for tablets - } // end for indices - } // end for partitions - } // end for tables - - List oneLine = new ArrayList(TITLE_NAMES.size()); - oneLine.add(dbId); - oneLine.add(db.getFullName()); - oneLine.add(dbTableNum); - oneLine.add(dbPartitionNum); - oneLine.add(dbIndexNum); - oneLine.add(dbTabletNum); - oneLine.add(dbReplicaNum); - oneLine.add(incompleteTabletIds.get(dbId).size()); - 
oneLine.add(inconsistentTabletIds.get(dbId).size()); - - lines.add(oneLine); - - totalTableNum += dbTableNum; - totalPartitionNum += dbPartitionNum; - totalIndexNum += dbIndexNum; - totalTabletNum += dbTabletNum; - totalReplicaNum += dbReplicaNum; - } finally { - db.readUnlock(); - } - } // end for dbs - - // sort by dbName - ListComparator> comparator = new ListComparator>(1); - Collections.sort(lines, comparator); - - // add sum line after sort - List finalLine = new ArrayList(TITLE_NAMES.size()); - finalLine.add("Total"); - finalLine.add(totalDbNum); - finalLine.add(totalTableNum); - finalLine.add(totalPartitionNum); - finalLine.add(totalIndexNum); - finalLine.add(totalTabletNum); - finalLine.add(totalReplicaNum); - finalLine.add(incompleteTabletIds.size()); - finalLine.add(inconsistentTabletIds.size()); - lines.add(finalLine); - - // add result - for (List line : lines) { - List row = new ArrayList(line.size()); - for (Comparable comparable : line) { - row.add(comparable.toString()); - } - result.addRow(row); - } - - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String dbIdStr) throws AnalysisException { - long dbId = -1L; - try { - dbId = Long.valueOf(dbIdStr); - } catch (NumberFormatException e) { - throw new AnalysisException("Invalid db id format: " + dbIdStr); - } - - return new IncompleteTabletsProcNode(incompleteTabletIds.get(dbId), inconsistentTabletIds.get(dbId)); +import java.util.Set; + +public class StatisticProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("DbId").add("DbName").add("TableNum").add("PartitionNum") + .add("IndexNum").add("TabletNum").add("ReplicaNum").add("IncompleteTabletNum") + .add("InconsistentTabletNum") + .build(); + + private Catalog catalog; + + // db id -> set(tablet id) + Multimap incompleteTabletIds; + // db id -> set(tablet id) + Multimap inconsistentTabletIds; + + public StatisticProcDir(Catalog catalog) { + this.catalog = catalog; + incompleteTabletIds = HashMultimap.create(); + inconsistentTabletIds = HashMultimap.create(); + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + Preconditions.checkNotNull(catalog); + + BaseProcResult result = new BaseProcResult(); + + result.setNames(TITLE_NAMES); + List dbIds = catalog.getDbIds(); + if (dbIds == null || dbIds.isEmpty()) { + // empty + return result; + } + + // get alive backends + Set aliveBackendIds = Sets.newHashSet(Catalog.getCurrentSystemInfo().getBackendIds(true)); + + int totalDbNum = 0; + int totalTableNum = 0; + int totalPartitionNum = 0; + int totalIndexNum = 0; + int totalTabletNum = 0; + int totalReplicaNum = 0; + + incompleteTabletIds.clear(); + inconsistentTabletIds.clear(); + List> lines = new ArrayList>(); + for (Long dbId : dbIds) { + if (dbId == 0) { + // skip information_schema database + continue; + } + Database db = catalog.getDb(dbId); + if (db == null) { + continue; + } + + ++totalDbNum; + db.readLock(); + try { + int dbTableNum = 0; + int dbPartitionNum = 0; + int dbIndexNum = 0; + int dbTabletNum = 0; + int dbReplicaNum = 0; + + for (Table table : db.getTables()) { + if (table.getType() != TableType.OLAP) { + continue; + } + + ++dbTableNum; + OlapTable olapTable = (OlapTable) table; + + for (Partition partition : olapTable.getPartitions()) { + short replicationNum = olapTable.getPartitionInfo().getReplicationNum(partition.getId()); + ++dbPartitionNum; + 
for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { + ++dbIndexNum; + for (Tablet tablet : materializedIndex.getTablets()) { + int onlineReplicaNum = 0; + ++dbTabletNum; + List replicas = tablet.getReplicas(); + dbReplicaNum += replicas.size(); + + for (Replica replica : tablet.getReplicas()) { + ReplicaState state = replica.getState(); + if (state != ReplicaState.NORMAL && state != ReplicaState.SCHEMA_CHANGE) { + continue; + } + if (!aliveBackendIds.contains(replica.getBackendId())) { + continue; + } + ++onlineReplicaNum; + } + + if (onlineReplicaNum < replicationNum) { + incompleteTabletIds.put(dbId, tablet.getId()); + } + + if (!tablet.isConsistent()) { + inconsistentTabletIds.put(dbId, tablet.getId()); + } + } // end for tablets + } // end for indices + } // end for partitions + } // end for tables + + List oneLine = new ArrayList(TITLE_NAMES.size()); + oneLine.add(dbId); + oneLine.add(db.getFullName()); + oneLine.add(dbTableNum); + oneLine.add(dbPartitionNum); + oneLine.add(dbIndexNum); + oneLine.add(dbTabletNum); + oneLine.add(dbReplicaNum); + oneLine.add(incompleteTabletIds.get(dbId).size()); + oneLine.add(inconsistentTabletIds.get(dbId).size()); + + lines.add(oneLine); + + totalTableNum += dbTableNum; + totalPartitionNum += dbPartitionNum; + totalIndexNum += dbIndexNum; + totalTabletNum += dbTabletNum; + totalReplicaNum += dbReplicaNum; + } finally { + db.readUnlock(); + } + } // end for dbs + + // sort by dbName + ListComparator> comparator = new ListComparator>(1); + Collections.sort(lines, comparator); + + // add sum line after sort + List finalLine = new ArrayList(TITLE_NAMES.size()); + finalLine.add("Total"); + finalLine.add(totalDbNum); + finalLine.add(totalTableNum); + finalLine.add(totalPartitionNum); + finalLine.add(totalIndexNum); + finalLine.add(totalTabletNum); + finalLine.add(totalReplicaNum); + finalLine.add(incompleteTabletIds.size()); + finalLine.add(inconsistentTabletIds.size()); + lines.add(finalLine); + + // add result + for (List line : lines) { + List row = new ArrayList(line.size()); + for (Comparable comparable : line) { + row.add(comparable.toString()); + } + result.addRow(row); + } + + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return false; + } + + @Override + public ProcNodeInterface lookup(String dbIdStr) throws AnalysisException { + long dbId = -1L; + try { + dbId = Long.valueOf(dbIdStr); + } catch (NumberFormatException e) { + throw new AnalysisException("Invalid db id format: " + dbIdStr); + } + + return new IncompleteTabletsProcNode(incompleteTabletIds.get(dbId), inconsistentTabletIds.get(dbId)); } // used to metrics @@ -311,5 +311,5 @@ public class StatisticProcDir implements ProcDirInterface { result.add(totalInconsistentTabletNum); return result; - } -} + } +} diff --git a/fe/src/com/baidu/palo/common/proc/TableProcDir.java b/fe/src/com/baidu/palo/common/proc/TableProcDir.java index cc4b96715d..20d0b11e1b 100644 --- a/fe/src/com/baidu/palo/common/proc/TableProcDir.java +++ b/fe/src/com/baidu/palo/common/proc/TableProcDir.java @@ -18,79 +18,79 @@ // specific language governing permissions and limitations // under the License. 
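StatisticProcDir keeps its problem tablets per database in Guava multimaps, so the per-database columns come from multimap.get(dbId).size() and the Total line from multimap.size(). A tiny standalone illustration of that bookkeeping (the ids below are invented):

    import com.google.common.collect.HashMultimap;
    import com.google.common.collect.Multimap;

    public class TabletStatsSketch {
        public static void main(String[] args) {
            // db id -> set(tablet id), as in StatisticProcDir.incompleteTabletIds.
            Multimap<Long, Long> incompleteTabletIds = HashMultimap.create();
            incompleteTabletIds.put(10001L, 20001L);
            incompleteTabletIds.put(10001L, 20002L);
            incompleteTabletIds.put(10002L, 20003L);

            System.out.println(incompleteTabletIds.get(10001L).size()); // 2, per-db column
            System.out.println(incompleteTabletIds.size());             // 3, value on the Total line
        }
    }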
-package com.baidu.palo.common.proc; - -import com.baidu.palo.catalog.Database; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.Table; -import com.baidu.palo.catalog.Table.TableType; -import com.baidu.palo.common.AnalysisException; - -import com.google.common.base.Preconditions; -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; - -/* - * SHOW PROC /dbs/dbId/tableId/ - * show choice to schema or to partitions - */ -public class TableProcDir implements ProcDirInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("Nodes") - .build(); - - public static final String INDEX_SCHEMA = "index_schema"; - private static final String PARTITIONS = "partitions"; - - private static final ImmutableList CHILDREN_NODES = new ImmutableList.Builder() - .add(PARTITIONS) - .add(INDEX_SCHEMA) - .build(); - - private Database db; - private Table table; - - public TableProcDir(Database db, Table table) { - this.db = db; - this.table = table; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - BaseProcResult result = new BaseProcResult(); - - result.setNames(TITLE_NAMES); - for (String name : CHILDREN_NODES) { - result.addRow(Lists.newArrayList(name)); - } - return result; - } - - @Override - public boolean register(String name, ProcNodeInterface node) { - return false; - } - - @Override - public ProcNodeInterface lookup(String entryName) throws AnalysisException { - Preconditions.checkNotNull(db); - Preconditions.checkNotNull(table); - - if (Strings.isNullOrEmpty(entryName)) { - throw new AnalysisException("Entry name is null"); - } - - if (entryName.equals(PARTITIONS)) { - if (table.getType() != TableType.OLAP) { - throw new AnalysisException("Table[" + table.getName() + "] is not a OLAP table"); - } - return new PartitionsProcDir(db, (OlapTable) table); - } else if (entryName.equals(INDEX_SCHEMA)) { - return new IndexInfoProcDir(db, table); - } else { - throw new AnalysisException("Not implemented yet: " + entryName); - } - } - -} +package com.baidu.palo.common.proc; + +import com.baidu.palo.catalog.Database; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.Table; +import com.baidu.palo.catalog.Table.TableType; +import com.baidu.palo.common.AnalysisException; + +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +/* + * SHOW PROC /dbs/dbId/tableId/ + * show choice to schema or to partitions + */ +public class TableProcDir implements ProcDirInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("Nodes") + .build(); + + public static final String INDEX_SCHEMA = "index_schema"; + private static final String PARTITIONS = "partitions"; + + private static final ImmutableList CHILDREN_NODES = new ImmutableList.Builder() + .add(PARTITIONS) + .add(INDEX_SCHEMA) + .build(); + + private Database db; + private Table table; + + public TableProcDir(Database db, Table table) { + this.db = db; + this.table = table; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + BaseProcResult result = new BaseProcResult(); + + result.setNames(TITLE_NAMES); + for (String name : CHILDREN_NODES) { + result.addRow(Lists.newArrayList(name)); + } + return result; + } + + @Override + public boolean register(String name, ProcNodeInterface node) { + return 
false; + } + + @Override + public ProcNodeInterface lookup(String entryName) throws AnalysisException { + Preconditions.checkNotNull(db); + Preconditions.checkNotNull(table); + + if (Strings.isNullOrEmpty(entryName)) { + throw new AnalysisException("Entry name is null"); + } + + if (entryName.equals(PARTITIONS)) { + if (table.getType() != TableType.OLAP) { + throw new AnalysisException("Table[" + table.getName() + "] is not a OLAP table"); + } + return new PartitionsProcDir(db, (OlapTable) table); + } else if (entryName.equals(INDEX_SCHEMA)) { + return new IndexInfoProcDir(db, table); + } else { + throw new AnalysisException("Not implemented yet: " + entryName); + } + } + +} diff --git a/fe/src/com/baidu/palo/common/proc/UserPropertyProcNode.java b/fe/src/com/baidu/palo/common/proc/UserPropertyProcNode.java index c2b05bdc0c..672131edee 100644 --- a/fe/src/com/baidu/palo/common/proc/UserPropertyProcNode.java +++ b/fe/src/com/baidu/palo/common/proc/UserPropertyProcNode.java @@ -18,35 +18,35 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.common.AnalysisException; import com.baidu.palo.mysql.privilege.PaloAuth; import com.google.common.collect.ImmutableList; - + /* * SHOW PROC '/auth/user' - */ -public class UserPropertyProcNode implements ProcNodeInterface { - public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() - .add("Key").add("Value") - .build(); - - private PaloAuth auth; - private String qualifiedUser; - + */ +public class UserPropertyProcNode implements ProcNodeInterface { + public static final ImmutableList TITLE_NAMES = new ImmutableList.Builder() + .add("Key").add("Value") + .build(); + + private PaloAuth auth; + private String qualifiedUser; + public UserPropertyProcNode(PaloAuth auth, String qualifiedUser) { - this.auth = auth; - this.qualifiedUser = qualifiedUser; - } - - @Override - public ProcResult fetchResult() throws AnalysisException { - BaseProcResult result = new BaseProcResult(); + this.auth = auth; + this.qualifiedUser = qualifiedUser; + } + + @Override + public ProcResult fetchResult() throws AnalysisException { + BaseProcResult result = new BaseProcResult(); result.setNames(TITLE_NAMES); - - result.setRows(auth.getUserProperties(qualifiedUser)); - return result; - } -} + + result.setRows(auth.getUserProperties(qualifiedUser)); + return result; + } +} diff --git a/fe/src/com/baidu/palo/common/util/CommandResult.java b/fe/src/com/baidu/palo/common/util/CommandResult.java index ec96bd3a7b..4379d62633 100644 --- a/fe/src/com/baidu/palo/common/util/CommandResult.java +++ b/fe/src/com/baidu/palo/common/util/CommandResult.java @@ -18,62 +18,62 @@ // specific language governing permissions and limitations // under the License. 
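TableProcDir.lookup above routes the next path segment of SHOW PROC '/dbs/dbId/tableId/...' to either the partitions or the index_schema child, and rejects anything else. The following is a rough, self-contained sketch of that routing pattern only; the Node/TableDir types are simplified stand-ins, not the real ProcNodeInterface/ProcDirInterface, and the exception type is illustrative.

import java.util.HashMap;
import java.util.Map;

public class ProcLookupSketch {
    // Simplified stand-in for ProcNodeInterface/ProcDirInterface, only to show the routing idea.
    interface Node {
        Node lookup(String entryName);
    }

    static class TableDir implements Node {
        private final Map<String, Node> children = new HashMap<>();

        TableDir() {
            // Leaf children; in the real code these would be PartitionsProcDir / IndexInfoProcDir.
            children.put("partitions", name -> null);
            children.put("index_schema", name -> null);
        }

        @Override
        public Node lookup(String entryName) {
            Node child = children.get(entryName);
            if (child == null) {
                throw new IllegalArgumentException("Not implemented yet: " + entryName);
            }
            return child;
        }
    }

    public static void main(String[] args) {
        Node tableDir = new TableDir();
        System.out.println(tableDir.lookup("partitions") != null); // true
        try {
            tableDir.lookup("unknown");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Not implemented yet: unknown
        }
    }
}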
-package com.baidu.palo.common.util; - -public class CommandResult { - private int returnCode; - private String stdout; - private String stderr; - private String errMsg; - - public CommandResult() { - this.returnCode = -1; - this.stdout = null; - this.stderr = null; - this.errMsg = null; - } - - public int getReturnCode() { - return returnCode; - } - - public void setReturnCode(int returnCode) { - this.returnCode = returnCode; - } - - public String getStdout() { - return stdout; - } - - public void setStdout(String stdout) { - this.stdout = stdout; - } - - public String getStderr() { - return stderr; - } - - public void setStderr(String stderr) { - this.stderr = stderr; - } - - public String getErrMsg() { - return errMsg; - } - - public void setErrMsg(String errMsg) { - this.errMsg = errMsg; - } - - @Override - public String toString() { - StringBuilder builder = new StringBuilder(); - builder.append("CommandResult [returnCode=").append(returnCode); - builder.append(", stdout=").append(stdout); - builder.append(", stderr=").append(stderr); - builder.append(", errMsg=").append(errMsg); - builder.append("]"); - return builder.toString(); - } - -} +package com.baidu.palo.common.util; + +public class CommandResult { + private int returnCode; + private String stdout; + private String stderr; + private String errMsg; + + public CommandResult() { + this.returnCode = -1; + this.stdout = null; + this.stderr = null; + this.errMsg = null; + } + + public int getReturnCode() { + return returnCode; + } + + public void setReturnCode(int returnCode) { + this.returnCode = returnCode; + } + + public String getStdout() { + return stdout; + } + + public void setStdout(String stdout) { + this.stdout = stdout; + } + + public String getStderr() { + return stderr; + } + + public void setStderr(String stderr) { + this.stderr = stderr; + } + + public String getErrMsg() { + return errMsg; + } + + public void setErrMsg(String errMsg) { + this.errMsg = errMsg; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("CommandResult [returnCode=").append(returnCode); + builder.append(", stdout=").append(stdout); + builder.append(", stderr=").append(stderr); + builder.append(", errMsg=").append(errMsg); + builder.append("]"); + return builder.toString(); + } + +} diff --git a/fe/src/com/baidu/palo/common/util/Counter.java b/fe/src/com/baidu/palo/common/util/Counter.java index 8147100bc7..69f0ab2021 100644 --- a/fe/src/com/baidu/palo/common/util/Counter.java +++ b/fe/src/com/baidu/palo/common/util/Counter.java @@ -18,33 +18,33 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - -import com.baidu.palo.thrift.TUnit; - -// Counter means indicators field. The counter's name is key, the counter itself is value. -public class Counter { - private long value; - private TUnit type; - - public long getValue() { - return value; - } - - public void setValue(long newValue) { - value = newValue; - } - - public TUnit getType() { - return type; - } - - public void setType(TUnit type) { - this.type = type; - } - - public Counter(TUnit type, long value) { - this.type = type; - this.value = value; - } -} +package com.baidu.palo.common.util; + +import com.baidu.palo.thrift.TUnit; + +// Counter means indicators field. The counter's name is key, the counter itself is value. 
+public class Counter { + private long value; + private TUnit type; + + public long getValue() { + return value; + } + + public void setValue(long newValue) { + value = newValue; + } + + public TUnit getType() { + return type; + } + + public void setType(TUnit type) { + this.type = type; + } + + public Counter(TUnit type, long value) { + this.type = type; + this.value = value; + } +} diff --git a/fe/src/com/baidu/palo/common/util/ListComparator.java b/fe/src/com/baidu/palo/common/util/ListComparator.java index 238cdf7cb2..02d696b636 100644 --- a/fe/src/com/baidu/palo/common/util/ListComparator.java +++ b/fe/src/com/baidu/palo/common/util/ListComparator.java @@ -18,64 +18,64 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - -import com.google.common.base.Preconditions; - -import java.util.Comparator; -import java.util.List; - -/* - * this class is for sorting list collections - */ -public class ListComparator> implements Comparator { - - OrderByPair[] orderByPairs; - boolean isDesc; - int indicesLen; - - public ListComparator(int...indicec) { - this(false, indicec); - } - - public ListComparator(boolean isDesc, int...indices) { - this.orderByPairs = new OrderByPair[indices.length]; - for (int i = 0; i < indices.length; i++) { - this.orderByPairs[i] = new OrderByPair(indices[i], isDesc); - } - this.indicesLen = orderByPairs.length; - } - - public ListComparator(OrderByPair...orderByPairs) { - this.orderByPairs = orderByPairs; - this.indicesLen = orderByPairs.length; - } - - @Override - public int compare(T firstList, T secondList) { - int firstLen = firstList.size(); - int secondLen = secondList.size(); - int minLen = Math.min(firstLen, secondLen); - - OrderByPair orderByPair = null; - for (int i = 0; i < indicesLen; ++i) { - if (i < minLen) { - orderByPair = orderByPairs[i]; - int ret = firstList.get(orderByPair.getIndex()).compareTo(secondList.get(orderByPair.getIndex())); - if (ret != 0) { - return orderByPair.isDesc() ? -ret : ret; - } - } - } - - Preconditions.checkNotNull(orderByPair); - if (firstLen < secondLen) { - return orderByPair.isDesc() ? 1 : -1; - } - if (firstLen > secondLen) { - return orderByPair.isDesc() ? -1 : 1; - } - - return 0; - } -} +package com.baidu.palo.common.util; + +import com.google.common.base.Preconditions; + +import java.util.Comparator; +import java.util.List; + +/* + * this class is for sorting list collections + */ +public class ListComparator> implements Comparator { + + OrderByPair[] orderByPairs; + boolean isDesc; + int indicesLen; + + public ListComparator(int...indicec) { + this(false, indicec); + } + + public ListComparator(boolean isDesc, int...indices) { + this.orderByPairs = new OrderByPair[indices.length]; + for (int i = 0; i < indices.length; i++) { + this.orderByPairs[i] = new OrderByPair(indices[i], isDesc); + } + this.indicesLen = orderByPairs.length; + } + + public ListComparator(OrderByPair...orderByPairs) { + this.orderByPairs = orderByPairs; + this.indicesLen = orderByPairs.length; + } + + @Override + public int compare(T firstList, T secondList) { + int firstLen = firstList.size(); + int secondLen = secondList.size(); + int minLen = Math.min(firstLen, secondLen); + + OrderByPair orderByPair = null; + for (int i = 0; i < indicesLen; ++i) { + if (i < minLen) { + orderByPair = orderByPairs[i]; + int ret = firstList.get(orderByPair.getIndex()).compareTo(secondList.get(orderByPair.getIndex())); + if (ret != 0) { + return orderByPair.isDesc() ? 
-ret : ret; + } + } + } + + Preconditions.checkNotNull(orderByPair); + if (firstLen < secondLen) { + return orderByPair.isDesc() ? 1 : -1; + } + if (firstLen > secondLen) { + return orderByPair.isDesc() ? -1 : 1; + } + + return 0; + } +} diff --git a/fe/src/com/baidu/palo/common/util/ProfileManager.java b/fe/src/com/baidu/palo/common/util/ProfileManager.java index c345ce6c10..f498af0dad 100644 --- a/fe/src/com/baidu/palo/common/util/ProfileManager.java +++ b/fe/src/com/baidu/palo/common/util/ProfileManager.java @@ -18,152 +18,152 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - -import com.google.common.base.Strings; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Deque; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; -import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; - -/* - * if you want to visit the atrribute(such as queryID,defaultDb) - * you can use profile.getInfoStrings("queryId") - * All attributes can be seen from the above. - * - * why the element in the finished profile arary is not RuntimeProfile, - * the purpose is let coordinator can destruct earlier(the fragment profile is in Coordinator) - * - */ -public class ProfileManager { - private static final Logger LOG = LogManager.getLogger(ProfileManager.class); - private static ProfileManager INSTANCE = null; - private static final int ARRAY_SIZE = 100; - // private static final int TOTAL_LEN = 1000 * ARRAY_SIZE ; - public static final String QUERY_ID = "Query ID"; - public static final String START_TIME = "Start Time"; - public static final String END_TIME = "End Time"; - public static final String TOTAL_TIME = "Total"; - public static final String QUERY_TYPE = "Query Type"; - public static final String QUERY_STATE = "Query State"; - public static final String SQL_STATEMENT = "Sql Statement"; - public static final String USER = "User"; - public static final String DEFAULT_DB = "Default Db"; - - public static final ArrayList PROFILE_HEADERS = new ArrayList( - Arrays.asList(QUERY_ID, USER, DEFAULT_DB, SQL_STATEMENT, QUERY_TYPE, - START_TIME, END_TIME, TOTAL_TIME, QUERY_STATE)); - - private class ProfileElement { - public Map infoStrings = Maps.newHashMap(); - public String profileContent; - } - - // only protect profileDeque; profileMap is concurrent, no need to protect - private ReentrantReadWriteLock lock; - private ReadLock readLock; - private WriteLock writeLock; - - private Deque profileDeque; - private Map profileMap; // from QueryId to RuntimeProfile - - public static ProfileManager getInstance() { - if (INSTANCE == null) { - INSTANCE = new ProfileManager(); - } - return INSTANCE; - } - - private ProfileManager() { - lock = new ReentrantReadWriteLock(true); - readLock = lock.readLock(); - writeLock = lock.writeLock(); - profileDeque = new LinkedList(); - profileMap = new ConcurrentHashMap(); - } - - public ProfileElement createElement(RuntimeProfile profile) { - ProfileElement element = new ProfileElement(); - RuntimeProfile summaryProfile = profile.getChildList().get(0).first; - for (String header : 
PROFILE_HEADERS) { - element.infoStrings.put(header, summaryProfile.getInfoString(header)); - } - element.profileContent = profile.toString(); - return element; - } - - public void pushProfile(RuntimeProfile profile) { - if (profile == null) { - return; - } - - ProfileElement element = createElement(profile); - String queryId = element.infoStrings.get(ProfileManager.QUERY_ID); - // check when push in, which can ensure every element in the list has QUERY_ID column, - // so there is no need to check when remove element from list. - if (Strings.isNullOrEmpty(queryId)) { - LOG.warn("the key or value of Map is null, " - + "may be forget to insert 'QUERY_ID' column into infoStrings"); - } - - profileMap.put(queryId, element); - writeLock.lock(); - try { - if (profileDeque.size() >= ARRAY_SIZE) { - profileMap.remove(profileDeque.getFirst().infoStrings.get(QUERY_ID)); - profileDeque.removeFirst(); - } - profileDeque.addLast(element); - } finally { - writeLock.unlock(); - } - } - - public List> getAllQueries() { - List> result = Lists.newArrayList(); - readLock.lock(); - try { - Iterator reverse = profileDeque.descendingIterator(); - while (reverse.hasNext()) { - ProfileElement element = (ProfileElement) reverse.next(); - Map infoStrings = element.infoStrings; - - List row = Lists.newArrayList(); - for (String str : PROFILE_HEADERS ) { - row.add(infoStrings.get(str)); - } - result.add(row); - } - } finally { - readLock.unlock(); - } - return result; - } - - public String getProfile(String queryID) { - readLock.lock(); - try { - ProfileElement element = profileMap.get(queryID); - if (element == null) { - return new String("query id " + queryID + " not found." ); - } - - return element.profileContent; - } finally { - readLock.unlock(); - } - } -} +package com.baidu.palo.common.util; + +import com.google.common.base.Strings; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Deque; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +/* + * if you want to visit the atrribute(such as queryID,defaultDb) + * you can use profile.getInfoStrings("queryId") + * All attributes can be seen from the above. 
+ * + * why the element in the finished profile arary is not RuntimeProfile, + * the purpose is let coordinator can destruct earlier(the fragment profile is in Coordinator) + * + */ +public class ProfileManager { + private static final Logger LOG = LogManager.getLogger(ProfileManager.class); + private static ProfileManager INSTANCE = null; + private static final int ARRAY_SIZE = 100; + // private static final int TOTAL_LEN = 1000 * ARRAY_SIZE ; + public static final String QUERY_ID = "Query ID"; + public static final String START_TIME = "Start Time"; + public static final String END_TIME = "End Time"; + public static final String TOTAL_TIME = "Total"; + public static final String QUERY_TYPE = "Query Type"; + public static final String QUERY_STATE = "Query State"; + public static final String SQL_STATEMENT = "Sql Statement"; + public static final String USER = "User"; + public static final String DEFAULT_DB = "Default Db"; + + public static final ArrayList PROFILE_HEADERS = new ArrayList( + Arrays.asList(QUERY_ID, USER, DEFAULT_DB, SQL_STATEMENT, QUERY_TYPE, + START_TIME, END_TIME, TOTAL_TIME, QUERY_STATE)); + + private class ProfileElement { + public Map infoStrings = Maps.newHashMap(); + public String profileContent; + } + + // only protect profileDeque; profileMap is concurrent, no need to protect + private ReentrantReadWriteLock lock; + private ReadLock readLock; + private WriteLock writeLock; + + private Deque profileDeque; + private Map profileMap; // from QueryId to RuntimeProfile + + public static ProfileManager getInstance() { + if (INSTANCE == null) { + INSTANCE = new ProfileManager(); + } + return INSTANCE; + } + + private ProfileManager() { + lock = new ReentrantReadWriteLock(true); + readLock = lock.readLock(); + writeLock = lock.writeLock(); + profileDeque = new LinkedList(); + profileMap = new ConcurrentHashMap(); + } + + public ProfileElement createElement(RuntimeProfile profile) { + ProfileElement element = new ProfileElement(); + RuntimeProfile summaryProfile = profile.getChildList().get(0).first; + for (String header : PROFILE_HEADERS) { + element.infoStrings.put(header, summaryProfile.getInfoString(header)); + } + element.profileContent = profile.toString(); + return element; + } + + public void pushProfile(RuntimeProfile profile) { + if (profile == null) { + return; + } + + ProfileElement element = createElement(profile); + String queryId = element.infoStrings.get(ProfileManager.QUERY_ID); + // check when push in, which can ensure every element in the list has QUERY_ID column, + // so there is no need to check when remove element from list. 
+ if (Strings.isNullOrEmpty(queryId)) { + LOG.warn("the key or value of Map is null, " + + "may be forget to insert 'QUERY_ID' column into infoStrings"); + } + + profileMap.put(queryId, element); + writeLock.lock(); + try { + if (profileDeque.size() >= ARRAY_SIZE) { + profileMap.remove(profileDeque.getFirst().infoStrings.get(QUERY_ID)); + profileDeque.removeFirst(); + } + profileDeque.addLast(element); + } finally { + writeLock.unlock(); + } + } + + public List> getAllQueries() { + List> result = Lists.newArrayList(); + readLock.lock(); + try { + Iterator reverse = profileDeque.descendingIterator(); + while (reverse.hasNext()) { + ProfileElement element = (ProfileElement) reverse.next(); + Map infoStrings = element.infoStrings; + + List row = Lists.newArrayList(); + for (String str : PROFILE_HEADERS ) { + row.add(infoStrings.get(str)); + } + result.add(row); + } + } finally { + readLock.unlock(); + } + return result; + } + + public String getProfile(String queryID) { + readLock.lock(); + try { + ProfileElement element = profileMap.get(queryID); + if (element == null) { + return new String("query id " + queryID + " not found." ); + } + + return element.profileContent; + } finally { + readLock.unlock(); + } + } +} diff --git a/fe/src/com/baidu/palo/common/util/RuntimeProfile.java b/fe/src/com/baidu/palo/common/util/RuntimeProfile.java index ad4b8653b8..0793be13fc 100644 --- a/fe/src/com/baidu/palo/common/util/RuntimeProfile.java +++ b/fe/src/com/baidu/palo/common/util/RuntimeProfile.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - +package com.baidu.palo.common.util; + import com.baidu.palo.common.Pair; import com.baidu.palo.common.Reference; import com.baidu.palo.thrift.TCounter; @@ -40,333 +40,333 @@ import java.util.Formatter; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; - -public class RuntimeProfile { - private static final Logger LOG = LogManager.getLogger(RuntimeProfile.class); - private static String ROOT_COUNTER = ""; - private Counter counterTotalTime; - private double localTimePercent; - - private Map infoStrings = Maps.newHashMap(); - private List infoStringsDisplayOrder = Lists.newArrayList(); - - private Map counterMap = Maps.newHashMap(); - private Map > childCounterMap = Maps.newHashMap(); - - private Map childMap = Maps.newHashMap(); - private List> childList = Lists.newArrayList(); - - private String name; - - public RuntimeProfile(String name) { - this(); - setName(name); - } - - public RuntimeProfile() { - this.counterTotalTime = new Counter(TUnit.TIME_NS, 0); - this.localTimePercent = 0; - this.counterMap.put("TotalTime", counterTotalTime); - } - - public Counter getCounterTotalTime() { - return counterTotalTime; - } - - public Map getCounterMap() { - return counterMap; - } - - public List> getChildList() { - return childList; - } - - public Counter addCounter(String name, TUnit type, String parentCounterName) { - Counter counter = this.counterMap.get(name); - if (counter != null) { - return counter; - } else { - Preconditions.checkState(parentCounterName.equals(ROOT_COUNTER) - || this.counterMap.containsKey(parentCounterName)); - Counter newCounter = new Counter(type, 0); - this.counterMap.put(name, newCounter); - - Set childCounters = childCounterMap.get(parentCounterName); - if (childCounters == null) { - childCounterMap.put(parentCounterName, new HashSet()); - childCounters = childCounterMap.get(parentCounterName); - } - 
childCounters.add(name); - return newCounter; - } - } - - public void update(final TRuntimeProfileTree thriftProfile) { - Reference idx = new Reference(0); - update(thriftProfile.nodes, idx); - Preconditions.checkState(idx.getRef().equals(thriftProfile.nodes.size())); - } - - // preorder traversal, idx should be modified in the traversal process - private void update(List nodes, Reference idx) { - TRuntimeProfileNode node = nodes.get(idx.getRef()); - - // update this level's counters - if (node.counters != null) { - for (TCounter tcounter : node.counters) { - Counter counter = counterMap.get(tcounter.name); - if (counter == null) { - counterMap.put(tcounter.name, new Counter(tcounter.type, tcounter.value)); - } else { - if (counter.getType() != tcounter.type) { - LOG.error("Cannot update counters with the same name but different types" - + " type=" + tcounter.type); - } else { - counter.setValue(tcounter.value); - } - } - } - - if (node.child_counters_map != null) { - // update childCounters - for (Map.Entry> entry : - node.child_counters_map.entrySet()) { - String parentCounterName = entry.getKey(); - Set childCounters = childCounterMap.get(parentCounterName); - if (childCounters == null) { - childCounterMap.put(parentCounterName, new HashSet()); - childCounters = childCounterMap.get(parentCounterName); - } - childCounters.addAll(entry.getValue()); - } - } - } - - if (node.info_strings_display_order != null) { - Map nodeInfoStrings = node.info_strings; - for (String key : node.info_strings_display_order) { - String value = nodeInfoStrings.get(key); - Preconditions.checkState(value != null); - if (this.infoStrings.containsKey(key)) { - // exists then replace - this.infoStrings.put(key, value); - } else { - this.infoStrings.put(key, value); - this.infoStringsDisplayOrder.add(key); - } - } - } - - idx.setRef(idx.getRef() + 1); - - for (int i = 0; i < node.num_children; i ++) { - TRuntimeProfileNode tchild = nodes.get(idx.getRef()); - String childName = tchild.name; - RuntimeProfile childProfile = this.childMap.get(childName); - if (childProfile == null) { - childMap.put(childName, new RuntimeProfile(childName)); - childProfile = this.childMap.get(childName); - Pair pair = Pair.create(childProfile, tchild.indent); - this.childList.add(pair); - } - childProfile.update(nodes, idx); - } - } - - // Print the profile: - // 1. Profile Name - // 2. Info Strings - // 3. Counters - // 4. Children - public void prettyPrint(StringBuilder builder, String prefix) { - Counter counter = this.counterMap.get("TotalTime"); - Preconditions.checkState(counter != null); - // 1. profile name - builder.append(prefix).append(name).append(":"); - // total time - if (counter.getValue() != 0) { - Formatter fmt = new Formatter(); - builder.append("(Active: ") - .append(this.printCounter(counter.getValue(), counter.getType())) - .append(", % non-child: ").append(fmt.format("%.2f", localTimePercent)) - .append("%)"); - } - builder.append("\n"); - - // 2. info String - for (String key : this.infoStringsDisplayOrder) { - builder.append(prefix).append(" ").append(key).append(": ") - .append(this.infoStrings.get(key)).append("\n"); - } - - // 3. counters - printChildCounters(prefix, ROOT_COUNTER, builder); - - // 4. children - for (int i = 0; i < childList.size(); i++) { - Pair pair = childList.get(i); - boolean indent = pair.second; - RuntimeProfile profile = pair.first; - profile.prettyPrint(builder, prefix + (indent ? 
" " : "")); - } - } - - public String toString() { - StringBuilder builder = new StringBuilder(); - prettyPrint(builder, ""); - return builder.toString(); - } - - private void printChildCounters(String prefix, String counterName, StringBuilder builder) { - Set childCounterSet = childCounterMap.get(counterName); - if (childCounterSet == null) { - return; - } - - for (String childCounterName : childCounterSet) { - Counter counter = this.counterMap.get(childCounterName); - Preconditions.checkState(counter != null); - builder.append(prefix).append( " - " ).append(childCounterName).append(": ") - .append(printCounter(counter.getValue(), counter.getType())).append("\n"); - this.printChildCounters(prefix + " ", childCounterName, builder); - } - } - - private String printCounter(long value, TUnit type) { +import java.util.Set; + +public class RuntimeProfile { + private static final Logger LOG = LogManager.getLogger(RuntimeProfile.class); + private static String ROOT_COUNTER = ""; + private Counter counterTotalTime; + private double localTimePercent; + + private Map infoStrings = Maps.newHashMap(); + private List infoStringsDisplayOrder = Lists.newArrayList(); + + private Map counterMap = Maps.newHashMap(); + private Map > childCounterMap = Maps.newHashMap(); + + private Map childMap = Maps.newHashMap(); + private List> childList = Lists.newArrayList(); + + private String name; + + public RuntimeProfile(String name) { + this(); + setName(name); + } + + public RuntimeProfile() { + this.counterTotalTime = new Counter(TUnit.TIME_NS, 0); + this.localTimePercent = 0; + this.counterMap.put("TotalTime", counterTotalTime); + } + + public Counter getCounterTotalTime() { + return counterTotalTime; + } + + public Map getCounterMap() { + return counterMap; + } + + public List> getChildList() { + return childList; + } + + public Counter addCounter(String name, TUnit type, String parentCounterName) { + Counter counter = this.counterMap.get(name); + if (counter != null) { + return counter; + } else { + Preconditions.checkState(parentCounterName.equals(ROOT_COUNTER) + || this.counterMap.containsKey(parentCounterName)); + Counter newCounter = new Counter(type, 0); + this.counterMap.put(name, newCounter); + + Set childCounters = childCounterMap.get(parentCounterName); + if (childCounters == null) { + childCounterMap.put(parentCounterName, new HashSet()); + childCounters = childCounterMap.get(parentCounterName); + } + childCounters.add(name); + return newCounter; + } + } + + public void update(final TRuntimeProfileTree thriftProfile) { + Reference idx = new Reference(0); + update(thriftProfile.nodes, idx); + Preconditions.checkState(idx.getRef().equals(thriftProfile.nodes.size())); + } + + // preorder traversal, idx should be modified in the traversal process + private void update(List nodes, Reference idx) { + TRuntimeProfileNode node = nodes.get(idx.getRef()); + + // update this level's counters + if (node.counters != null) { + for (TCounter tcounter : node.counters) { + Counter counter = counterMap.get(tcounter.name); + if (counter == null) { + counterMap.put(tcounter.name, new Counter(tcounter.type, tcounter.value)); + } else { + if (counter.getType() != tcounter.type) { + LOG.error("Cannot update counters with the same name but different types" + + " type=" + tcounter.type); + } else { + counter.setValue(tcounter.value); + } + } + } + + if (node.child_counters_map != null) { + // update childCounters + for (Map.Entry> entry : + node.child_counters_map.entrySet()) { + String parentCounterName = entry.getKey(); + 
Set childCounters = childCounterMap.get(parentCounterName); + if (childCounters == null) { + childCounterMap.put(parentCounterName, new HashSet()); + childCounters = childCounterMap.get(parentCounterName); + } + childCounters.addAll(entry.getValue()); + } + } + } + + if (node.info_strings_display_order != null) { + Map nodeInfoStrings = node.info_strings; + for (String key : node.info_strings_display_order) { + String value = nodeInfoStrings.get(key); + Preconditions.checkState(value != null); + if (this.infoStrings.containsKey(key)) { + // exists then replace + this.infoStrings.put(key, value); + } else { + this.infoStrings.put(key, value); + this.infoStringsDisplayOrder.add(key); + } + } + } + + idx.setRef(idx.getRef() + 1); + + for (int i = 0; i < node.num_children; i ++) { + TRuntimeProfileNode tchild = nodes.get(idx.getRef()); + String childName = tchild.name; + RuntimeProfile childProfile = this.childMap.get(childName); + if (childProfile == null) { + childMap.put(childName, new RuntimeProfile(childName)); + childProfile = this.childMap.get(childName); + Pair pair = Pair.create(childProfile, tchild.indent); + this.childList.add(pair); + } + childProfile.update(nodes, idx); + } + } + + // Print the profile: + // 1. Profile Name + // 2. Info Strings + // 3. Counters + // 4. Children + public void prettyPrint(StringBuilder builder, String prefix) { + Counter counter = this.counterMap.get("TotalTime"); + Preconditions.checkState(counter != null); + // 1. profile name + builder.append(prefix).append(name).append(":"); + // total time + if (counter.getValue() != 0) { + Formatter fmt = new Formatter(); + builder.append("(Active: ") + .append(this.printCounter(counter.getValue(), counter.getType())) + .append(", % non-child: ").append(fmt.format("%.2f", localTimePercent)) + .append("%)"); + } + builder.append("\n"); + + // 2. info String + for (String key : this.infoStringsDisplayOrder) { + builder.append(prefix).append(" ").append(key).append(": ") + .append(this.infoStrings.get(key)).append("\n"); + } + + // 3. counters + printChildCounters(prefix, ROOT_COUNTER, builder); + + // 4. children + for (int i = 0; i < childList.size(); i++) { + Pair pair = childList.get(i); + boolean indent = pair.second; + RuntimeProfile profile = pair.first; + profile.prettyPrint(builder, prefix + (indent ? " " : "")); + } + } + + public String toString() { StringBuilder builder = new StringBuilder(); - long tmpValue = value; - switch (type) { - case UNIT: { - Pair pair = DebugUtil.getUint(tmpValue); - if (pair.second.isEmpty()) { - builder.append(tmpValue); - } else { - builder.append(pair.first).append(pair.second) - .append(" (").append(tmpValue).append(")"); - } - break; - } - case TIME_NS: { - if (tmpValue >= DebugUtil.BILLION) { - // If the time is over a second, print it up to ms. 
+ prettyPrint(builder, ""); + return builder.toString(); + } + + private void printChildCounters(String prefix, String counterName, StringBuilder builder) { + Set childCounterSet = childCounterMap.get(counterName); + if (childCounterSet == null) { + return; + } + + for (String childCounterName : childCounterSet) { + Counter counter = this.counterMap.get(childCounterName); + Preconditions.checkState(counter != null); + builder.append(prefix).append( " - " ).append(childCounterName).append(": ") + .append(printCounter(counter.getValue(), counter.getType())).append("\n"); + this.printChildCounters(prefix + " ", childCounterName, builder); + } + } + + private String printCounter(long value, TUnit type) { + StringBuilder builder = new StringBuilder(); + long tmpValue = value; + switch (type) { + case UNIT: { + Pair pair = DebugUtil.getUint(tmpValue); + if (pair.second.isEmpty()) { + builder.append(tmpValue); + } else { + builder.append(pair.first).append(pair.second) + .append(" (").append(tmpValue).append(")"); + } + break; + } + case TIME_NS: { + if (tmpValue >= DebugUtil.BILLION) { + // If the time is over a second, print it up to ms. tmpValue /= DebugUtil.MILLION; DebugUtil.printTimeMs(tmpValue, builder); - } else if (tmpValue >= DebugUtil.MILLION) { - // if the time is over a ms, print it up to microsecond in the unit of ms. + } else if (tmpValue >= DebugUtil.MILLION) { + // if the time is over a ms, print it up to microsecond in the unit of ms. tmpValue /= 1000; builder.append(tmpValue / 1000).append(".").append(tmpValue % 1000).append("ms"); - } else if (tmpValue > 1000) { - // if the time is over a microsecond, print it using unit microsecond - builder.append(tmpValue / 1000).append(".").append(tmpValue % 1000).append("us"); - } else { - builder.append(tmpValue).append("ns"); - } - break; - } - case BYTES: { - Pair pair = DebugUtil.getByteUint(tmpValue); - Formatter fmt = new Formatter(); - builder.append(fmt.format("%.2f", pair.first)).append(" ").append(pair.second); - fmt.close(); - break; - } - case BYTES_PER_SECOND: { - Pair pair = DebugUtil.getByteUint(tmpValue); - builder.append(pair.first).append(" ").append(pair.second).append("/sec"); - break; - } - case DOUBLE_VALUE: { - Formatter fmt = new Formatter(); - builder.append(fmt.format("%.2f", (double) tmpValue)); - fmt.close(); - break; - } - case UNIT_PER_SECOND: { - Pair pair = DebugUtil.getUint(tmpValue); - if (pair.second.isEmpty()) { - builder.append(tmpValue); - } else { - builder.append(pair.first).append(pair.second) - .append(" ").append("/sec"); - } - break; - } - default: { - Preconditions.checkState(false, "type=" + type); - break; - } - } - return builder.toString(); - } - - public void addChild(RuntimeProfile child) { - if (child == null) { - return; - } - - this.childMap.put(child.name, child); - Pair pair = Pair.create(child, true); - this.childList.add(pair); - } - - public void computeTimeInProfile() { - computeTimeInProfile(this.counterTotalTime.getValue()); - } - - public void computeTimeInProfile(long total) { - if (total == 0) { - return; - } - - // Add all the total times in all the children - long totalChildTime = 0; - - for (int i = 0; i < this.childList.size(); ++i) { - totalChildTime += childList.get(i).first.getCounterTotalTime().getValue(); - } - long localTime = this.getCounterTotalTime().getValue() - totalChildTime; - // Counters have some margin, set to 0 if it was negative. 
- localTime = Math.max(0, localTime); - this.localTimePercent = Double.valueOf(localTime) / Double.valueOf(total); - this.localTimePercent = Math.min(1.0, this.localTimePercent) * 100; - - // Recurse on children - for (int i = 0; i < this.childList.size(); i++) { - childList.get(i).first.computeTimeInProfile(total); - } - } - - // from bigger to smaller - public void sortChildren() { - Collections.sort(this.childList, new Comparator>() { - @Override - public int compare(Pair profile1, Pair profile2) - { - long distance = profile2.first.getCounterTotalTime().getValue() - - profile1.first.getCounterTotalTime().getValue(); - return (int) distance; - } - }); - } - - public void addInfoString(String key, String value) { - String target = this.infoStrings.get(key); - if (target == null) { - this.infoStrings.put(key, value); - this.infoStringsDisplayOrder.add(key); - } else { - this.infoStrings.put(key, value); - } - } - - public void setName(String name) { - this.name = name; - } - - // Returns the value to which the specified key is mapped; - // or null if this map contains no mapping for the key. - public String getInfoString(String key) { - return infoStrings.get(key); - } -} + } else if (tmpValue > 1000) { + // if the time is over a microsecond, print it using unit microsecond + builder.append(tmpValue / 1000).append(".").append(tmpValue % 1000).append("us"); + } else { + builder.append(tmpValue).append("ns"); + } + break; + } + case BYTES: { + Pair pair = DebugUtil.getByteUint(tmpValue); + Formatter fmt = new Formatter(); + builder.append(fmt.format("%.2f", pair.first)).append(" ").append(pair.second); + fmt.close(); + break; + } + case BYTES_PER_SECOND: { + Pair pair = DebugUtil.getByteUint(tmpValue); + builder.append(pair.first).append(" ").append(pair.second).append("/sec"); + break; + } + case DOUBLE_VALUE: { + Formatter fmt = new Formatter(); + builder.append(fmt.format("%.2f", (double) tmpValue)); + fmt.close(); + break; + } + case UNIT_PER_SECOND: { + Pair pair = DebugUtil.getUint(tmpValue); + if (pair.second.isEmpty()) { + builder.append(tmpValue); + } else { + builder.append(pair.first).append(pair.second) + .append(" ").append("/sec"); + } + break; + } + default: { + Preconditions.checkState(false, "type=" + type); + break; + } + } + return builder.toString(); + } + + public void addChild(RuntimeProfile child) { + if (child == null) { + return; + } + + this.childMap.put(child.name, child); + Pair pair = Pair.create(child, true); + this.childList.add(pair); + } + + public void computeTimeInProfile() { + computeTimeInProfile(this.counterTotalTime.getValue()); + } + + public void computeTimeInProfile(long total) { + if (total == 0) { + return; + } + + // Add all the total times in all the children + long totalChildTime = 0; + + for (int i = 0; i < this.childList.size(); ++i) { + totalChildTime += childList.get(i).first.getCounterTotalTime().getValue(); + } + long localTime = this.getCounterTotalTime().getValue() - totalChildTime; + // Counters have some margin, set to 0 if it was negative. 
+ localTime = Math.max(0, localTime); + this.localTimePercent = Double.valueOf(localTime) / Double.valueOf(total); + this.localTimePercent = Math.min(1.0, this.localTimePercent) * 100; + + // Recurse on children + for (int i = 0; i < this.childList.size(); i++) { + childList.get(i).first.computeTimeInProfile(total); + } + } + + // from bigger to smaller + public void sortChildren() { + Collections.sort(this.childList, new Comparator>() { + @Override + public int compare(Pair profile1, Pair profile2) + { + long distance = profile2.first.getCounterTotalTime().getValue() + - profile1.first.getCounterTotalTime().getValue(); + return (int) distance; + } + }); + } + + public void addInfoString(String key, String value) { + String target = this.infoStrings.get(key); + if (target == null) { + this.infoStrings.put(key, value); + this.infoStringsDisplayOrder.add(key); + } else { + this.infoStrings.put(key, value); + } + } + + public void setName(String name) { + this.name = name; + } + + // Returns the value to which the specified key is mapped; + // or null if this map contains no mapping for the key. + public String getInfoString(String key) { + return infoStrings.get(key); + } +} diff --git a/fe/src/com/baidu/palo/common/util/TimeUtils.java b/fe/src/com/baidu/palo/common/util/TimeUtils.java index 0634ff8128..648eab93a8 100644 --- a/fe/src/com/baidu/palo/common/util/TimeUtils.java +++ b/fe/src/com/baidu/palo/common/util/TimeUtils.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - +package com.baidu.palo.common.util; + import com.baidu.palo.catalog.PrimitiveType; import com.baidu.palo.catalog.Type; import com.baidu.palo.common.AnalysisException; @@ -37,71 +37,71 @@ import java.util.Date; import java.util.SimpleTimeZone; import java.util.TimeZone; import java.util.regex.Matcher; -import java.util.regex.Pattern; - -// TODO(dhc) add nanosecond timer for coordinator's root profile -public class TimeUtils { - private static final Logger LOG = LogManager.getLogger(TimeUtils.class); - - private static final TimeZone TIME_ZONE; - - // NOTICE: Date formats are not synchronized. - // it must be used as synchronized externally. - private static final SimpleDateFormat DATE_FORMAT; - private static final SimpleDateFormat DATETIME_FORMAT; - private static final SimpleDateFormat TIME_FORMAT; - - private static final Pattern DATETIME_FORMAT_REG = - Pattern.compile("^((\\d{2}(([02468][048])|([13579][26]))[\\-\\/\\s]?((((0?[13578])|(1[02]))[\\-\\/\\s]?" - + "((0?[1-9])|([1-2][0-9])|(3[01])))|(((0?[469])|(11))[\\-\\/\\s]?" - + "((0?[1-9])|([1-2][0-9])|(30)))|(0?2[\\-\\/\\s]?((0?[1-9])|([1-2][0-9])))))|(" - + "\\d{2}(([02468][1235679])|([13579][01345789]))[\\-\\/\\s]?((((0?[13578])|(1[02]))" - + "[\\-\\/\\s]?((0?[1-9])|([1-2][0-9])|(3[01])))|(((0?[469])|(11))[\\-\\/\\s]?" 
- + "((0?[1-9])|([1-2][0-9])|(30)))|(0?2[\\-\\/\\s]?((0?[1-9])|(1[0-9])|(2[0-8]))))))" - + "(\\s(((0?[0-9])|([1][0-9])|([2][0-3]))\\:([0-5]?[0-9])((\\s)|(\\:([0-5]?[0-9])))))?$"); - - public static Date MIN_DATE = null; - public static Date MAX_DATE = null; - - public static Date MIN_DATETIME = null; - public static Date MAX_DATETIME = null; - - static { - TIME_ZONE = new SimpleTimeZone(8 * 3600 * 1000, ""); - - DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd"); - DATE_FORMAT.setTimeZone(TIME_ZONE); - - DATETIME_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); - DATETIME_FORMAT.setTimeZone(TIME_ZONE); - - TIME_FORMAT = new SimpleDateFormat("HH"); - TIME_FORMAT.setTimeZone(TIME_ZONE); - - try { - MIN_DATE = DATE_FORMAT.parse("1900-01-01"); - MAX_DATE = DATE_FORMAT.parse("9999-12-31"); - - MIN_DATETIME = DATETIME_FORMAT.parse("1900-01-01 00:00:00"); - MAX_DATETIME = DATETIME_FORMAT.parse("9999-12-31 23:59:59"); - } catch (ParseException e) { - LOG.error("invalid date format", e); - System.exit(-1); - } - } - - public static long getStartTime() { - return System.nanoTime(); - } - - public static long getEstimatedTime(long startTime) { - return System.nanoTime() - startTime; - } - - public static synchronized String getCurrentFormatTime() { - return DATETIME_FORMAT.format(new Date()); - } - +import java.util.regex.Pattern; + +// TODO(dhc) add nanosecond timer for coordinator's root profile +public class TimeUtils { + private static final Logger LOG = LogManager.getLogger(TimeUtils.class); + + private static final TimeZone TIME_ZONE; + + // NOTICE: Date formats are not synchronized. + // it must be used as synchronized externally. + private static final SimpleDateFormat DATE_FORMAT; + private static final SimpleDateFormat DATETIME_FORMAT; + private static final SimpleDateFormat TIME_FORMAT; + + private static final Pattern DATETIME_FORMAT_REG = + Pattern.compile("^((\\d{2}(([02468][048])|([13579][26]))[\\-\\/\\s]?((((0?[13578])|(1[02]))[\\-\\/\\s]?" + + "((0?[1-9])|([1-2][0-9])|(3[01])))|(((0?[469])|(11))[\\-\\/\\s]?" + + "((0?[1-9])|([1-2][0-9])|(30)))|(0?2[\\-\\/\\s]?((0?[1-9])|([1-2][0-9])))))|(" + + "\\d{2}(([02468][1235679])|([13579][01345789]))[\\-\\/\\s]?((((0?[13578])|(1[02]))" + + "[\\-\\/\\s]?((0?[1-9])|([1-2][0-9])|(3[01])))|(((0?[469])|(11))[\\-\\/\\s]?" 
+ + "((0?[1-9])|([1-2][0-9])|(30)))|(0?2[\\-\\/\\s]?((0?[1-9])|(1[0-9])|(2[0-8]))))))" + + "(\\s(((0?[0-9])|([1][0-9])|([2][0-3]))\\:([0-5]?[0-9])((\\s)|(\\:([0-5]?[0-9])))))?$"); + + public static Date MIN_DATE = null; + public static Date MAX_DATE = null; + + public static Date MIN_DATETIME = null; + public static Date MAX_DATETIME = null; + + static { + TIME_ZONE = new SimpleTimeZone(8 * 3600 * 1000, ""); + + DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd"); + DATE_FORMAT.setTimeZone(TIME_ZONE); + + DATETIME_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + DATETIME_FORMAT.setTimeZone(TIME_ZONE); + + TIME_FORMAT = new SimpleDateFormat("HH"); + TIME_FORMAT.setTimeZone(TIME_ZONE); + + try { + MIN_DATE = DATE_FORMAT.parse("1900-01-01"); + MAX_DATE = DATE_FORMAT.parse("9999-12-31"); + + MIN_DATETIME = DATETIME_FORMAT.parse("1900-01-01 00:00:00"); + MAX_DATETIME = DATETIME_FORMAT.parse("9999-12-31 23:59:59"); + } catch (ParseException e) { + LOG.error("invalid date format", e); + System.exit(-1); + } + } + + public static long getStartTime() { + return System.nanoTime(); + } + + public static long getEstimatedTime(long startTime) { + return System.nanoTime() - startTime; + } + + public static synchronized String getCurrentFormatTime() { + return DATETIME_FORMAT.format(new Date()); + } + public static String longToTimeString(long timeStamp, SimpleDateFormat dateFormat) { if (timeStamp < 0L) { return "N/A"; @@ -111,88 +111,88 @@ public class TimeUtils { public static synchronized String longToTimeString(long timeStamp) { return longToTimeString(timeStamp, DATETIME_FORMAT); - } - - public static synchronized Date getTimeAsDate(String timeString) { - try { - Date date = TIME_FORMAT.parse(timeString); - return date; - } catch (ParseException e) { - LOG.warn("invalid time format: {}", timeString); - return null; - } - } - - public static synchronized Date parseDate(String dateStr, PrimitiveType type) throws AnalysisException { - Date date = null; - Matcher matcher = DATETIME_FORMAT_REG.matcher(dateStr); - if (!matcher.matches()) { - throw new AnalysisException("Invalid date string: " + dateStr); - } - if (type == PrimitiveType.DATE) { - ParsePosition pos = new ParsePosition(0); - date = DATE_FORMAT.parse(dateStr, pos); - if (pos.getIndex() != dateStr.length() || date == null) { - throw new AnalysisException("Invalid date string: " + dateStr); - } - } else if (type == PrimitiveType.DATETIME) { - try { - date = DATETIME_FORMAT.parse(dateStr); - } catch (ParseException e) { - throw new AnalysisException("Invalid date string: " + dateStr); - } - } else { - Preconditions.checkState(false, "error type: " + type); - } - - return date; - } - - public static synchronized Date parseDate(String dateStr, Type type) throws AnalysisException { - return parseDate(dateStr, type.getPrimitiveType()); - } - - public static synchronized String format(Date date, PrimitiveType type) { - if (type == PrimitiveType.DATE) { - return DATE_FORMAT.format(date); - } else if (type == PrimitiveType.DATETIME) { - return DATETIME_FORMAT.format(date); - } else { - return "INVALID"; - } - } - - public static synchronized String format(Date date, Type type) { - return format(date, type.getPrimitiveType()); - } - - /* - * only used for ETL - */ - public static long dateTransform(long time, PrimitiveType type) { - Calendar cal = Calendar.getInstance(TIME_ZONE); - cal.setTimeInMillis(time); - - int year = cal.get(Calendar.YEAR); - int month = cal.get(Calendar.MONTH) + 1; - int day = cal.get(Calendar.DAY_OF_MONTH); - - if (type == 
PrimitiveType.DATE) { - return year * 16 * 32L + month * 32 + day; - } else if (type == PrimitiveType.DATETIME) { - // datetime - int hour = cal.get(Calendar.HOUR_OF_DAY); - int minute = cal.get(Calendar.MINUTE); - int second = cal.get(Calendar.SECOND); - return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second; - } else { - Preconditions.checkState(false, "invalid date type: " + type); - return -1L; - } - } - - public static long dateTransform(long time, Type type) { - return dateTransform(time, type.getPrimitiveType()); + } + + public static synchronized Date getTimeAsDate(String timeString) { + try { + Date date = TIME_FORMAT.parse(timeString); + return date; + } catch (ParseException e) { + LOG.warn("invalid time format: {}", timeString); + return null; + } + } + + public static synchronized Date parseDate(String dateStr, PrimitiveType type) throws AnalysisException { + Date date = null; + Matcher matcher = DATETIME_FORMAT_REG.matcher(dateStr); + if (!matcher.matches()) { + throw new AnalysisException("Invalid date string: " + dateStr); + } + if (type == PrimitiveType.DATE) { + ParsePosition pos = new ParsePosition(0); + date = DATE_FORMAT.parse(dateStr, pos); + if (pos.getIndex() != dateStr.length() || date == null) { + throw new AnalysisException("Invalid date string: " + dateStr); + } + } else if (type == PrimitiveType.DATETIME) { + try { + date = DATETIME_FORMAT.parse(dateStr); + } catch (ParseException e) { + throw new AnalysisException("Invalid date string: " + dateStr); + } + } else { + Preconditions.checkState(false, "error type: " + type); + } + + return date; + } + + public static synchronized Date parseDate(String dateStr, Type type) throws AnalysisException { + return parseDate(dateStr, type.getPrimitiveType()); + } + + public static synchronized String format(Date date, PrimitiveType type) { + if (type == PrimitiveType.DATE) { + return DATE_FORMAT.format(date); + } else if (type == PrimitiveType.DATETIME) { + return DATETIME_FORMAT.format(date); + } else { + return "INVALID"; + } + } + + public static synchronized String format(Date date, Type type) { + return format(date, type.getPrimitiveType()); + } + + /* + * only used for ETL + */ + public static long dateTransform(long time, PrimitiveType type) { + Calendar cal = Calendar.getInstance(TIME_ZONE); + cal.setTimeInMillis(time); + + int year = cal.get(Calendar.YEAR); + int month = cal.get(Calendar.MONTH) + 1; + int day = cal.get(Calendar.DAY_OF_MONTH); + + if (type == PrimitiveType.DATE) { + return year * 16 * 32L + month * 32 + day; + } else if (type == PrimitiveType.DATETIME) { + // datetime + int hour = cal.get(Calendar.HOUR_OF_DAY); + int minute = cal.get(Calendar.MINUTE); + int second = cal.get(Calendar.SECOND); + return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second; + } else { + Preconditions.checkState(false, "invalid date type: " + type); + return -1L; + } + } + + public static long dateTransform(long time, Type type) { + return dateTransform(time, type.getPrimitiveType()); } public static long timeStringToLong(String timeStr) { @@ -203,5 +203,5 @@ public class TimeUtils { return -1; } return d.getTime(); - } -} + } +} diff --git a/fe/src/com/baidu/palo/common/util/Util.java b/fe/src/com/baidu/palo/common/util/Util.java index 086c18a1a7..0b9f3f9332 100644 --- a/fe/src/com/baidu/palo/common/util/Util.java +++ b/fe/src/com/baidu/palo/common/util/Util.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the 
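TimeUtils.dateTransform above packs a timestamp into a single long: DATE values as year * 16 * 32 + month * 32 + day, and DATETIME values as a decimal-packed yyyyMMddHHmmss. A small stand-alone sketch of those two encodings, handy for checking the arithmetic outside the FE; the class and method names here are illustrative only.

public class DateTransformSketch {
    // Re-implements the encodings used by TimeUtils.dateTransform so they can be verified in isolation.
    static long encodeDate(int year, int month, int day) {
        // DATE: 512 (= 16 * 32) per year, 32 per month, plus the day.
        return year * 16 * 32L + month * 32 + day;
    }

    static long encodeDatetime(int year, int month, int day, int hour, int minute, int second) {
        // DATETIME: decimal-packed yyyyMMddHHmmss as a single long.
        return (year * 10000 + month * 100 + day) * 1000000L + hour * 10000 + minute * 100 + second;
    }

    public static void main(String[] args) {
        System.out.println(encodeDate(2018, 10, 1));                 // 2018 * 512 + 10 * 32 + 1 = 1033537
        System.out.println(encodeDatetime(2018, 10, 1, 19, 58, 41)); // 20181001195841
    }
}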
License. -package com.baidu.palo.common.util; - +package com.baidu.palo.common.util; + import com.baidu.palo.catalog.Column; import com.baidu.palo.catalog.PrimitiveType; import com.baidu.palo.common.Config; @@ -40,267 +40,267 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.zip.Adler32; - -public class Util { - private static final Logger LOG = LogManager.getLogger(Util.class); - private static final Map TYPE_STRING_MAP = new HashMap(); - - private static final long DEFAULT_EXEC_CMD_TIMEOUT_MS = 600000L; - - static { - TYPE_STRING_MAP.put(PrimitiveType.TINYINT, "tinyint(4)"); - TYPE_STRING_MAP.put(PrimitiveType.SMALLINT, "smallint(6)"); - TYPE_STRING_MAP.put(PrimitiveType.INT, "int(11)"); - TYPE_STRING_MAP.put(PrimitiveType.BIGINT, "bigint(20)"); - TYPE_STRING_MAP.put(PrimitiveType.LARGEINT, "largeint(40)"); - TYPE_STRING_MAP.put(PrimitiveType.FLOAT, "float"); - TYPE_STRING_MAP.put(PrimitiveType.DOUBLE, "double"); - TYPE_STRING_MAP.put(PrimitiveType.DATE, "date"); - TYPE_STRING_MAP.put(PrimitiveType.DATETIME, "datetime"); - TYPE_STRING_MAP.put(PrimitiveType.CHAR, "char(%d)"); - TYPE_STRING_MAP.put(PrimitiveType.VARCHAR, "varchar(%d)"); - TYPE_STRING_MAP.put(PrimitiveType.DECIMAL, "decimal(%d,%d)"); - TYPE_STRING_MAP.put(PrimitiveType.HLL, "varchar(%d)"); - } - - private static class CmdWorker extends Thread { - private final Process process; - private Integer exitValue; - - private StringBuffer outBuffer; - private StringBuffer errBuffer; - - public CmdWorker(final Process process) { - this.process = process; - this.outBuffer = new StringBuffer(); - this.errBuffer = new StringBuffer(); - } - - public Integer getExitValue() { - return exitValue; - } - - public String getStdOut() { - return this.outBuffer.toString(); - } - - public String getErrOut() { - return this.errBuffer.toString(); - } - - @Override - public void run() { - BufferedReader outReader = null; - BufferedReader errReader = null; - String line = null; - try { - outReader = new BufferedReader(new InputStreamReader(process.getInputStream())); - while ((line = outReader.readLine()) != null) { - outBuffer.append(line + '\n'); - } - - errReader = new BufferedReader(new InputStreamReader(process.getErrorStream())); - while ((line = errReader.readLine()) != null) { - errBuffer.append(line + '\n'); - } - - exitValue = process.waitFor(); - } catch (InterruptedException e) { - LOG.warn("get exception", e); - } catch (IOException e) { - LOG.warn("get exception", e); - } finally { - try { - if (outReader != null) { - outReader.close(); - } - if (errReader != null) { - errReader.close(); - } - } catch (IOException e) { - LOG.warn("close buffered reader error", e); - } - } - } - } - - public static CommandResult executeCommand(String cmd, String[] envp) { - return executeCommand(cmd, envp, DEFAULT_EXEC_CMD_TIMEOUT_MS); - } - - public static CommandResult executeCommand(String cmd, String[] envp, long timeoutMs) { - CommandResult result = new CommandResult(); - List cmdList = shellSplit(cmd); - String[] cmds = cmdList.toArray(new String[0]); +import java.util.zip.Adler32; + +public class Util { + private static final Logger LOG = LogManager.getLogger(Util.class); + private static final Map TYPE_STRING_MAP = new HashMap(); + + private static final long DEFAULT_EXEC_CMD_TIMEOUT_MS = 600000L; + + static { + TYPE_STRING_MAP.put(PrimitiveType.TINYINT, "tinyint(4)"); + TYPE_STRING_MAP.put(PrimitiveType.SMALLINT, "smallint(6)"); + TYPE_STRING_MAP.put(PrimitiveType.INT, "int(11)"); + 
TYPE_STRING_MAP.put(PrimitiveType.BIGINT, "bigint(20)"); + TYPE_STRING_MAP.put(PrimitiveType.LARGEINT, "largeint(40)"); + TYPE_STRING_MAP.put(PrimitiveType.FLOAT, "float"); + TYPE_STRING_MAP.put(PrimitiveType.DOUBLE, "double"); + TYPE_STRING_MAP.put(PrimitiveType.DATE, "date"); + TYPE_STRING_MAP.put(PrimitiveType.DATETIME, "datetime"); + TYPE_STRING_MAP.put(PrimitiveType.CHAR, "char(%d)"); + TYPE_STRING_MAP.put(PrimitiveType.VARCHAR, "varchar(%d)"); + TYPE_STRING_MAP.put(PrimitiveType.DECIMAL, "decimal(%d,%d)"); + TYPE_STRING_MAP.put(PrimitiveType.HLL, "varchar(%d)"); + } + + private static class CmdWorker extends Thread { + private final Process process; + private Integer exitValue; + + private StringBuffer outBuffer; + private StringBuffer errBuffer; + + public CmdWorker(final Process process) { + this.process = process; + this.outBuffer = new StringBuffer(); + this.errBuffer = new StringBuffer(); + } + + public Integer getExitValue() { + return exitValue; + } + + public String getStdOut() { + return this.outBuffer.toString(); + } + + public String getErrOut() { + return this.errBuffer.toString(); + } + + @Override + public void run() { + BufferedReader outReader = null; + BufferedReader errReader = null; + String line = null; + try { + outReader = new BufferedReader(new InputStreamReader(process.getInputStream())); + while ((line = outReader.readLine()) != null) { + outBuffer.append(line + '\n'); + } + + errReader = new BufferedReader(new InputStreamReader(process.getErrorStream())); + while ((line = errReader.readLine()) != null) { + errBuffer.append(line + '\n'); + } + + exitValue = process.waitFor(); + } catch (InterruptedException e) { + LOG.warn("get exception", e); + } catch (IOException e) { + LOG.warn("get exception", e); + } finally { + try { + if (outReader != null) { + outReader.close(); + } + if (errReader != null) { + errReader.close(); + } + } catch (IOException e) { + LOG.warn("close buffered reader error", e); + } + } + } + } + + public static CommandResult executeCommand(String cmd, String[] envp) { + return executeCommand(cmd, envp, DEFAULT_EXEC_CMD_TIMEOUT_MS); + } + + public static CommandResult executeCommand(String cmd, String[] envp, long timeoutMs) { + CommandResult result = new CommandResult(); + List cmdList = shellSplit(cmd); + String[] cmds = cmdList.toArray(new String[0]); + + try { + Process p = Runtime.getRuntime().exec(cmds, envp); + CmdWorker cmdWorker = new CmdWorker(p); + cmdWorker.start(); + + Integer exitValue = -1; + try { + cmdWorker.join(timeoutMs); + exitValue = cmdWorker.getExitValue(); + if (exitValue == null) { + // if we get this far then we never got an exit value from the worker thread + // as a result of a timeout + LOG.warn("exec command [{}] timed out.", cmd); + exitValue = -1; + } + } catch (InterruptedException ex) { + cmdWorker.interrupt(); + Thread.currentThread().interrupt(); + throw ex; + } finally { + p.destroy(); + } + + result.setReturnCode(exitValue); + result.setStdout(cmdWorker.getStdOut()); + result.setStderr(cmdWorker.getErrOut()); + } catch (IOException e) { + LOG.warn("execute command error", e); + } catch (InterruptedException e) { + LOG.warn("execute command error", e); + } + + return result; + } + + public static List shellSplit(CharSequence string) { + List tokens = new ArrayList(); + boolean escaping = false; + char quoteChar = ' '; + boolean quoting = false; + StringBuilder current = new StringBuilder() ; + for (int i = 0; i 0) { + tokens.add(current.toString()); + current = new StringBuilder(); + } + } else { + 
current.append(c); + } + } + if (current.length() > 0) { + tokens.add(current.toString()); + } + return tokens; + } + + public static int schemaHash(int schemaVersion, List columns, Set bfColumns, double bfFpp) { + Adler32 adler32 = new Adler32(); + adler32.update(schemaVersion); + String charsetName = "UTF-8"; + try { + List indexColumnNames = Lists.newArrayList(); + List bfColumnNames = Lists.newArrayList(); + // columns + for (Column column : columns) { + adler32.update(column.getName().getBytes(charsetName)); + PrimitiveType dataType = column.getDataType(); + String typeString = null; + switch (dataType) { + case CHAR: + typeString = String.format( + TYPE_STRING_MAP.get(dataType), column.getStrLen()); + break; + case VARCHAR: + typeString = String.format( + TYPE_STRING_MAP.get(dataType), column.getStrLen()); + break; + case DECIMAL: + typeString = String.format( + TYPE_STRING_MAP.get(dataType), column.getPrecision(), + column.getScale()); + break; + default: + typeString = TYPE_STRING_MAP.get(dataType); + break; + } + adler32.update(typeString.getBytes(charsetName)); + + String columnName = column.getName(); + if (column.isKey()) { + indexColumnNames.add(columnName); + } + + if (bfColumns != null && bfColumns.contains(columnName)) { + bfColumnNames.add(columnName); + } + } + + // index column name + for (String columnName : indexColumnNames) { + adler32.update(columnName.getBytes(charsetName)); + } + + // bloom filter index + if (!bfColumnNames.isEmpty()) { + // bf column name + for (String columnName : bfColumnNames) { + adler32.update(columnName.getBytes(charsetName)); + } + + // bf fpp + String bfFppStr = String.valueOf(bfFpp); + adler32.update(bfFppStr.getBytes(charsetName)); + } + } catch (UnsupportedEncodingException e) { + LOG.error("encoding error", e); + return -1; + } + + return Math.abs((int) adler32.getValue()); + } + + /** + * Chooses k unique random elements from a population sequence + */ + public static List sample(List population, int kNum) { + if (population.isEmpty() || population.size() < kNum) { + return null; + } + + Collections.shuffle(population); + return population.subList(0, kNum); + } + + /** + * Delete directory and all contents in this directory + */ + public static boolean deleteDirectory(File directory) { + if (!directory.exists()) { + return true; + } + + if (directory.isDirectory()) { + File[] files = directory.listFiles(); + if (null != files) { + for (int i = 0; i < files.length; i++) { + if (files[i].isDirectory()) { + deleteDirectory(files[i]); + } else { + files[i].delete(); + } + } + } + } + return directory.delete(); + } +} - try { - Process p = Runtime.getRuntime().exec(cmds, envp); - CmdWorker cmdWorker = new CmdWorker(p); - cmdWorker.start(); - - Integer exitValue = -1; - try { - cmdWorker.join(timeoutMs); - exitValue = cmdWorker.getExitValue(); - if (exitValue == null) { - // if we get this far then we never got an exit value from the worker thread - // as a result of a timeout - LOG.warn("exec command [{}] timed out.", cmd); - exitValue = -1; - } - } catch (InterruptedException ex) { - cmdWorker.interrupt(); - Thread.currentThread().interrupt(); - throw ex; - } finally { - p.destroy(); - } - - result.setReturnCode(exitValue); - result.setStdout(cmdWorker.getStdOut()); - result.setStderr(cmdWorker.getErrOut()); - } catch (IOException e) { - LOG.warn("execute command error", e); - } catch (InterruptedException e) { - LOG.warn("execute command error", e); - } - - return result; - } - - public static List shellSplit(CharSequence string) { - 
List tokens = new ArrayList(); - boolean escaping = false; - char quoteChar = ' '; - boolean quoting = false; - StringBuilder current = new StringBuilder() ; - for (int i = 0; i 0) { - tokens.add(current.toString()); - current = new StringBuilder(); - } - } else { - current.append(c); - } - } - if (current.length() > 0) { - tokens.add(current.toString()); - } - return tokens; - } - - public static int schemaHash(int schemaVersion, List columns, Set bfColumns, double bfFpp) { - Adler32 adler32 = new Adler32(); - adler32.update(schemaVersion); - String charsetName = "UTF-8"; - try { - List indexColumnNames = Lists.newArrayList(); - List bfColumnNames = Lists.newArrayList(); - // columns - for (Column column : columns) { - adler32.update(column.getName().getBytes(charsetName)); - PrimitiveType dataType = column.getDataType(); - String typeString = null; - switch (dataType) { - case CHAR: - typeString = String.format( - TYPE_STRING_MAP.get(dataType), column.getStrLen()); - break; - case VARCHAR: - typeString = String.format( - TYPE_STRING_MAP.get(dataType), column.getStrLen()); - break; - case DECIMAL: - typeString = String.format( - TYPE_STRING_MAP.get(dataType), column.getPrecision(), - column.getScale()); - break; - default: - typeString = TYPE_STRING_MAP.get(dataType); - break; - } - adler32.update(typeString.getBytes(charsetName)); - - String columnName = column.getName(); - if (column.isKey()) { - indexColumnNames.add(columnName); - } - - if (bfColumns != null && bfColumns.contains(columnName)) { - bfColumnNames.add(columnName); - } - } - - // index column name - for (String columnName : indexColumnNames) { - adler32.update(columnName.getBytes(charsetName)); - } - - // bloom filter index - if (!bfColumnNames.isEmpty()) { - // bf column name - for (String columnName : bfColumnNames) { - adler32.update(columnName.getBytes(charsetName)); - } - - // bf fpp - String bfFppStr = String.valueOf(bfFpp); - adler32.update(bfFppStr.getBytes(charsetName)); - } - } catch (UnsupportedEncodingException e) { - LOG.error("encoding error", e); - return -1; - } - - return Math.abs((int) adler32.getValue()); - } - - /** - * Chooses k unique random elements from a population sequence - */ - public static List sample(List population, int kNum) { - if (population.isEmpty() || population.size() < kNum) { - return null; - } - - Collections.shuffle(population); - return population.subList(0, kNum); - } - - /** - * Delete directory and all contents in this directory - */ - public static boolean deleteDirectory(File directory) { - if (!directory.exists()) { - return true; - } - - if (directory.isDirectory()) { - File[] files = directory.listFiles(); - if (null != files) { - for (int i = 0; i < files.length; i++) { - if (files[i].isDirectory()) { - deleteDirectory(files[i]); - } else { - files[i].delete(); - } - } - } - } - return directory.delete(); - } -} - diff --git a/fe/src/com/baidu/palo/ha/BDBHA.java b/fe/src/com/baidu/palo/ha/BDBHA.java index 8ab589c2e8..14bde20732 100644 --- a/fe/src/com/baidu/palo/ha/BDBHA.java +++ b/fe/src/com/baidu/palo/ha/BDBHA.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.ha; - +package com.baidu.palo.ha; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.journal.bdbje.BDBEnvironment; @@ -36,174 +36,174 @@ import org.apache.logging.log4j.Logger; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; -import java.util.Set; - -public class BDBHA implements HAProtocol { - private static final Logger LOG = LogManager.getLogger(BDBHA.class); - - private BDBEnvironment environment; - private String nodeName; - private static final int RETRY_TIME = 3; - - public BDBHA(BDBEnvironment env, String nodeName) { - this.environment = env; - this.nodeName = nodeName; - } - - @Override - public long getEpochNumber() { - return 0; - } - - @Override - public boolean fencing() { - Database epochDb = environment.getEpochDB(); - - for (int i = 0; i < RETRY_TIME; i++) { - try { - long count = epochDb.count(); - long myEpoch = count + 1; - LOG.info("start fencing, epoch number is {}", myEpoch); - Long key = new Long(myEpoch); - DatabaseEntry theKey = new DatabaseEntry(); - TupleBinding idBinding = TupleBinding.getPrimitiveBinding(Long.class); - idBinding.objectToEntry(key, theKey); - DatabaseEntry theData = new DatabaseEntry(new byte[1]); - OperationStatus status = epochDb.putNoOverwrite(null, theKey, theData); - if (status == OperationStatus.SUCCESS) { - Catalog.getInstance().setEpoch(myEpoch); - return true; - } else if (status == OperationStatus.KEYEXIST) { - return false; - } else { - Exception e = new Exception(status.toString()); - throw e; - } - } catch (Exception e) { - LOG.error("fencing failed. tried {} times", i, e); - if (i < RETRY_TIME) { - try { - Thread.sleep(2000); - } catch (InterruptedException e1) { - e1.printStackTrace(); - } - continue; - } - } - } - return false; - } - - @Override - public List getObserverNodes() { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - if (replicationGroupAdmin == null) { - return null; - } - List ret = new ArrayList(); - try { - ReplicationGroup replicationGroup = replicationGroupAdmin.getGroup(); - for (ReplicationNode replicationNode : replicationGroup.getSecondaryNodes()) { - ret.add(replicationNode.getSocketAddress()); - } - } catch (UnknownMasterException e) { - LOG.warn("Catch UnknownMasterException when calling getObserverNodes.", e); - return null; - } - return ret; - } - - @Override - public List getElectableNodes(boolean leaderIncluded) { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - if (replicationGroupAdmin == null) { - return null; - } - List ret = new ArrayList(); +import java.util.Set; + +public class BDBHA implements HAProtocol { + private static final Logger LOG = LogManager.getLogger(BDBHA.class); + + private BDBEnvironment environment; + private String nodeName; + private static final int RETRY_TIME = 3; + + public BDBHA(BDBEnvironment env, String nodeName) { + this.environment = env; + this.nodeName = nodeName; + } + + @Override + public long getEpochNumber() { + return 0; + } + + @Override + public boolean fencing() { + Database epochDb = environment.getEpochDB(); + + for (int i = 0; i < RETRY_TIME; i++) { + try { + long count = epochDb.count(); + long myEpoch = count + 1; + LOG.info("start fencing, epoch number is {}", myEpoch); + Long key = new Long(myEpoch); + DatabaseEntry theKey = new DatabaseEntry(); + TupleBinding idBinding = TupleBinding.getPrimitiveBinding(Long.class); + idBinding.objectToEntry(key, theKey); + DatabaseEntry theData = new 
DatabaseEntry(new byte[1]); + OperationStatus status = epochDb.putNoOverwrite(null, theKey, theData); + if (status == OperationStatus.SUCCESS) { + Catalog.getInstance().setEpoch(myEpoch); + return true; + } else if (status == OperationStatus.KEYEXIST) { + return false; + } else { + Exception e = new Exception(status.toString()); + throw e; + } + } catch (Exception e) { + LOG.error("fencing failed. tried {} times", i, e); + if (i < RETRY_TIME) { + try { + Thread.sleep(2000); + } catch (InterruptedException e1) { + e1.printStackTrace(); + } + continue; + } + } + } + return false; + } + + @Override + public List getObserverNodes() { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + if (replicationGroupAdmin == null) { + return null; + } + List ret = new ArrayList(); try { ReplicationGroup replicationGroup = replicationGroupAdmin.getGroup(); - for (ReplicationNode replicationNode : replicationGroup.getElectableNodes()) { - if (leaderIncluded) { - ret.add(replicationNode.getSocketAddress()); - } else { - if (!replicationNode.getName().equals(replicationGroupAdmin.getMasterNodeName())) { - ret.add(replicationNode.getSocketAddress()); - } - } - } - } catch (UnknownMasterException e) { - LOG.warn("Catch UnknownMasterException when calling getElectableNodes.", e); - return null; - } - return ret; - } - - @Override - public InetSocketAddress getLeader() { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - String leaderName = replicationGroupAdmin.getMasterNodeName(); - ReplicationGroup rg = replicationGroupAdmin.getGroup(); - ReplicationNode rn = rg.getMember(leaderName); - return rn.getSocketAddress(); - } - - @Override - public List getNoneLeaderNodes() { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - if (replicationGroupAdmin == null) { - return null; - } - List ret = new ArrayList(); - try { - ReplicationGroup replicationGroup = replicationGroupAdmin.getGroup(); - for (ReplicationNode replicationNode : replicationGroup.getSecondaryNodes()) { - ret.add(replicationNode.getSocketAddress()); - } - for (ReplicationNode replicationNode : replicationGroup.getElectableNodes()) { - if (!replicationNode.getName().equals(replicationGroupAdmin.getMasterNodeName())) { - ret.add(replicationNode.getSocketAddress()); - } - } - } catch (UnknownMasterException e) { - LOG.warn("Catch UnknownMasterException when calling getNoneLeaderNodes.", e); - return null; - } - return ret; - } - - @Override - public void transferToMaster() { - - } - - @Override - public void transferToNonMaster() { - - } - - @Override - public boolean isLeader() { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - String leaderName = replicationGroupAdmin.getMasterNodeName(); - return leaderName.equals(nodeName); - } - - @Override - public boolean removeElectableNode(String nodeName) { - ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); - if (replicationGroupAdmin == null) { - return false; - } - try { - replicationGroupAdmin.removeMember(nodeName); - } catch (MemberNotFoundException e) { - LOG.error("the deleting electable node is not found {}", nodeName, e); - return false; - } catch (MasterStateException e) { - LOG.error("the deleting electable node is master {}", nodeName, e); - return false; - } - return true; + for (ReplicationNode replicationNode : replicationGroup.getSecondaryNodes()) { + ret.add(replicationNode.getSocketAddress()); + } + } 
catch (UnknownMasterException e) { + LOG.warn("Catch UnknownMasterException when calling getObserverNodes.", e); + return null; + } + return ret; + } + + @Override + public List getElectableNodes(boolean leaderIncluded) { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + if (replicationGroupAdmin == null) { + return null; + } + List ret = new ArrayList(); + try { + ReplicationGroup replicationGroup = replicationGroupAdmin.getGroup(); + for (ReplicationNode replicationNode : replicationGroup.getElectableNodes()) { + if (leaderIncluded) { + ret.add(replicationNode.getSocketAddress()); + } else { + if (!replicationNode.getName().equals(replicationGroupAdmin.getMasterNodeName())) { + ret.add(replicationNode.getSocketAddress()); + } + } + } + } catch (UnknownMasterException e) { + LOG.warn("Catch UnknownMasterException when calling getElectableNodes.", e); + return null; + } + return ret; + } + + @Override + public InetSocketAddress getLeader() { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + String leaderName = replicationGroupAdmin.getMasterNodeName(); + ReplicationGroup rg = replicationGroupAdmin.getGroup(); + ReplicationNode rn = rg.getMember(leaderName); + return rn.getSocketAddress(); + } + + @Override + public List getNoneLeaderNodes() { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + if (replicationGroupAdmin == null) { + return null; + } + List ret = new ArrayList(); + try { + ReplicationGroup replicationGroup = replicationGroupAdmin.getGroup(); + for (ReplicationNode replicationNode : replicationGroup.getSecondaryNodes()) { + ret.add(replicationNode.getSocketAddress()); + } + for (ReplicationNode replicationNode : replicationGroup.getElectableNodes()) { + if (!replicationNode.getName().equals(replicationGroupAdmin.getMasterNodeName())) { + ret.add(replicationNode.getSocketAddress()); + } + } + } catch (UnknownMasterException e) { + LOG.warn("Catch UnknownMasterException when calling getNoneLeaderNodes.", e); + return null; + } + return ret; + } + + @Override + public void transferToMaster() { + + } + + @Override + public void transferToNonMaster() { + + } + + @Override + public boolean isLeader() { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + String leaderName = replicationGroupAdmin.getMasterNodeName(); + return leaderName.equals(nodeName); + } + + @Override + public boolean removeElectableNode(String nodeName) { + ReplicationGroupAdmin replicationGroupAdmin = environment.getReplicationGroupAdmin(); + if (replicationGroupAdmin == null) { + return false; + } + try { + replicationGroupAdmin.removeMember(nodeName); + } catch (MemberNotFoundException e) { + LOG.error("the deleting electable node is not found {}", nodeName, e); + return false; + } catch (MasterStateException e) { + LOG.error("the deleting electable node is master {}", nodeName, e); + return false; + } + return true; } // When new Follower FE is added to the cluster, it should also be added to the helper sockets in @@ -222,5 +222,5 @@ public class BDBHA implements HAProtocol { environment.setNewReplicationGroupAdmin(helperSockets); LOG.info("add {}:{} to helper sockets", ip, port); } - } -} + } +} diff --git a/fe/src/com/baidu/palo/ha/BDBStateChangeListener.java b/fe/src/com/baidu/palo/ha/BDBStateChangeListener.java index 946f7d2895..69ea80aeca 100644 --- a/fe/src/com/baidu/palo/ha/BDBStateChangeListener.java +++ 
b/fe/src/com/baidu/palo/ha/BDBStateChangeListener.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.ha; - +package com.baidu.palo.ha; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.persist.EditLog; @@ -22,90 +22,90 @@ import com.sleepycat.je.rep.StateChangeEvent; import com.sleepycat.je.rep.StateChangeListener; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -public class BDBStateChangeListener implements StateChangeListener { - public static final Logger LOG = LogManager.getLogger(EditLog.class); - - public BDBStateChangeListener() { - } - - @Override +import org.apache.logging.log4j.Logger; + +public class BDBStateChangeListener implements StateChangeListener { + public static final Logger LOG = LogManager.getLogger(EditLog.class); + + public BDBStateChangeListener() { + } + + @Override public synchronized void stateChange(StateChangeEvent sce) throws RuntimeException { - FrontendNodeType originalType = Catalog.getInstance().getFeType(); - switch (sce.getState()) { - case MASTER: { - String msg = "transfer from " + originalType.name() + " to MASTER"; - System.out.println(msg); - LOG.warn(msg); - if (originalType == FrontendNodeType.MASTER) { - return; - } - Catalog.getInstance().setFeType(FrontendNodeType.MASTER); - break; - } - case REPLICA: { - if (originalType == FrontendNodeType.MASTER) { - String errMsg = "master transfer to REPLICA, will exit"; - LOG.error(errMsg); - System.out.println(errMsg); - System.exit(-1); - } - - if (Catalog.getInstance().isElectable()) { - String msg = "transfer from " + originalType.name() + " to FOLLOWER"; - System.out.println(msg); - LOG.warn(msg); - Catalog.getInstance().setFeType(FrontendNodeType.FOLLOWER); - } else { - String msg = "transfer from " + originalType.name() + " to OBSERVER"; - System.out.println(msg); - LOG.warn(msg); - Catalog.getInstance().setFeType(FrontendNodeType.OBSERVER); - } - break; - } - case UNKNOWN: { - if (originalType == FrontendNodeType.MASTER) { - String errMsg = "master transfer to UNKNOWN, will exit"; - LOG.error(errMsg); - System.out.println(errMsg); - System.exit(-1); - } - - if (originalType == FrontendNodeType.FOLLOWER) { - if (Catalog.getInstance().isElectable()) { - String msg = "transfer from FOLLOWER to UNKNOWN"; - System.out.println(msg); - LOG.warn(msg); - } else { - String msg = "transfer from OBSERVER to UNKNOWN"; - System.out.println(msg); - LOG.warn(msg); - } - } else { - String msg = "transfer from " + originalType.name() + " to UNKNOWN"; - System.out.println(msg); - LOG.warn(msg); - } - - Catalog.getInstance().setFeType(FrontendNodeType.UNKNOWN); - break; - } - default: { - if (originalType == FrontendNodeType.MASTER) { - String errMsg = "master transfer to DETACHED, will exit"; - LOG.error(errMsg); - System.out.println(errMsg); - System.exit(-1); - } - - String errMsg = "this node is DETACHED."; - LOG.error(errMsg); - System.out.println(errMsg); - break; - } - } - } - -} + FrontendNodeType originalType = Catalog.getInstance().getFeType(); + switch (sce.getState()) { + case MASTER: { + String msg = "transfer from " + originalType.name() + " to MASTER"; + System.out.println(msg); + LOG.warn(msg); + if (originalType == FrontendNodeType.MASTER) { + return; + } + Catalog.getInstance().setFeType(FrontendNodeType.MASTER); + break; + } + case REPLICA: { + if (originalType == FrontendNodeType.MASTER) { + String errMsg = "master transfer to REPLICA, will exit"; + LOG.error(errMsg); 
+ System.out.println(errMsg); + System.exit(-1); + } + + if (Catalog.getInstance().isElectable()) { + String msg = "transfer from " + originalType.name() + " to FOLLOWER"; + System.out.println(msg); + LOG.warn(msg); + Catalog.getInstance().setFeType(FrontendNodeType.FOLLOWER); + } else { + String msg = "transfer from " + originalType.name() + " to OBSERVER"; + System.out.println(msg); + LOG.warn(msg); + Catalog.getInstance().setFeType(FrontendNodeType.OBSERVER); + } + break; + } + case UNKNOWN: { + if (originalType == FrontendNodeType.MASTER) { + String errMsg = "master transfer to UNKNOWN, will exit"; + LOG.error(errMsg); + System.out.println(errMsg); + System.exit(-1); + } + + if (originalType == FrontendNodeType.FOLLOWER) { + if (Catalog.getInstance().isElectable()) { + String msg = "transfer from FOLLOWER to UNKNOWN"; + System.out.println(msg); + LOG.warn(msg); + } else { + String msg = "transfer from OBSERVER to UNKNOWN"; + System.out.println(msg); + LOG.warn(msg); + } + } else { + String msg = "transfer from " + originalType.name() + " to UNKNOWN"; + System.out.println(msg); + LOG.warn(msg); + } + + Catalog.getInstance().setFeType(FrontendNodeType.UNKNOWN); + break; + } + default: { + if (originalType == FrontendNodeType.MASTER) { + String errMsg = "master transfer to DETACHED, will exit"; + LOG.error(errMsg); + System.out.println(errMsg); + System.exit(-1); + } + + String errMsg = "this node is DETACHED."; + LOG.error(errMsg); + System.out.println(errMsg); + break; + } + } + } + +} diff --git a/fe/src/com/baidu/palo/ha/FrontendNodeType.java b/fe/src/com/baidu/palo/ha/FrontendNodeType.java index 8aa6007967..7252d93fa6 100644 --- a/fe/src/com/baidu/palo/ha/FrontendNodeType.java +++ b/fe/src/com/baidu/palo/ha/FrontendNodeType.java @@ -13,13 +13,13 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.ha; - -public enum FrontendNodeType { - MASTER, - FOLLOWER, - OBSERVER, - REPLICA, - INIT, - UNKNOWN +package com.baidu.palo.ha; + +public enum FrontendNodeType { + MASTER, + FOLLOWER, + OBSERVER, + REPLICA, + INIT, + UNKNOWN } \ No newline at end of file diff --git a/fe/src/com/baidu/palo/ha/HAProtocol.java b/fe/src/com/baidu/palo/ha/HAProtocol.java index 33fec7e446..3903f2f4a7 100644 --- a/fe/src/com/baidu/palo/ha/HAProtocol.java +++ b/fe/src/com/baidu/palo/ha/HAProtocol.java @@ -13,39 +13,39 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.ha;
-
-import java.net.InetSocketAddress;
-import java.util.List;
-
-public interface HAProtocol {
-    // get current epoch number
-    public long getEpochNumber();
-
-    // increase epoch number by one
-    public boolean fencing();
-
-    // get observer nodes in the current group
-    public List<InetSocketAddress> getObserverNodes();
-
-    // get replica nodes in the current group
-    public List<InetSocketAddress> getElectableNodes(boolean leaderIncluded);
-
-    // get the leader of current group
-    public InetSocketAddress getLeader();
-
-    // get all the nodes except leader in the current group
-    public List<InetSocketAddress> getNoneLeaderNodes();
-
-    // transfer from nonMaster(unknown, follower or init) to master
-    public void transferToMaster();
-
-    // transfer to non-master
-    public void transferToNonMaster();
-
-    // check if the current node is leader
-    public boolean isLeader();
-
-    // remove a node from the group
-    public boolean removeElectableNode(String nodeName);
-}
+package com.baidu.palo.ha;
+
+import java.net.InetSocketAddress;
+import java.util.List;
+
+public interface HAProtocol {
+    // get current epoch number
+    public long getEpochNumber();
+
+    // increase epoch number by one
+    public boolean fencing();
+
+    // get observer nodes in the current group
+    public List<InetSocketAddress> getObserverNodes();
+
+    // get replica nodes in the current group
+    public List<InetSocketAddress> getElectableNodes(boolean leaderIncluded);
+
+    // get the leader of current group
+    public InetSocketAddress getLeader();
+
+    // get all the nodes except leader in the current group
+    public List<InetSocketAddress> getNoneLeaderNodes();
+
+    // transfer from nonMaster(unknown, follower or init) to master
+    public void transferToMaster();
+
+    // transfer to non-master
+    public void transferToNonMaster();
+
+    // check if the current node is leader
+    public boolean isLeader();
+
+    // remove a node from the group
+    public boolean removeElectableNode(String nodeName);
+}
diff --git a/fe/src/com/baidu/palo/ha/MasterInfo.java b/fe/src/com/baidu/palo/ha/MasterInfo.java
index 0a45d4f652..9df4066ea2 100644
--- a/fe/src/com/baidu/palo/ha/MasterInfo.java
+++ b/fe/src/com/baidu/palo/ha/MasterInfo.java
@@ -13,25 +13,25 @@
 // specific language governing permissions and limitations
 // under the License.
-package com.baidu.palo.ha; - +package com.baidu.palo.ha; + import com.baidu.palo.common.io.Text; import com.baidu.palo.common.io.Writable; import java.io.DataInput; import java.io.DataOutput; -import java.io.IOException; - -public class MasterInfo implements Writable { - - private String ip; - private int httpPort; +import java.io.IOException; + +public class MasterInfo implements Writable { + + private String ip; + private int httpPort; private int rpcPort; - - public MasterInfo() { - this.ip = ""; - this.httpPort = 0; - this.rpcPort = 0; + + public MasterInfo() { + this.ip = ""; + this.httpPort = 0; + this.rpcPort = 0; } public MasterInfo(String ip, int httpPort, int rpcPort) { @@ -39,43 +39,43 @@ public class MasterInfo implements Writable { this.httpPort = httpPort; this.rpcPort = rpcPort; } - - public String getIp() { - return this.ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public int getHttpPort() { - return this.httpPort; - } - - public void setHttpPort(int httpPort) { - this.httpPort = httpPort; - } - - public int getRpcPort() { - return this.rpcPort; - } - - public void setRpcPort(int rpcPort) { - this.rpcPort = rpcPort; - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, ip); - out.writeInt(httpPort); - out.writeInt(rpcPort); - } - - @Override - public void readFields(DataInput in) throws IOException { - ip = Text.readString(in); - httpPort = in.readInt(); - rpcPort = in.readInt(); - } - -} + + public String getIp() { + return this.ip; + } + + public void setIp(String ip) { + this.ip = ip; + } + + public int getHttpPort() { + return this.httpPort; + } + + public void setHttpPort(int httpPort) { + this.httpPort = httpPort; + } + + public int getRpcPort() { + return this.rpcPort; + } + + public void setRpcPort(int rpcPort) { + this.rpcPort = rpcPort; + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, ip); + out.writeInt(httpPort); + out.writeInt(rpcPort); + } + + @Override + public void readFields(DataInput in) throws IOException { + ip = Text.readString(in); + httpPort = in.readInt(); + rpcPort = in.readInt(); + } + +} diff --git a/fe/src/com/baidu/palo/http/BaseAction.java b/fe/src/com/baidu/palo/http/BaseAction.java index ff3b4c7705..20eaff86ec 100644 --- a/fe/src/com/baidu/palo/http/BaseAction.java +++ b/fe/src/com/baidu/palo/http/BaseAction.java @@ -304,12 +304,14 @@ public abstract class BaseAction implements IAction { } encodedAuthString = parts[1]; ByteBuf buf = null; + ByteBuf decodeBuf = null; try { buf = Unpooled.copiedBuffer(ByteBuffer.wrap(encodedAuthString.getBytes())); // The authString is a string connecting user-name and password with // a colon(':') - String authString = Base64.decode(buf).toString(CharsetUtil.UTF_8); + decodeBuf = Base64.decode(buf); + String authString = decodeBuf.toString(CharsetUtil.UTF_8); // Note that password may contain colon, so can not simply use a // colon to split. 
int index = authString.indexOf(":"); @@ -326,11 +328,15 @@ public abstract class BaseAction implements IAction { authInfo.password = authString.substring(index + 1); authInfo.remoteIp = request.getHostString(); } finally { - // release the buf after using Unpooled.copiedBuffer + // release the buf and decode buf after using Unpooled.copiedBuffer // or it will get memory leak if (buf != null) { buf.release(); } + + if (decodeBuf != null) { + decodeBuf.release(); + } } return true; } diff --git a/fe/src/com/baidu/palo/http/rest/LoadAction.java b/fe/src/com/baidu/palo/http/rest/LoadAction.java index 393dbbad73..6baa2dfc03 100644 --- a/fe/src/com/baidu/palo/http/rest/LoadAction.java +++ b/fe/src/com/baidu/palo/http/rest/LoadAction.java @@ -61,17 +61,13 @@ public class LoadAction extends RestBaseAction { } @Override - public void execute(BaseRequest request, BaseResponse response) throws DdlException { + public void executeWithoutPassword(AuthorizationInfo authInfo, BaseRequest request, BaseResponse response) + throws DdlException { // A 'Load' request must have 100-continue header if (!request.getRequest().headers().contains(HttpHeaders.Names.EXPECT)) { throw new DdlException("There is no 100-continue header"); } - final AuthorizationInfo authInfo = getAuthorizationInfo(request); - if (authInfo == null) { - throw new DdlException("Authorize failed"); - } - final String clusterName = authInfo.cluster; if (Strings.isNullOrEmpty(clusterName)) { throw new DdlException("No cluster selected."); @@ -122,3 +118,4 @@ public class LoadAction extends RestBaseAction { redirectTo(request, response, redirectAddr); } } + diff --git a/fe/src/com/baidu/palo/journal/Journal.java b/fe/src/com/baidu/palo/journal/Journal.java index d34ad83d96..0af7797375 100644 --- a/fe/src/com/baidu/palo/journal/Journal.java +++ b/fe/src/com/baidu/palo/journal/Journal.java @@ -13,46 +13,46 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.journal;
-
-import com.baidu.palo.common.io.Writable;
-
-import java.util.List;
-
-public interface Journal {
-
-    // Open the journal environment
-    public void open();
-
-    // Roll Edit file or database
-    public void rollJournal();
-
-    // Get the newest journal id
-    public long getMaxJournalId();
-
-    // Get the oldest journal id
-    public long getMinJournalId();
-
-    // Close the environment
-    public void close();
-
-    // Get the journal which id = journalId
-    public JournalEntity read(long journalId);
-
-    // Get all the journals whose id: fromKey <= id <= toKey
-    // toKey = -1 means toKey = Long.Max_Value
-    public JournalCursor read(long fromKey, long toKey);
-
-    // Write a journal and sync to disk
-    public void write(short op, Writable writable);
-
-    // Delete journals whose max id is less than deleteToJournalId
-    public void deleteJournals(long deleteJournalToId);
-
-    // Current db's min journal id - 1
-    public long getFinalizedJournalId();
-
-    // Get all the dbs' name
-    public List<Long> getDatabaseNames();
-
-}
+package com.baidu.palo.journal;
+
+import com.baidu.palo.common.io.Writable;
+
+import java.util.List;
+
+public interface Journal {
+
+    // Open the journal environment
+    public void open();
+
+    // Roll Edit file or database
+    public void rollJournal();
+
+    // Get the newest journal id
+    public long getMaxJournalId();
+
+    // Get the oldest journal id
+    public long getMinJournalId();
+
+    // Close the environment
+    public void close();
+
+    // Get the journal which id = journalId
+    public JournalEntity read(long journalId);
+
+    // Get all the journals whose id: fromKey <= id <= toKey
+    // toKey = -1 means toKey = Long.Max_Value
+    public JournalCursor read(long fromKey, long toKey);
+
+    // Write a journal and sync to disk
+    public void write(short op, Writable writable);
+
+    // Delete journals whose max id is less than deleteToJournalId
+    public void deleteJournals(long deleteJournalToId);
+
+    // Current db's min journal id - 1
+    public long getFinalizedJournalId();
+
+    // Get all the dbs' name
+    public List<Long> getDatabaseNames();
+
+}
diff --git a/fe/src/com/baidu/palo/journal/JournalCursor.java b/fe/src/com/baidu/palo/journal/JournalCursor.java
index 3ff9fffd5c..052139766d 100644
--- a/fe/src/com/baidu/palo/journal/JournalCursor.java
+++ b/fe/src/com/baidu/palo/journal/JournalCursor.java
@@ -13,14 +13,14 @@
 // specific language governing permissions and limitations
 // under the License.
-package com.baidu.palo.journal;
-
-// This class is like JDBC ResultSet.
-public interface JournalCursor {
-
-    // Return the next journal. return null when there is no more journals
-    public JournalEntity next();
-
-    public void close();
-
-}
+package com.baidu.palo.journal;
+
+// This class is like JDBC ResultSet.
+public interface JournalCursor {
+
+    // Return the next journal. return null when there is no more journals
+    public JournalEntity next();
+
+    public void close();
+
+}
diff --git a/fe/src/com/baidu/palo/journal/bdbje/BDBEnvironment.java b/fe/src/com/baidu/palo/journal/bdbje/BDBEnvironment.java
index f254a5ef7d..e7a282d6ec 100644
--- a/fe/src/com/baidu/palo/journal/bdbje/BDBEnvironment.java
+++ b/fe/src/com/baidu/palo/journal/bdbje/BDBEnvironment.java
@@ -13,8 +13,8 @@
 // specific language governing permissions and limitations
 // under the License.
-package com.baidu.palo.journal.bdbje; - +package com.baidu.palo.journal.bdbje; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.Config; import com.baidu.palo.ha.BDBHA; @@ -52,175 +52,175 @@ import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantReadWriteLock; - -/* this class contains the reference to bdb environment. - * including all the opened databases and the replicationGroupAdmin. - * we can get the information of this bdb group through the API of replicationGroupAdmin - */ -public class BDBEnvironment { - private static final Logger LOG = LogManager.getLogger(BDBEnvironment.class); - private static final int RETRY_TIME = 3; - private static final int MEMORY_CACHE_PERCENT = 20; - - public static final String PALO_JOURNAL_GROUP = "PALO_JOURNAL_GROUP"; - - private ReplicatedEnvironment replicatedEnvironment; - private EnvironmentConfig environmentConfig; - private ReplicationConfig replicationConfig; - private DatabaseConfig dbConfig; - private Database epochDB = null; // used for fencing - private ReplicationGroupAdmin replicationGroupAdmin = null; - private ReentrantReadWriteLock lock; - private List openedDatabases; - - public BDBEnvironment() { - openedDatabases = new ArrayList(); - this.lock = new ReentrantReadWriteLock(true); - } - - // The setup() method opens the environment and database - public void setup(File envHome, String selfNodeName, String selfNodeHostPort, - String helperHostPort, boolean isElectable) { - - // Almost never used, just in case the master can not restart - if (Config.metadata_failure_recovery.equals("true")) { - if (!isElectable) { - LOG.error("Current node is not in the electable_nodes list. will exit"); - System.exit(-1); - } - DbResetRepGroup resetUtility = new DbResetRepGroup(envHome, PALO_JOURNAL_GROUP, selfNodeName, - selfNodeHostPort); - resetUtility.reset(); - LOG.info("group has been reset."); +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/* this class contains the reference to bdb environment. + * including all the opened databases and the replicationGroupAdmin. + * we can get the information of this bdb group through the API of replicationGroupAdmin + */ +public class BDBEnvironment { + private static final Logger LOG = LogManager.getLogger(BDBEnvironment.class); + private static final int RETRY_TIME = 3; + private static final int MEMORY_CACHE_PERCENT = 20; + + public static final String PALO_JOURNAL_GROUP = "PALO_JOURNAL_GROUP"; + + private ReplicatedEnvironment replicatedEnvironment; + private EnvironmentConfig environmentConfig; + private ReplicationConfig replicationConfig; + private DatabaseConfig dbConfig; + private Database epochDB = null; // used for fencing + private ReplicationGroupAdmin replicationGroupAdmin = null; + private ReentrantReadWriteLock lock; + private List openedDatabases; + + public BDBEnvironment() { + openedDatabases = new ArrayList(); + this.lock = new ReentrantReadWriteLock(true); + } + + // The setup() method opens the environment and database + public void setup(File envHome, String selfNodeName, String selfNodeHostPort, + String helperHostPort, boolean isElectable) { + + // Almost never used, just in case the master can not restart + if (Config.metadata_failure_recovery.equals("true")) { + if (!isElectable) { + LOG.error("Current node is not in the electable_nodes list. 
will exit"); + System.exit(-1); + } + DbResetRepGroup resetUtility = new DbResetRepGroup(envHome, PALO_JOURNAL_GROUP, selfNodeName, + selfNodeHostPort); + resetUtility.reset(); + LOG.info("group has been reset."); } - - // set replication config - replicationConfig = new ReplicationConfig(); - replicationConfig.setNodeName(selfNodeName); - replicationConfig.setNodeHostPort(selfNodeHostPort); - replicationConfig.setHelperHosts(helperHostPort); - replicationConfig.setGroupName(PALO_JOURNAL_GROUP); + + // set replication config + replicationConfig = new ReplicationConfig(); + replicationConfig.setNodeName(selfNodeName); + replicationConfig.setNodeHostPort(selfNodeHostPort); + replicationConfig.setHelperHosts(helperHostPort); + replicationConfig.setGroupName(PALO_JOURNAL_GROUP); replicationConfig.setConfigParam(ReplicationConfig.ENV_UNKNOWN_STATE_TIMEOUT, "10"); replicationConfig.setMaxClockDelta(Config.max_bdbje_clock_delta_ms, TimeUnit.MILLISECONDS); - - if (isElectable) { - replicationConfig.setReplicaAckTimeout(2, TimeUnit.SECONDS); + + if (isElectable) { + replicationConfig.setReplicaAckTimeout(2, TimeUnit.SECONDS); replicationConfig.setConfigParam(ReplicationConfig.REPLICA_MAX_GROUP_COMMIT, "0"); - replicationConfig.setConsistencyPolicy(new NoConsistencyRequiredPolicy()); - } else { - replicationConfig.setNodeType(NodeType.SECONDARY); - replicationConfig.setConsistencyPolicy(new NoConsistencyRequiredPolicy()); - } - - // set environment config - environmentConfig = new EnvironmentConfig(); - environmentConfig.setTransactional(true); - environmentConfig.setAllowCreate(true); - environmentConfig.setCachePercent(MEMORY_CACHE_PERCENT); - if (isElectable) { - Durability durability = new Durability(getSyncPolicy(Config.master_sync_policy), - getSyncPolicy(Config.replica_sync_policy), getAckPolicy(Config.replica_ack_policy)); - environmentConfig.setDurability(durability); - } - - // set database config - dbConfig = new DatabaseConfig(); - dbConfig.setTransactional(true); - if (isElectable) { - dbConfig.setAllowCreate(true); - dbConfig.setReadOnly(false); - } else { - dbConfig.setAllowCreate(false); - dbConfig.setReadOnly(true); - } - - // open environment and epochDB - for (int i = 0; i < RETRY_TIME; i++) { - try { - // open the environment - replicatedEnvironment = new ReplicatedEnvironment(envHome, replicationConfig, environmentConfig); - - // get replicationGroupAdmin object. - Set adminNodes = new HashSet(); - // 1. add helper node - InetSocketAddress helper = new InetSocketAddress(helperHostPort.split(":")[0], - Integer.parseInt(helperHostPort.split(":")[1])); - adminNodes.add(helper); - LOG.info("add helper[{}] as ReplicationGroupAdmin", helperHostPort); - // 2. 
add self if is electable - if (!selfNodeHostPort.equals(helperHostPort) && Catalog.getInstance().isElectable()) { - InetSocketAddress self = new InetSocketAddress(selfNodeHostPort.split(":")[0], - Integer.parseInt(selfNodeHostPort.split(":")[1])); - adminNodes.add(self); - LOG.info("add self[{}] as ReplicationGroupAdmin", selfNodeHostPort); - } - + replicationConfig.setConsistencyPolicy(new NoConsistencyRequiredPolicy()); + } else { + replicationConfig.setNodeType(NodeType.SECONDARY); + replicationConfig.setConsistencyPolicy(new NoConsistencyRequiredPolicy()); + } + + // set environment config + environmentConfig = new EnvironmentConfig(); + environmentConfig.setTransactional(true); + environmentConfig.setAllowCreate(true); + environmentConfig.setCachePercent(MEMORY_CACHE_PERCENT); + if (isElectable) { + Durability durability = new Durability(getSyncPolicy(Config.master_sync_policy), + getSyncPolicy(Config.replica_sync_policy), getAckPolicy(Config.replica_ack_policy)); + environmentConfig.setDurability(durability); + } + + // set database config + dbConfig = new DatabaseConfig(); + dbConfig.setTransactional(true); + if (isElectable) { + dbConfig.setAllowCreate(true); + dbConfig.setReadOnly(false); + } else { + dbConfig.setAllowCreate(false); + dbConfig.setReadOnly(true); + } + + // open environment and epochDB + for (int i = 0; i < RETRY_TIME; i++) { + try { + // open the environment + replicatedEnvironment = new ReplicatedEnvironment(envHome, replicationConfig, environmentConfig); + + // get replicationGroupAdmin object. + Set adminNodes = new HashSet(); + // 1. add helper node + InetSocketAddress helper = new InetSocketAddress(helperHostPort.split(":")[0], + Integer.parseInt(helperHostPort.split(":")[1])); + adminNodes.add(helper); + LOG.info("add helper[{}] as ReplicationGroupAdmin", helperHostPort); + // 2. add self if is electable + if (!selfNodeHostPort.equals(helperHostPort) && Catalog.getInstance().isElectable()) { + InetSocketAddress self = new InetSocketAddress(selfNodeHostPort.split(":")[0], + Integer.parseInt(selfNodeHostPort.split(":")[1])); + adminNodes.add(self); + LOG.info("add self[{}] as ReplicationGroupAdmin", selfNodeHostPort); + } + replicationGroupAdmin = new ReplicationGroupAdmin(PALO_JOURNAL_GROUP, adminNodes); - - // get a BDBHA object and pass the reference to Catalog - HAProtocol protocol = new BDBHA(this, selfNodeName); - Catalog.getInstance().setHaProtocol(protocol); - - // start state change listener - StateChangeListener listener = new BDBStateChangeListener(); + + // get a BDBHA object and pass the reference to Catalog + HAProtocol protocol = new BDBHA(this, selfNodeName); + Catalog.getInstance().setHaProtocol(protocol); + + // start state change listener + StateChangeListener listener = new BDBStateChangeListener(); replicatedEnvironment.setStateChangeListener(listener); - - // open epochDB. the first parameter null means auto-commit - epochDB = replicatedEnvironment.openDatabase(null, "epochDB", dbConfig); - break; - } catch (InsufficientLogException insufficientLogEx) { - NetworkRestore restore = new NetworkRestore(); - NetworkRestoreConfig config = new NetworkRestoreConfig(); - config.setRetainLogFiles(false); // delete obsolete log files. - // Use the members returned by insufficientLogEx.getLogProviders() - // to select the desired subset of members and pass the resulting - // list as the argument to config.setLogProviders(), if the - // default selection of providers is not suitable. 
- restore.execute(insufficientLogEx, config); - continue; - } catch (DatabaseException e) { - if (i < RETRY_TIME - 1) { - try { - Thread.sleep(5 * 1000); - } catch (InterruptedException e1) { - e1.printStackTrace(); - } - continue; - } else { - LOG.error("error to open replicated environment. will exit.", e); - System.exit(-1); - } - } - } - } - - public ReplicationGroupAdmin getReplicationGroupAdmin() { - return this.replicationGroupAdmin; + + // open epochDB. the first parameter null means auto-commit + epochDB = replicatedEnvironment.openDatabase(null, "epochDB", dbConfig); + break; + } catch (InsufficientLogException insufficientLogEx) { + NetworkRestore restore = new NetworkRestore(); + NetworkRestoreConfig config = new NetworkRestoreConfig(); + config.setRetainLogFiles(false); // delete obsolete log files. + // Use the members returned by insufficientLogEx.getLogProviders() + // to select the desired subset of members and pass the resulting + // list as the argument to config.setLogProviders(), if the + // default selection of providers is not suitable. + restore.execute(insufficientLogEx, config); + continue; + } catch (DatabaseException e) { + if (i < RETRY_TIME - 1) { + try { + Thread.sleep(5 * 1000); + } catch (InterruptedException e1) { + e1.printStackTrace(); + } + continue; + } else { + LOG.error("error to open replicated environment. will exit.", e); + System.exit(-1); + } + } + } + } + + public ReplicationGroupAdmin getReplicationGroupAdmin() { + return this.replicationGroupAdmin; } public void setNewReplicationGroupAdmin(Set newHelperNodes) { this.replicationGroupAdmin = new ReplicationGroupAdmin(PALO_JOURNAL_GROUP, newHelperNodes); } - - // Return a handle to the epochDB - public Database getEpochDB() { - return epochDB; - } - - // Return a handle to the environment + + // Return a handle to the epochDB + public Database getEpochDB() { + return epochDB; + } + + // Return a handle to the environment public ReplicatedEnvironment getReplicatedEnvironment() { - return replicatedEnvironment; - } - + return replicatedEnvironment; + } + // return the database reference with the given name - // also try to close previous opened database. - public Database openDatabase(String dbName) { - Database db = null; - lock.writeLock().lock(); + // also try to close previous opened database. + public Database openDatabase(String dbName) { + Database db = null; + lock.writeLock().lock(); try { - // find if the specified database is already opened. find and return it. - for (java.util.Iterator iter = openedDatabases.iterator(); iter.hasNext();) { + // find if the specified database is already opened. find and return it. + for (java.util.Iterator iter = openedDatabases.iterator(); iter.hasNext();) { Database openedDb = iter.next(); try { if (openedDb.getDatabaseName() == null) { @@ -251,156 +251,156 @@ public class BDBEnvironment { iter.remove(); continue; } - - if (openedDb.getDatabaseName().equals(dbName)) { - return openedDb; - } - } - // open the specified database. 
- // the first parameter null means auto-commit - try { - db = replicatedEnvironment.openDatabase(null, dbName, dbConfig); - openedDatabases.add(db); - } catch (Exception e) { - LOG.warn("catch an exception when open database {}", dbName, e); - } - } finally { - lock.writeLock().unlock(); - } - return db; - } - - // close and remove the database whose name is dbName - public void removeDatabase(String dbName) { - lock.writeLock().lock(); - try { - String targetDbName = null; - int index = 0; - for (Database db : openedDatabases) { - String name = db.getDatabaseName(); - if (dbName.equals(name)) { + if (openedDb.getDatabaseName().equals(dbName)) { + return openedDb; + } + } + + // open the specified database. + // the first parameter null means auto-commit + try { + db = replicatedEnvironment.openDatabase(null, dbName, dbConfig); + openedDatabases.add(db); + } catch (Exception e) { + LOG.warn("catch an exception when open database {}", dbName, e); + } + } finally { + lock.writeLock().unlock(); + } + return db; + } + + // close and remove the database whose name is dbName + public void removeDatabase(String dbName) { + lock.writeLock().lock(); + try { + String targetDbName = null; + int index = 0; + for (Database db : openedDatabases) { + String name = db.getDatabaseName(); + if (dbName.equals(name)) { db.close(); - LOG.info("database {} has been closed", name); - targetDbName = name; - break; - } - index++; - } - if (targetDbName != null) { - LOG.info("begin to remove database {} from openedDatabases", targetDbName); - openedDatabases.remove(index); - } - try { - LOG.info("begin to remove database {} from replicatedEnviroment", dbName); - // the first parameter null means auto-commit - replicatedEnvironment.removeDatabase(null, dbName); - } catch (DatabaseNotFoundException e) { - LOG.warn("catch an exception when remove db:{}, this db does not exist", dbName, e); - } - } finally { - lock.writeLock().unlock(); - } - } - - // get journal db names and sort the names - public List getDatabaseNames() { - List ret = new ArrayList(); - List names = null; - int tried = 0; - while (true) { - try { - names = replicatedEnvironment.getDatabaseNames(); - break; - } catch (InsufficientLogException e) { - throw e; - } catch (EnvironmentFailureException e) { - tried++; - if (tried == RETRY_TIME) { - LOG.error("bdb environment failure exception.", e); - System.exit(-1); - } - LOG.warn("bdb environment failure exception. will retry", e); - try { - Thread.sleep(1000); - } catch (InterruptedException e1) { - e1.printStackTrace(); - } - continue; - } catch (DatabaseException e) { - LOG.warn("catch an exception when calling getDatabaseNames", e); - return null; - } - } - - if (names != null) { - for (String name : names) { - // We don't count epochDB - if (name.equals("epochDB")) { - continue; - } - - long db = Long.parseLong(name); - ret.add(db); - } - } - - Collections.sort(ret); - return ret; - } - - // Close the store and environment - public void close() { - for (Database db : openedDatabases) { - try { - db.close(); - } catch (DatabaseException exception) { - LOG.error("Error closing db {} will exit", db.getDatabaseName(), exception); - System.exit(-1); - } - } - openedDatabases.clear(); - - if (epochDB != null) { - try { - epochDB.close(); - } catch (DatabaseException exception) { - LOG.error("Error closing db {} will exit", epochDB.getDatabaseName(), exception); - System.exit(-1); - } - } - - if (replicatedEnvironment != null) { - try { - // Finally, close the store and environment. 
- replicatedEnvironment.close(); - } catch (DatabaseException exception) { - LOG.error("Error closing replicatedEnviroment", exception); - System.exit(-1); - } - } - } - - private SyncPolicy getSyncPolicy(String policy) { - if (policy.equalsIgnoreCase("SYNC")) { - return Durability.SyncPolicy.SYNC; - } - if (policy.equalsIgnoreCase("NO_SYNC")) { - return Durability.SyncPolicy.NO_SYNC; - } - // default value is WRITE_NO_SYNC - return Durability.SyncPolicy.WRITE_NO_SYNC; - } - - private ReplicaAckPolicy getAckPolicy(String policy) { - if (policy.equalsIgnoreCase("ALL")) { - return Durability.ReplicaAckPolicy.ALL; - } - if (policy.equalsIgnoreCase("NONE")) { - return Durability.ReplicaAckPolicy.NONE; - } - // default value is SIMPLE_MAJORITY - return Durability.ReplicaAckPolicy.SIMPLE_MAJORITY; - } - -} + LOG.info("database {} has been closed", name); + targetDbName = name; + break; + } + index++; + } + if (targetDbName != null) { + LOG.info("begin to remove database {} from openedDatabases", targetDbName); + openedDatabases.remove(index); + } + try { + LOG.info("begin to remove database {} from replicatedEnviroment", dbName); + // the first parameter null means auto-commit + replicatedEnvironment.removeDatabase(null, dbName); + } catch (DatabaseNotFoundException e) { + LOG.warn("catch an exception when remove db:{}, this db does not exist", dbName, e); + } + } finally { + lock.writeLock().unlock(); + } + } + + // get journal db names and sort the names + public List getDatabaseNames() { + List ret = new ArrayList(); + List names = null; + int tried = 0; + while (true) { + try { + names = replicatedEnvironment.getDatabaseNames(); + break; + } catch (InsufficientLogException e) { + throw e; + } catch (EnvironmentFailureException e) { + tried++; + if (tried == RETRY_TIME) { + LOG.error("bdb environment failure exception.", e); + System.exit(-1); + } + LOG.warn("bdb environment failure exception. will retry", e); + try { + Thread.sleep(1000); + } catch (InterruptedException e1) { + e1.printStackTrace(); + } + continue; + } catch (DatabaseException e) { + LOG.warn("catch an exception when calling getDatabaseNames", e); + return null; + } + } + + if (names != null) { + for (String name : names) { + // We don't count epochDB + if (name.equals("epochDB")) { + continue; + } + + long db = Long.parseLong(name); + ret.add(db); + } + } + + Collections.sort(ret); + return ret; + } + + // Close the store and environment + public void close() { + for (Database db : openedDatabases) { + try { + db.close(); + } catch (DatabaseException exception) { + LOG.error("Error closing db {} will exit", db.getDatabaseName(), exception); + System.exit(-1); + } + } + openedDatabases.clear(); + + if (epochDB != null) { + try { + epochDB.close(); + } catch (DatabaseException exception) { + LOG.error("Error closing db {} will exit", epochDB.getDatabaseName(), exception); + System.exit(-1); + } + } + + if (replicatedEnvironment != null) { + try { + // Finally, close the store and environment. 
+ replicatedEnvironment.close(); + } catch (DatabaseException exception) { + LOG.error("Error closing replicatedEnviroment", exception); + System.exit(-1); + } + } + } + + private SyncPolicy getSyncPolicy(String policy) { + if (policy.equalsIgnoreCase("SYNC")) { + return Durability.SyncPolicy.SYNC; + } + if (policy.equalsIgnoreCase("NO_SYNC")) { + return Durability.SyncPolicy.NO_SYNC; + } + // default value is WRITE_NO_SYNC + return Durability.SyncPolicy.WRITE_NO_SYNC; + } + + private ReplicaAckPolicy getAckPolicy(String policy) { + if (policy.equalsIgnoreCase("ALL")) { + return Durability.ReplicaAckPolicy.ALL; + } + if (policy.equalsIgnoreCase("NONE")) { + return Durability.ReplicaAckPolicy.NONE; + } + // default value is SIMPLE_MAJORITY + return Durability.ReplicaAckPolicy.SIMPLE_MAJORITY; + } + +} diff --git a/fe/src/com/baidu/palo/journal/bdbje/BDBJournalCursor.java b/fe/src/com/baidu/palo/journal/bdbje/BDBJournalCursor.java index 56119d61b5..40fd81c38c 100644 --- a/fe/src/com/baidu/palo/journal/bdbje/BDBJournalCursor.java +++ b/fe/src/com/baidu/palo/journal/bdbje/BDBJournalCursor.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.journal.bdbje; - +package com.baidu.palo.journal.bdbje; + import com.baidu.palo.journal.JournalCursor; import com.baidu.palo.journal.JournalEntity; @@ -29,105 +29,105 @@ import org.apache.logging.log4j.Logger; import java.io.ByteArrayInputStream; import java.io.DataInputStream; -import java.util.List; - -public class BDBJournalCursor implements JournalCursor { - private static final Logger LOG = LogManager.getLogger(JournalCursor.class); - - private long toKey; - private long currentKey; - private BDBEnvironment environment; - private List dbNames; - private Database database; - private int nextDbPositionIndex; - private final int maxTryTime = 3; - - public static BDBJournalCursor getJournalCursor(BDBEnvironment env, long fromKey, long toKey) { - if (toKey < fromKey || fromKey < 0) { - System.out.println("Invalid key range!"); - return null; - } - BDBJournalCursor cursor = null; - try { - cursor = new BDBJournalCursor(env, fromKey, toKey); - } catch (Exception e) { - LOG.error("new BDBJournalCursor error.", e); - } - return cursor; - } - - - private BDBJournalCursor(BDBEnvironment env, long fromKey, long toKey) throws Exception { - this.environment = env; - this.toKey = toKey; - this.currentKey = fromKey; - this.dbNames = env.getDatabaseNames(); - if (dbNames == null) { - throw new NullPointerException("dbNames is null."); - } - this.nextDbPositionIndex = 0; - - // find the db which may contain the fromKey - String dbName = null; - for (long db : dbNames) { - if (fromKey >= db) { - dbName = Long.toString(db); - nextDbPositionIndex++; - continue; - } else { - break; - } - } - - if (dbName == null) { - LOG.error("Can not find the key:{}, fail to get journal cursor. will exit.", fromKey); - System.exit(-1); - } - this.database = env.openDatabase(dbName); - } - - @Override - public JournalEntity next() { - JournalEntity ret = null; - if (currentKey > toKey) { - return ret; - } - Long key = new Long(currentKey); - DatabaseEntry theKey = new DatabaseEntry(); - TupleBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class); - myBinding.objectToEntry(key, theKey); - - DatabaseEntry theData = new DatabaseEntry(); - // if current db does not contain any more data, then we go to search the next db - try { - // null means perform the operation without transaction protection. 
- // READ_COMMITTED guarantees no dirty read. - int tryTimes = 0; +import java.util.List; + +public class BDBJournalCursor implements JournalCursor { + private static final Logger LOG = LogManager.getLogger(JournalCursor.class); + + private long toKey; + private long currentKey; + private BDBEnvironment environment; + private List dbNames; + private Database database; + private int nextDbPositionIndex; + private final int maxTryTime = 3; + + public static BDBJournalCursor getJournalCursor(BDBEnvironment env, long fromKey, long toKey) { + if (toKey < fromKey || fromKey < 0) { + System.out.println("Invalid key range!"); + return null; + } + BDBJournalCursor cursor = null; + try { + cursor = new BDBJournalCursor(env, fromKey, toKey); + } catch (Exception e) { + LOG.error("new BDBJournalCursor error.", e); + } + return cursor; + } + + + private BDBJournalCursor(BDBEnvironment env, long fromKey, long toKey) throws Exception { + this.environment = env; + this.toKey = toKey; + this.currentKey = fromKey; + this.dbNames = env.getDatabaseNames(); + if (dbNames == null) { + throw new NullPointerException("dbNames is null."); + } + this.nextDbPositionIndex = 0; + + // find the db which may contain the fromKey + String dbName = null; + for (long db : dbNames) { + if (fromKey >= db) { + dbName = Long.toString(db); + nextDbPositionIndex++; + continue; + } else { + break; + } + } + + if (dbName == null) { + LOG.error("Can not find the key:{}, fail to get journal cursor. will exit.", fromKey); + System.exit(-1); + } + this.database = env.openDatabase(dbName); + } + + @Override + public JournalEntity next() { + JournalEntity ret = null; + if (currentKey > toKey) { + return ret; + } + Long key = new Long(currentKey); + DatabaseEntry theKey = new DatabaseEntry(); + TupleBinding myBinding = TupleBinding.getPrimitiveBinding(Long.class); + myBinding.objectToEntry(key, theKey); + + DatabaseEntry theData = new DatabaseEntry(); + // if current db does not contain any more data, then we go to search the next db + try { + // null means perform the operation without transaction protection. + // READ_COMMITTED guarantees no dirty read. + int tryTimes = 0; while (true) { - OperationStatus operationStatus = database.get(null, theKey, theData, LockMode.READ_COMMITTED); - if (operationStatus == OperationStatus.SUCCESS) { - // Recreate the data String. - byte[] retData = theData.getData(); - DataInputStream in = new DataInputStream(new ByteArrayInputStream(retData)); - ret = new JournalEntity(); - try { - ret.readFields(in); - } catch (Exception e) { - LOG.error("fail to read journal entity key={}, will exit", currentKey, e); - System.exit(-1); - } - currentKey++; - return ret; - } else if (nextDbPositionIndex < dbNames.size() && currentKey == dbNames.get(nextDbPositionIndex)) { - database = environment.openDatabase(dbNames.get(nextDbPositionIndex).toString()); - nextDbPositionIndex++; - tryTimes = 0; - continue; - } else if (tryTimes < maxTryTime) { - tryTimes++; - LOG.warn("fail to get journal {}, will try again. status: {}", currentKey, operationStatus); - Thread.sleep(3000); - continue; + OperationStatus operationStatus = database.get(null, theKey, theData, LockMode.READ_COMMITTED); + if (operationStatus == OperationStatus.SUCCESS) { + // Recreate the data String. 
+ byte[] retData = theData.getData(); + DataInputStream in = new DataInputStream(new ByteArrayInputStream(retData)); + ret = new JournalEntity(); + try { + ret.readFields(in); + } catch (Exception e) { + LOG.error("fail to read journal entity key={}, will exit", currentKey, e); + System.exit(-1); + } + currentKey++; + return ret; + } else if (nextDbPositionIndex < dbNames.size() && currentKey == dbNames.get(nextDbPositionIndex)) { + database = environment.openDatabase(dbNames.get(nextDbPositionIndex).toString()); + nextDbPositionIndex++; + tryTimes = 0; + continue; + } else if (tryTimes < maxTryTime) { + tryTimes++; + LOG.warn("fail to get journal {}, will try again. status: {}", currentKey, operationStatus); + Thread.sleep(3000); + continue; } else if (operationStatus == OperationStatus.NOTFOUND) { // In the case: // On non-master FE, the replayer will first get the max journal id, @@ -143,16 +143,16 @@ public class BDBJournalCursor implements JournalCursor { } else { LOG.error("fail to get journal {}, status: {}, will exit", currentKey); System.exit(-1); - } - } - } catch (Exception e) { - LOG.warn("Catch an exception when get next JournalEntity. key:{}", currentKey, e); - return null; - } - } - - @Override - public void close() { - - } -} + } + } + } catch (Exception e) { + LOG.warn("Catch an exception when get next JournalEntity. key:{}", currentKey, e); + return null; + } + } + + @Override + public void close() { + + } +} diff --git a/fe/src/com/baidu/palo/journal/bdbje/Timestamp.java b/fe/src/com/baidu/palo/journal/bdbje/Timestamp.java index bbdeeca196..9881773465 100644 --- a/fe/src/com/baidu/palo/journal/bdbje/Timestamp.java +++ b/fe/src/com/baidu/palo/journal/bdbje/Timestamp.java @@ -13,37 +13,37 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.journal.bdbje; - -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -// Write this class to bdb periodically -public class Timestamp implements Writable { - private long timestamp; - - public Timestamp() { - timestamp = System.currentTimeMillis(); - } - - public long getTimestamp() { - return this.timestamp; - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeLong(timestamp); - } - - @Override - public void readFields(DataInput in) throws IOException { - timestamp = in.readLong(); - } - - public String toString() { - return "" + timestamp; - } -} +package com.baidu.palo.journal.bdbje; + +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +// Write this class to bdb periodically +public class Timestamp implements Writable { + private long timestamp; + + public Timestamp() { + timestamp = System.currentTimeMillis(); + } + + public long getTimestamp() { + return this.timestamp; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeLong(timestamp); + } + + @Override + public void readFields(DataInput in) throws IOException { + timestamp = in.readLong(); + } + + public String toString() { + return "" + timestamp; + } +} diff --git a/fe/src/com/baidu/palo/journal/local/LocalJournal.java b/fe/src/com/baidu/palo/journal/local/LocalJournal.java index 0a356bb389..57d239e7ff 100644 --- a/fe/src/com/baidu/palo/journal/local/LocalJournal.java +++ b/fe/src/com/baidu/palo/journal/local/LocalJournal.java @@ -13,176 +13,176 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.journal.local; - -import com.baidu.palo.common.io.Writable; -import com.baidu.palo.journal.Journal; -import com.baidu.palo.journal.JournalCursor; -import com.baidu.palo.journal.JournalEntity; -import com.baidu.palo.persist.EditLogFileOutputStream; -import com.baidu.palo.persist.EditLogOutputStream; -import com.baidu.palo.persist.Storage; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.atomic.AtomicLong; - -public class LocalJournal implements Journal { - private static final Logger LOG = LogManager.getLogger(LocalJournal.class); - - private EditLogOutputStream outputStream = null; - private AtomicLong journalId = new AtomicLong(1); - private String imageDir; - private File currentEditFile; - - public LocalJournal(String imageDir) { - this.imageDir = imageDir; - } - - @Override - public void open() { - if (outputStream == null) { - try { - Storage storage = new Storage(imageDir); - - this.journalId.set(getCurrentJournalId(storage.getEditsFileSequenceNumbers())); - - long id = journalId.get(); - if (id == storage.getEditsSeq()) { - this.currentEditFile = storage.getEditsFile(id); - this.outputStream = new EditLogFileOutputStream(currentEditFile); - } else { - currentEditFile = new File(imageDir, "edits." 
+ (id + 1)); - currentEditFile.createNewFile(); - outputStream = new EditLogFileOutputStream(currentEditFile); - } - } catch (IOException e) { - LOG.error(e); - } - } - } - - @Override - public synchronized void rollJournal() { - Storage storage; - try { - storage = new Storage(imageDir); - if (journalId.get() == storage.getEditsSeq()) { - System.out.println("Does not need to roll!"); - return; - } - if (outputStream != null) { - outputStream.close(); - } - currentEditFile = new File(imageDir, "edits." + journalId.get()); - currentEditFile.createNewFile(); - outputStream = new EditLogFileOutputStream(currentEditFile); - } catch (IOException e) { - LOG.error(e); - } - } - - @Override - public long getMaxJournalId() { - return 0; - } - - @Override - public long getMinJournalId() { - return 0; - } - - @Override - public void close() { - if (outputStream == null) { - return; - } - - try { - outputStream.setReadyToFlush(); - outputStream.flush(); - outputStream.close(); - } catch (IOException e) { - LOG.error(e); - } - } - - @Override - public JournalEntity read(long journalId) { - return null; - } - - @Override - public JournalCursor read(long fromKey, long toKey) { - JournalCursor cursor = LocalJournalCursor.getJournalCursor(imageDir, fromKey, toKey); - return cursor; - } - - @Override - public synchronized void write(short op, Writable writable) { - try { - outputStream.write(op, writable); - outputStream.setReadyToFlush(); - outputStream.flush(); - journalId.incrementAndGet(); - } catch (IOException e) { - LOG.error(e); - } - } - - @Override - public void deleteJournals(long deleteJournalToId) { - try { - Storage storage = new Storage(imageDir); - List nubmers = storage.getEditsFileSequenceNumbers(); - for (long number : nubmers) { - if (number < deleteJournalToId) { - File file = new File(imageDir, "edits." 
+ number); - if (file.exists()) { - file.delete(); - } - } - } - } catch (IOException e) { - LOG.error(e); - } - } - - @Override - public long getFinalizedJournalId() { - try { - Storage storage = new Storage(imageDir); - List numbers = storage.getEditsFileSequenceNumbers(); - int size = numbers.size(); - if (size > 1) { - return numbers.get(size - 1) - 1; - } - } catch (IOException e) { - LOG.error(e); - } - return 0; - } - - private long getCurrentJournalId(List editFileNames) { - if (editFileNames.size() == 0) { - return 1; - } - - long ret = editFileNames.get(editFileNames.size() - 1); - JournalCursor cursor = read(ret, -1); - while (cursor.next() != null) { - ret++; - } - - return ret; - } - - @Override - public List getDatabaseNames() { - throw new RuntimeException("Not Support"); - } -} +package com.baidu.palo.journal.local; + +import com.baidu.palo.common.io.Writable; +import com.baidu.palo.journal.Journal; +import com.baidu.palo.journal.JournalCursor; +import com.baidu.palo.journal.JournalEntity; +import com.baidu.palo.persist.EditLogFileOutputStream; +import com.baidu.palo.persist.EditLogOutputStream; +import com.baidu.palo.persist.Storage; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +public class LocalJournal implements Journal { + private static final Logger LOG = LogManager.getLogger(LocalJournal.class); + + private EditLogOutputStream outputStream = null; + private AtomicLong journalId = new AtomicLong(1); + private String imageDir; + private File currentEditFile; + + public LocalJournal(String imageDir) { + this.imageDir = imageDir; + } + + @Override + public void open() { + if (outputStream == null) { + try { + Storage storage = new Storage(imageDir); + + this.journalId.set(getCurrentJournalId(storage.getEditsFileSequenceNumbers())); + + long id = journalId.get(); + if (id == storage.getEditsSeq()) { + this.currentEditFile = storage.getEditsFile(id); + this.outputStream = new EditLogFileOutputStream(currentEditFile); + } else { + currentEditFile = new File(imageDir, "edits." + (id + 1)); + currentEditFile.createNewFile(); + outputStream = new EditLogFileOutputStream(currentEditFile); + } + } catch (IOException e) { + LOG.error(e); + } + } + } + + @Override + public synchronized void rollJournal() { + Storage storage; + try { + storage = new Storage(imageDir); + if (journalId.get() == storage.getEditsSeq()) { + System.out.println("Does not need to roll!"); + return; + } + if (outputStream != null) { + outputStream.close(); + } + currentEditFile = new File(imageDir, "edits." 
+ journalId.get()); + currentEditFile.createNewFile(); + outputStream = new EditLogFileOutputStream(currentEditFile); + } catch (IOException e) { + LOG.error(e); + } + } + + @Override + public long getMaxJournalId() { + return 0; + } + + @Override + public long getMinJournalId() { + return 0; + } + + @Override + public void close() { + if (outputStream == null) { + return; + } + + try { + outputStream.setReadyToFlush(); + outputStream.flush(); + outputStream.close(); + } catch (IOException e) { + LOG.error(e); + } + } + + @Override + public JournalEntity read(long journalId) { + return null; + } + + @Override + public JournalCursor read(long fromKey, long toKey) { + JournalCursor cursor = LocalJournalCursor.getJournalCursor(imageDir, fromKey, toKey); + return cursor; + } + + @Override + public synchronized void write(short op, Writable writable) { + try { + outputStream.write(op, writable); + outputStream.setReadyToFlush(); + outputStream.flush(); + journalId.incrementAndGet(); + } catch (IOException e) { + LOG.error(e); + } + } + + @Override + public void deleteJournals(long deleteJournalToId) { + try { + Storage storage = new Storage(imageDir); + List nubmers = storage.getEditsFileSequenceNumbers(); + for (long number : nubmers) { + if (number < deleteJournalToId) { + File file = new File(imageDir, "edits." + number); + if (file.exists()) { + file.delete(); + } + } + } + } catch (IOException e) { + LOG.error(e); + } + } + + @Override + public long getFinalizedJournalId() { + try { + Storage storage = new Storage(imageDir); + List numbers = storage.getEditsFileSequenceNumbers(); + int size = numbers.size(); + if (size > 1) { + return numbers.get(size - 1) - 1; + } + } catch (IOException e) { + LOG.error(e); + } + return 0; + } + + private long getCurrentJournalId(List editFileNames) { + if (editFileNames.size() == 0) { + return 1; + } + + long ret = editFileNames.get(editFileNames.size() - 1); + JournalCursor cursor = read(ret, -1); + while (cursor.next() != null) { + ret++; + } + + return ret; + } + + @Override + public List getDatabaseNames() { + throw new RuntimeException("Not Support"); + } +} diff --git a/fe/src/com/baidu/palo/journal/local/LocalJournalCursor.java b/fe/src/com/baidu/palo/journal/local/LocalJournalCursor.java index df5ae99a45..0fe1ee093c 100644 --- a/fe/src/com/baidu/palo/journal/local/LocalJournalCursor.java +++ b/fe/src/com/baidu/palo/journal/local/LocalJournalCursor.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.journal.local; - +package com.baidu.palo.journal.local; + import com.baidu.palo.alter.AlterJob; import com.baidu.palo.catalog.Database; import com.baidu.palo.common.io.Text; @@ -52,342 +52,342 @@ import java.io.DataInputStream; import java.io.EOFException; import java.io.File; import java.io.IOException; -import java.util.List; +import java.util.List; -@Deprecated -public final class LocalJournalCursor implements JournalCursor { - private static final Logger LOG = LogManager.getLogger(LocalJournalCursor.class); - private String imageDir; - private long toKey; - private long currentKey; - private List editFileSequenceNumbers; - private int nextFilePositionIndex; - private Storage storage; - private DataInputStream currentStream; - - public static LocalJournalCursor getJournalCursor(String imageDir, long fromKey, long toKey) { - if (toKey < fromKey && toKey != -1 || fromKey < 0) { - System.out.println("Invalid key range!"); - return null; +@Deprecated +public final class LocalJournalCursor implements JournalCursor { + private static final Logger LOG = LogManager.getLogger(LocalJournalCursor.class); + private String imageDir; + private long toKey; + private long currentKey; + private List editFileSequenceNumbers; + private int nextFilePositionIndex; + private Storage storage; + private DataInputStream currentStream; + + public static LocalJournalCursor getJournalCursor(String imageDir, long fromKey, long toKey) { + if (toKey < fromKey && toKey != -1 || fromKey < 0) { + System.out.println("Invalid key range!"); + return null; } long newToKey = toKey; if (newToKey == -1) { - newToKey = Long.MAX_VALUE; - } - LocalJournalCursor cursor; - try { - cursor = new LocalJournalCursor(imageDir, fromKey, newToKey); - } catch (IOException e) { - LOG.error(e); - return null; - } - return cursor; - } - - /* - * edits file name is the minimum journal id in this file. - * For example: - * an edit file contains journal from 100 to 200. its file name is edits.100 - */ - private LocalJournalCursor(String imageDir, long fromKey, long toKey) throws IOException { - this.imageDir = imageDir; - this.currentKey = fromKey; - this.toKey = toKey; - this.storage = new Storage(imageDir); - this.editFileSequenceNumbers = storage.getEditsFileSequenceNumbers(); - this.nextFilePositionIndex = 0; - long scannedKey = 0; - - // find the file which may contain the journal with journalId=fromKey - String fileName = null; - for (long minJournalKey : editFileSequenceNumbers) { - if (fromKey >= minJournalKey) { - fileName = Long.toString(minJournalKey); - nextFilePositionIndex++; - scannedKey = minJournalKey; - continue; - } else { - break; - } - } - - if (fileName == null) { - System.out.println("Can not find the key:" + fromKey); - throw new IOException(); - } - - this.currentStream = new DataInputStream(new BufferedInputStream( - new EditLogFileInputStream(new File(imageDir, "edits." 
+ fileName)))); - - while (scannedKey < fromKey) { - short opCode = currentStream.readShort(); - if (opCode == OperationType.OP_INVALID) { - System.out.println("Can not find the key:" + fromKey); - throw new IOException(); - } - getJournalEntity(currentStream, opCode); - scannedKey++; - } - } - - @Override - public JournalEntity next() { - if (currentKey > toKey) { - return null; - } - - JournalEntity ret = null; - try { - short opCode = OperationType.OP_INVALID; - - while (true) { - try { - opCode = currentStream.readShort(); - if (opCode == OperationType.OP_INVALID) { - if (nextFilePositionIndex < editFileSequenceNumbers.size()) { - currentStream.close(); - currentStream = new DataInputStream(new BufferedInputStream(new EditLogFileInputStream( - new File(imageDir, "edits." + editFileSequenceNumbers - .get(nextFilePositionIndex))))); - nextFilePositionIndex++; - continue; - } else { - return null; - } - } - } catch (EOFException e) { - if (nextFilePositionIndex < editFileSequenceNumbers.size()) { - currentStream.close(); - currentStream = new DataInputStream( - new BufferedInputStream(new EditLogFileInputStream(new File( - imageDir, "edits." + editFileSequenceNumbers.get(nextFilePositionIndex))))); - nextFilePositionIndex++; - continue; - } else { - return null; - } - } - break; - } - - ret = getJournalEntity(currentStream, opCode); - currentKey++; - return ret; - } catch (IOException e) { - LOG.error("something wrong. {}", e); - try { - currentStream.close(); - } catch (IOException e1) { - LOG.error(e1); - } - LOG.error(e); - } - return ret; - } + newToKey = Long.MAX_VALUE; + } + LocalJournalCursor cursor; + try { + cursor = new LocalJournalCursor(imageDir, fromKey, newToKey); + } catch (IOException e) { + LOG.error(e); + return null; + } + return cursor; + } + + /* + * edits file name is the minimum journal id in this file. + * For example: + * an edit file contains journal from 100 to 200. its file name is edits.100 + */ + private LocalJournalCursor(String imageDir, long fromKey, long toKey) throws IOException { + this.imageDir = imageDir; + this.currentKey = fromKey; + this.toKey = toKey; + this.storage = new Storage(imageDir); + this.editFileSequenceNumbers = storage.getEditsFileSequenceNumbers(); + this.nextFilePositionIndex = 0; + long scannedKey = 0; + + // find the file which may contain the journal with journalId=fromKey + String fileName = null; + for (long minJournalKey : editFileSequenceNumbers) { + if (fromKey >= minJournalKey) { + fileName = Long.toString(minJournalKey); + nextFilePositionIndex++; + scannedKey = minJournalKey; + continue; + } else { + break; + } + } + + if (fileName == null) { + System.out.println("Can not find the key:" + fromKey); + throw new IOException(); + } + + this.currentStream = new DataInputStream(new BufferedInputStream( + new EditLogFileInputStream(new File(imageDir, "edits." 
+ fileName)))); + + while (scannedKey < fromKey) { + short opCode = currentStream.readShort(); + if (opCode == OperationType.OP_INVALID) { + System.out.println("Can not find the key:" + fromKey); + throw new IOException(); + } + getJournalEntity(currentStream, opCode); + scannedKey++; + } + } + + @Override + public JournalEntity next() { + if (currentKey > toKey) { + return null; + } + + JournalEntity ret = null; + try { + short opCode = OperationType.OP_INVALID; + + while (true) { + try { + opCode = currentStream.readShort(); + if (opCode == OperationType.OP_INVALID) { + if (nextFilePositionIndex < editFileSequenceNumbers.size()) { + currentStream.close(); + currentStream = new DataInputStream(new BufferedInputStream(new EditLogFileInputStream( + new File(imageDir, "edits." + editFileSequenceNumbers + .get(nextFilePositionIndex))))); + nextFilePositionIndex++; + continue; + } else { + return null; + } + } + } catch (EOFException e) { + if (nextFilePositionIndex < editFileSequenceNumbers.size()) { + currentStream.close(); + currentStream = new DataInputStream( + new BufferedInputStream(new EditLogFileInputStream(new File( + imageDir, "edits." + editFileSequenceNumbers.get(nextFilePositionIndex))))); + nextFilePositionIndex++; + continue; + } else { + return null; + } + } + break; + } + + ret = getJournalEntity(currentStream, opCode); + currentKey++; + return ret; + } catch (IOException e) { + LOG.error("something wrong. {}", e); + try { + currentStream.close(); + } catch (IOException e1) { + LOG.error(e1); + } + LOG.error(e); + } + return ret; + } + + @Deprecated + private JournalEntity getJournalEntity(DataInputStream in, short opCode) throws IOException { + JournalEntity ret = new JournalEntity(); + ret.setOpCode(opCode); + switch (opCode) { + case OperationType.OP_SAVE_NEXTID: { + Text text = new Text(); + text.readFields(in); + ret.setData(text); + break; + } + case OperationType.OP_CREATE_DB: { + Database db = new Database(); + db.readFields(in); + ret.setData(db); + break; + } + case OperationType.OP_DROP_DB: { + Text text = new Text(); + text.readFields(in); + ret.setData(text); + break; + } + case OperationType.OP_ALTER_DB: + case OperationType.OP_RENAME_DB: { + DatabaseInfo info = new DatabaseInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_CREATE_TABLE: { + CreateTableInfo info = new CreateTableInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_DROP_TABLE: { + DropInfo info = new DropInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_ADD_PARTITION: { + PartitionPersistInfo info = new PartitionPersistInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_DROP_PARTITION: { + DropPartitionInfo info = new DropPartitionInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_MODIFY_PARTITION: { + ModifyPartitionInfo info = ModifyPartitionInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_ERASE_DB: + case OperationType.OP_ERASE_TABLE: + case OperationType.OP_ERASE_PARTITION: { + Text text = new Text(); + text.readFields(in); + ret.setData(text); + break; + } + case OperationType.OP_RECOVER_DB: + case OperationType.OP_RECOVER_TABLE: + case OperationType.OP_RECOVER_PARTITION: { + RecoverInfo recoverInfo = new RecoverInfo(); + recoverInfo.readFields(in); + ret.setData(recoverInfo); + break; + } + case OperationType.OP_START_ROLLUP: + case OperationType.OP_FINISH_ROLLUP: + case 
OperationType.OP_CANCEL_ROLLUP: + case OperationType.OP_START_SCHEMA_CHANGE: + case OperationType.OP_FINISH_SCHEMA_CHANGE: + case OperationType.OP_CANCEL_SCHEMA_CHANGE: + case OperationType.OP_START_DECOMMISSION_BACKEND: + case OperationType.OP_FINISH_DECOMMISSION_BACKEND: { + AlterJob alterJob = AlterJob.read(in); + ret.setData(alterJob); + break; + } + case OperationType.OP_CLEAR_ROLLUP_INFO: { + ReplicaPersistInfo info = ReplicaPersistInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_DROP_ROLLUP: { + DropInfo info = DropInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_RENAME_TABLE: + case OperationType.OP_RENAME_ROLLUP: + case OperationType.OP_RENAME_PARTITION: { + TableInfo info = TableInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_FINISH_CONSISTENCY_CHECK: { + ConsistencyCheckInfo info = ConsistencyCheckInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_LOAD_START: + case OperationType.OP_LOAD_ETL: + case OperationType.OP_LOAD_LOADING: + case OperationType.OP_LOAD_QUORUM: + case OperationType.OP_LOAD_DONE: + case OperationType.OP_LOAD_CANCEL: { + LoadJob job = new LoadJob(); + job.readFields(in); + ret.setData(job); + break; + } + case OperationType.OP_FINISH_SYNC_DELETE: { + DeleteInfo info = new DeleteInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_FINISH_ASYNC_DELETE: { + AsyncDeleteJob deleteJob = AsyncDeleteJob.read(in); + ret.setData(deleteJob); + break; + } + case OperationType.OP_CLONE_DONE: { + CloneInfo info = CloneInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_ADD_REPLICA: + case OperationType.OP_DELETE_REPLICA: { + ReplicaPersistInfo info = ReplicaPersistInfo.read(in); + ret.setData(info); + break; + } + case OperationType.OP_ADD_BACKEND: + case OperationType.OP_DROP_BACKEND: + case OperationType.OP_BACKEND_STATE_CHANGE: { + Backend be = Backend.read(in); + ret.setData(be); + break; + } + case OperationType.OP_ADD_FRONTEND: + case OperationType.OP_ADD_FIRST_FRONTEND: + case OperationType.OP_REMOVE_FRONTEND: { + Frontend fe = Frontend.read(in); + ret.setData(fe); + break; + } + case OperationType.OP_SET_LOAD_ERROR_URL: { + LoadErrorHub.Param param = new LoadErrorHub.Param(); + param.readFields(in); + ret.setData(param); + break; + } + case OperationType.OP_ALTER_ACCESS_RESOURCE: { + UserProperty resource = new UserProperty(); + resource.readFields(in); + ret.setData(resource); + break; + } + case OperationType.OP_DROP_USER: { + Text text = new Text(); + text.readFields(in); + ret.setData(text); + break; + } + case OperationType.OP_MASTER_INFO_CHANGE: { + MasterInfo info = new MasterInfo(); + info.readFields(in); + ret.setData(info); + break; + } + case OperationType.OP_TIMESTAMP: { + Timestamp stamp = new Timestamp(); + stamp.readFields(in); + ret.setData(stamp); + break; + } + case OperationType.OP_META_VERSION: { + Text text = new Text(); + text.readFields(in); + ret.setData(text); + break; + } + + default: { + throw new IOException("Never seen opcode " + opCode); + } + } + return ret; + } + + @Override + public void close() { + + } +} - @Deprecated - private JournalEntity getJournalEntity(DataInputStream in, short opCode) throws IOException { - JournalEntity ret = new JournalEntity(); - ret.setOpCode(opCode); - switch (opCode) { - case OperationType.OP_SAVE_NEXTID: { - Text text = new Text(); - text.readFields(in); - ret.setData(text); - break; - } - case OperationType.OP_CREATE_DB: { - Database db = new 
Database(); - db.readFields(in); - ret.setData(db); - break; - } - case OperationType.OP_DROP_DB: { - Text text = new Text(); - text.readFields(in); - ret.setData(text); - break; - } - case OperationType.OP_ALTER_DB: - case OperationType.OP_RENAME_DB: { - DatabaseInfo info = new DatabaseInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_CREATE_TABLE: { - CreateTableInfo info = new CreateTableInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_DROP_TABLE: { - DropInfo info = new DropInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_ADD_PARTITION: { - PartitionPersistInfo info = new PartitionPersistInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_DROP_PARTITION: { - DropPartitionInfo info = new DropPartitionInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_MODIFY_PARTITION: { - ModifyPartitionInfo info = ModifyPartitionInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_ERASE_DB: - case OperationType.OP_ERASE_TABLE: - case OperationType.OP_ERASE_PARTITION: { - Text text = new Text(); - text.readFields(in); - ret.setData(text); - break; - } - case OperationType.OP_RECOVER_DB: - case OperationType.OP_RECOVER_TABLE: - case OperationType.OP_RECOVER_PARTITION: { - RecoverInfo recoverInfo = new RecoverInfo(); - recoverInfo.readFields(in); - ret.setData(recoverInfo); - break; - } - case OperationType.OP_START_ROLLUP: - case OperationType.OP_FINISH_ROLLUP: - case OperationType.OP_CANCEL_ROLLUP: - case OperationType.OP_START_SCHEMA_CHANGE: - case OperationType.OP_FINISH_SCHEMA_CHANGE: - case OperationType.OP_CANCEL_SCHEMA_CHANGE: - case OperationType.OP_START_DECOMMISSION_BACKEND: - case OperationType.OP_FINISH_DECOMMISSION_BACKEND: { - AlterJob alterJob = AlterJob.read(in); - ret.setData(alterJob); - break; - } - case OperationType.OP_CLEAR_ROLLUP_INFO: { - ReplicaPersistInfo info = ReplicaPersistInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_DROP_ROLLUP: { - DropInfo info = DropInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_RENAME_TABLE: - case OperationType.OP_RENAME_ROLLUP: - case OperationType.OP_RENAME_PARTITION: { - TableInfo info = TableInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_FINISH_CONSISTENCY_CHECK: { - ConsistencyCheckInfo info = ConsistencyCheckInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_LOAD_START: - case OperationType.OP_LOAD_ETL: - case OperationType.OP_LOAD_LOADING: - case OperationType.OP_LOAD_QUORUM: - case OperationType.OP_LOAD_DONE: - case OperationType.OP_LOAD_CANCEL: { - LoadJob job = new LoadJob(); - job.readFields(in); - ret.setData(job); - break; - } - case OperationType.OP_FINISH_SYNC_DELETE: { - DeleteInfo info = new DeleteInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_FINISH_ASYNC_DELETE: { - AsyncDeleteJob deleteJob = AsyncDeleteJob.read(in); - ret.setData(deleteJob); - break; - } - case OperationType.OP_CLONE_DONE: { - CloneInfo info = CloneInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_ADD_REPLICA: - case OperationType.OP_DELETE_REPLICA: { - ReplicaPersistInfo info = ReplicaPersistInfo.read(in); - ret.setData(info); - break; - } - case OperationType.OP_ADD_BACKEND: - case OperationType.OP_DROP_BACKEND: - case OperationType.OP_BACKEND_STATE_CHANGE: { - Backend be = Backend.read(in); 
- ret.setData(be); - break; - } - case OperationType.OP_ADD_FRONTEND: - case OperationType.OP_ADD_FIRST_FRONTEND: - case OperationType.OP_REMOVE_FRONTEND: { - Frontend fe = Frontend.read(in); - ret.setData(fe); - break; - } - case OperationType.OP_SET_LOAD_ERROR_URL: { - LoadErrorHub.Param param = new LoadErrorHub.Param(); - param.readFields(in); - ret.setData(param); - break; - } - case OperationType.OP_ALTER_ACCESS_RESOURCE: { - UserProperty resource = new UserProperty(); - resource.readFields(in); - ret.setData(resource); - break; - } - case OperationType.OP_DROP_USER: { - Text text = new Text(); - text.readFields(in); - ret.setData(text); - break; - } - case OperationType.OP_MASTER_INFO_CHANGE: { - MasterInfo info = new MasterInfo(); - info.readFields(in); - ret.setData(info); - break; - } - case OperationType.OP_TIMESTAMP: { - Timestamp stamp = new Timestamp(); - stamp.readFields(in); - ret.setData(stamp); - break; - } - case OperationType.OP_META_VERSION: { - Text text = new Text(); - text.readFields(in); - ret.setData(text); - break; - } - - default: { - throw new IOException("Never seen opcode " + opCode); - } - } - return ret; - } - - @Override - public void close() { - - } -} - diff --git a/fe/src/com/baidu/palo/load/DppScheduler.java b/fe/src/com/baidu/palo/load/DppScheduler.java index 7b1c05f9a7..6343815333 100644 --- a/fe/src/com/baidu/palo/load/DppScheduler.java +++ b/fe/src/com/baidu/palo/load/DppScheduler.java @@ -41,7 +41,6 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStreamWriter; -import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -108,7 +107,7 @@ public class DppScheduler { String etlJobId = null; TStatus status = new TStatus(); status.setStatus_code(TStatusCode.OK); - List failMsgs = new ArrayList(); + List failMsgs = Lists.newArrayList(); status.setError_msgs(failMsgs); // check dpp lock map @@ -122,7 +121,9 @@ public class DppScheduler { try { prepareDppApplications(); } catch (LoadException e) { - return null; + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(e.getMessage()); + return new EtlSubmitResult(status, null); } } } @@ -131,12 +132,18 @@ public class DppScheduler { String configDirPath = JOB_CONFIG_DIR + "/" + jobId; File configDir = new File(configDirPath); if (!Util.deleteDirectory(configDir)) { - LOG.warn("delete config dir error. job[{}]", jobId); - return null; + String errMsg = "delete config dir error. job: " + jobId; + LOG.warn(errMsg + ", path: {}", configDirPath); + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(errMsg); + return new EtlSubmitResult(status, null); } if (!configDir.mkdirs()) { - LOG.warn("create config file dir error. job[{}]", jobId); - return null; + String errMsg = "create config file dir error. job: " + jobId; + LOG.warn(errMsg + ", path: {}", configDirPath); + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(errMsg); + return new EtlSubmitResult(status, null); } File configFile = new File(configDirPath + "/" + JOB_CONFIG_FILE); BufferedWriter bw = null; @@ -147,15 +154,21 @@ public class DppScheduler { bw.flush(); } catch (IOException e) { Util.deleteDirectory(configDir); - LOG.warn("create config file error. job[" + jobId + "]", e); - return null; + + String errMsg = "create config file error. 
job: " + jobId; + LOG.warn(errMsg + ", file: {}", configDirPath + "/" + JOB_CONFIG_FILE); + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(errMsg); + return new EtlSubmitResult(status, null); } finally { if (bw != null) { try { bw.close(); } catch (IOException e) { LOG.warn("close buffered writer error", e); - return null; + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(e.getMessage()); + return new EtlSubmitResult(status, null); } } } @@ -169,8 +182,8 @@ public class DppScheduler { try { reduceNumByInputSize = calcReduceNumByInputSize(inputPaths); } catch (InputSizeInvalidException e) { - failMsgs.add(e.getMessage()); status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(e.getMessage()); return new EtlSubmitResult(status, null); } int reduceNumByTablet = calcReduceNumByTablet(jobConf); @@ -218,7 +231,9 @@ public class DppScheduler { } } catch (IOException e) { LOG.warn("submit etl job error", e); - return null; + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(e.getMessage()); + return new EtlSubmitResult(status, null); } finally { Util.deleteDirectory(configDir); long endTime = System.currentTimeMillis(); @@ -228,7 +243,9 @@ public class DppScheduler { errorReader.close(); } catch (IOException e) { LOG.warn("close buffered reader error", e); - return null; + status.setStatus_code(TStatusCode.CANCELLED); + failMsgs.add(e.getMessage()); + return new EtlSubmitResult(status, null); } } } diff --git a/fe/src/com/baidu/palo/load/EtlStatus.java b/fe/src/com/baidu/palo/load/EtlStatus.java index de7125b704..8f95331002 100644 --- a/fe/src/com/baidu/palo/load/EtlStatus.java +++ b/fe/src/com/baidu/palo/load/EtlStatus.java @@ -13,173 +13,173 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.load; - -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; -import com.baidu.palo.thrift.TEtlState; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -public class EtlStatus implements Writable { - public static final String DEFAULT_TRACKING_URL = "N/A"; - - private TEtlState state; - private String trackingUrl; - private Map stats; - private Map counters; - // not persist - private Map fileMap; - - public EtlStatus() { - this.state = TEtlState.RUNNING; - this.trackingUrl = DEFAULT_TRACKING_URL; - this.stats = Maps.newHashMap(); - this.counters = Maps.newHashMap(); - this.fileMap = Maps.newHashMap(); - } - - public TEtlState getState() { - return state; - } - - public boolean setState(TEtlState state) { - // running -> finished or cancelled - if (this.state != TEtlState.RUNNING) { - return false; - } - this.state = state; - return true; - } - - public String getTrackingUrl() { - return trackingUrl; - } - - public void setTrackingUrl(String trackingUrl) { - this.trackingUrl = trackingUrl; - } - - public Map getStats() { - return stats; - } - - public void setStats(Map stats) { - this.stats = stats; - } - - public Map getCounters() { - return counters; - } - - public void setCounters(Map counters) { - this.counters = counters; - } - - public Map getFileMap() { - return fileMap; - } - - public void setFileMap(Map fileMap) { - this.fileMap = fileMap; - } - - @Override - public String toString() { - return "EtlTaskStatus [state=" + state + ", trackingUrl=" + trackingUrl + ", stats=" + stats + ", counters=" - + counters + "]"; - } - - public void write(DataOutput out) throws IOException { - Text.writeString(out, state.name()); - Text.writeString(out, trackingUrl); - - int statsCount = (stats == null) ? 0 : stats.size(); - out.writeInt(statsCount); - for (Map.Entry entry : stats.entrySet()) { - Text.writeString(out, entry.getKey()); - Text.writeString(out, entry.getValue()); - } - - int countersCount = (counters == null) ? 
0 : counters.size(); - out.writeInt(countersCount); - for (Map.Entry entry : counters.entrySet()) { - Text.writeString(out, entry.getKey()); - Text.writeString(out, entry.getValue()); - } - } - - public void readFields(DataInput in) throws IOException { - state = TEtlState.valueOf(Text.readString(in)); - trackingUrl = Text.readString(in); - - int statsCount = in.readInt(); - for (int i = 0; i < statsCount; ++i) { - String key = Text.readString(in); - String value = Text.readString(in); - stats.put(key, value); - } - - int countersCount = in.readInt(); - for (int i = 0; i < countersCount; ++i) { - String key = Text.readString(in); - String value = Text.readString(in); - counters.put(key, value); - } - } - - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (!(obj instanceof EtlStatus)) { - return false; - } - - EtlStatus etlTaskStatus = (EtlStatus) obj; - - // Check stats - if (etlTaskStatus.stats == null) { - return false; - } - if (stats.size() != etlTaskStatus.stats.size()) { - return false; - } - for (Entry entry : stats.entrySet()) { - String key = entry.getKey(); - if (!etlTaskStatus.stats.containsKey(key)) { - return false; - } - if (!entry.getValue().equals(etlTaskStatus.stats.get(key))) { - return false; - } - } - - // Check counters - if (etlTaskStatus.counters == null) { - return false; - } - if (counters.size() != etlTaskStatus.counters.size()) { - return false; - } - for (Entry entry : counters.entrySet()) { - String key = entry.getKey(); - if (!etlTaskStatus.counters.containsKey(key)) { - return false; - } - if (!entry.getValue().equals(etlTaskStatus.counters.get(key))) { - return false; - } - } - - return state.equals(etlTaskStatus.state) && trackingUrl.equals(etlTaskStatus.trackingUrl); - } -} +package com.baidu.palo.load; + +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; +import com.baidu.palo.thrift.TEtlState; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +public class EtlStatus implements Writable { + public static final String DEFAULT_TRACKING_URL = "N/A"; + + private TEtlState state; + private String trackingUrl; + private Map stats; + private Map counters; + // not persist + private Map fileMap; + + public EtlStatus() { + this.state = TEtlState.RUNNING; + this.trackingUrl = DEFAULT_TRACKING_URL; + this.stats = Maps.newHashMap(); + this.counters = Maps.newHashMap(); + this.fileMap = Maps.newHashMap(); + } + + public TEtlState getState() { + return state; + } + + public boolean setState(TEtlState state) { + // running -> finished or cancelled + if (this.state != TEtlState.RUNNING) { + return false; + } + this.state = state; + return true; + } + + public String getTrackingUrl() { + return trackingUrl; + } + + public void setTrackingUrl(String trackingUrl) { + this.trackingUrl = trackingUrl; + } + + public Map getStats() { + return stats; + } + + public void setStats(Map stats) { + this.stats = stats; + } + + public Map getCounters() { + return counters; + } + + public void setCounters(Map counters) { + this.counters = counters; + } + + public Map getFileMap() { + return fileMap; + } + + public void setFileMap(Map fileMap) { + this.fileMap = fileMap; + } + + @Override + public String toString() { + return "EtlTaskStatus [state=" + state + ", trackingUrl=" + trackingUrl + ", stats=" + stats + ", counters=" + + 
counters + "]"; + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, state.name()); + Text.writeString(out, trackingUrl); + + int statsCount = (stats == null) ? 0 : stats.size(); + out.writeInt(statsCount); + for (Map.Entry entry : stats.entrySet()) { + Text.writeString(out, entry.getKey()); + Text.writeString(out, entry.getValue()); + } + + int countersCount = (counters == null) ? 0 : counters.size(); + out.writeInt(countersCount); + for (Map.Entry entry : counters.entrySet()) { + Text.writeString(out, entry.getKey()); + Text.writeString(out, entry.getValue()); + } + } + + public void readFields(DataInput in) throws IOException { + state = TEtlState.valueOf(Text.readString(in)); + trackingUrl = Text.readString(in); + + int statsCount = in.readInt(); + for (int i = 0; i < statsCount; ++i) { + String key = Text.readString(in); + String value = Text.readString(in); + stats.put(key, value); + } + + int countersCount = in.readInt(); + for (int i = 0; i < countersCount; ++i) { + String key = Text.readString(in); + String value = Text.readString(in); + counters.put(key, value); + } + } + + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof EtlStatus)) { + return false; + } + + EtlStatus etlTaskStatus = (EtlStatus) obj; + + // Check stats + if (etlTaskStatus.stats == null) { + return false; + } + if (stats.size() != etlTaskStatus.stats.size()) { + return false; + } + for (Entry entry : stats.entrySet()) { + String key = entry.getKey(); + if (!etlTaskStatus.stats.containsKey(key)) { + return false; + } + if (!entry.getValue().equals(etlTaskStatus.stats.get(key))) { + return false; + } + } + + // Check counters + if (etlTaskStatus.counters == null) { + return false; + } + if (counters.size() != etlTaskStatus.counters.size()) { + return false; + } + for (Entry entry : counters.entrySet()) { + String key = entry.getKey(); + if (!etlTaskStatus.counters.containsKey(key)) { + return false; + } + if (!entry.getValue().equals(etlTaskStatus.counters.get(key))) { + return false; + } + } + + return state.equals(etlTaskStatus.state) && trackingUrl.equals(etlTaskStatus.trackingUrl); + } +} diff --git a/fe/src/com/baidu/palo/load/FailMsg.java b/fe/src/com/baidu/palo/load/FailMsg.java index e89d3c02d2..cc357bf5f9 100644 --- a/fe/src/com/baidu/palo/load/FailMsg.java +++ b/fe/src/com/baidu/palo/load/FailMsg.java @@ -13,83 +13,83 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.load; - -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class FailMsg implements Writable { - public enum CancelType { - USER_CANCEL, - ETL_SUBMIT_FAIL, - ETL_RUN_FAIL, - ETL_QUALITY_UNSATISFIED, - LOAD_RUN_FAIL, - TIMEOUT, - UNKNOWN - } - - private CancelType cancelType; - private String msg; - - public FailMsg() { - this.cancelType = CancelType.UNKNOWN; - this.msg = ""; - } - - public FailMsg(CancelType cancelType, String msg) { - this.cancelType = cancelType; - this.msg = msg; - } - - public CancelType getCancelType() { - return cancelType; - } - - public void setCancelType(CancelType cancelType) { - this.cancelType = cancelType; - } - - public String getMsg() { - return msg; - } - - public void setMsg(String msg) { - this.msg = msg; - } - - @Override - public String toString() { - return "FailMsg [cancelType=" + cancelType + ", msg=" + msg + "]"; - } - - public void write(DataOutput out) throws IOException { - Text.writeString(out, cancelType.name()); - Text.writeString(out, msg); - } - - public void readFields(DataInput in) throws IOException { - cancelType = CancelType.valueOf(Text.readString(in)); - msg = Text.readString(in); - } - - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (!(obj instanceof FailMsg)) { - return false; - } - - FailMsg failMsg = (FailMsg) obj; - - return cancelType.equals(failMsg.cancelType) - && msg.equals(failMsg.msg); - } - -} +package com.baidu.palo.load; + +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class FailMsg implements Writable { + public enum CancelType { + USER_CANCEL, + ETL_SUBMIT_FAIL, + ETL_RUN_FAIL, + ETL_QUALITY_UNSATISFIED, + LOAD_RUN_FAIL, + TIMEOUT, + UNKNOWN + } + + private CancelType cancelType; + private String msg; + + public FailMsg() { + this.cancelType = CancelType.UNKNOWN; + this.msg = ""; + } + + public FailMsg(CancelType cancelType, String msg) { + this.cancelType = cancelType; + this.msg = msg; + } + + public CancelType getCancelType() { + return cancelType; + } + + public void setCancelType(CancelType cancelType) { + this.cancelType = cancelType; + } + + public String getMsg() { + return msg; + } + + public void setMsg(String msg) { + this.msg = msg; + } + + @Override + public String toString() { + return "FailMsg [cancelType=" + cancelType + ", msg=" + msg + "]"; + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, cancelType.name()); + Text.writeString(out, msg); + } + + public void readFields(DataInput in) throws IOException { + cancelType = CancelType.valueOf(Text.readString(in)); + msg = Text.readString(in); + } + + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof FailMsg)) { + return false; + } + + FailMsg failMsg = (FailMsg) obj; + + return cancelType.equals(failMsg.cancelType) + && msg.equals(failMsg.msg); + } + +} diff --git a/fe/src/com/baidu/palo/load/Load.java b/fe/src/com/baidu/palo/load/Load.java index 07b80eee60..b41db19f21 100644 --- a/fe/src/com/baidu/palo/load/Load.java +++ b/fe/src/com/baidu/palo/load/Load.java @@ -416,7 +416,7 @@ public class Load { writeLock(); try { - unprotectAddLoadJob(job); + unprotectAddLoadJob(job, false /* not replay */); MetricRepo.COUNTER_LOAD_ADD.increase(1L); 
Catalog.getInstance().getEditLog().logLoadStart(job); } finally { @@ -802,13 +802,13 @@ public class Load { } } - public void unprotectAddLoadJob(LoadJob job) throws DdlException { + public void unprotectAddLoadJob(LoadJob job, boolean isReplay) throws DdlException { long jobId = job.getId(); long dbId = job.getDbId(); String label = job.getLabel(); + long timestamp = job.getTimestamp(); - - if (getAllUnfinishedLoadJob() > Config.max_unfinished_load_job) { + if (!isReplay && getAllUnfinishedLoadJob() > Config.max_unfinished_load_job) { throw new DdlException( "Number of unfinished load jobs exceed the max number: " + Config.max_unfinished_load_job); } @@ -882,7 +882,7 @@ public class Load { public void replayAddLoadJob(LoadJob job) throws DdlException { writeLock(); try { - unprotectAddLoadJob(job); + unprotectAddLoadJob(job, true /* replay */); } finally { writeUnlock(); } diff --git a/fe/src/com/baidu/palo/load/LoadChecker.java b/fe/src/com/baidu/palo/load/LoadChecker.java index 06e332ccfb..dcad9f399e 100644 --- a/fe/src/com/baidu/palo/load/LoadChecker.java +++ b/fe/src/com/baidu/palo/load/LoadChecker.java @@ -687,7 +687,7 @@ public class LoadChecker extends Daemon { int tryTime = tabletBeIds.size() + 1; LOG.debug("tryTime: {}, tablet be: {}", tryTime, tabletBeIds); do { - destBeId = Catalog.getCurrentSystemInfo().seqChooseBackendIds( + destBeId = Catalog.getCurrentSystemInfo().seqChooseBackendIds( 1, true, false, db.getClusterName()); LOG.debug("descBeId: {}", destBeId); --tryTime; diff --git a/fe/src/com/baidu/palo/load/PartitionLoadInfo.java b/fe/src/com/baidu/palo/load/PartitionLoadInfo.java index b7eb6156a4..a0549cf670 100644 --- a/fe/src/com/baidu/palo/load/PartitionLoadInfo.java +++ b/fe/src/com/baidu/palo/load/PartitionLoadInfo.java @@ -13,136 +13,136 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.load; - -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -public class PartitionLoadInfo implements Writable { - private long version; - private long versionHash; - private List sources; - private boolean needLoad; - - public PartitionLoadInfo() { - this(new ArrayList()); - } - - public PartitionLoadInfo(List sources) { - this.version = -1L; - this.versionHash = 0L; - this.sources = sources; - this.needLoad = true; - } - - public void setVersion(long version) { - this.version = version; - } - - public long getVersion() { - return version; - } - - public void setVersionHash(long versionHash) { - this.versionHash = versionHash; - } - - public long getVersionHash() { - return versionHash; - } - - public List getSources() { - return sources; - } - - public boolean isNeedLoad() { - return needLoad; - } - - public void setNeedLoad(boolean needLoad) { - this.needLoad = needLoad; - } - - public void write(DataOutput out) throws IOException { - out.writeLong(version); - out.writeLong(versionHash); - - int count = 0; - if (sources == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - count = sources.size(); - out.writeInt(count); - for (int i = 0; i < count; ++i) { - sources.get(i).write(out); - } - } - - out.writeBoolean(needLoad); - } - - public void readFields(DataInput in) throws IOException { - version = in.readLong(); - versionHash = in.readLong(); - int count = 0; - - if (in.readBoolean()) { - count = in.readInt(); - for (int i = 0; i < count; i++) { - Source source = new Source(); - source.readFields(in); - sources.add(source); - } - } - - needLoad = in.readBoolean(); - } - - @Override - public String toString() { - return "PartitionLoadInfo{version=" + version + ", versionHash=" + versionHash - + ", needLoad=" + needLoad + "}"; - } - - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (!(obj instanceof PartitionLoadInfo)) { - return false; - } - - PartitionLoadInfo info = (PartitionLoadInfo) obj; - - if (sources != info.sources) { - if (sources == null || info.sources == null) { - return false; - } - if (sources.size() != info.sources.size()) { - return false; - } - for (Source source : sources) { - if (!info.sources.contains(source)) { - return false; - } - } - } - - return version == info.version - && versionHash == info.versionHash - && needLoad == info.needLoad; - } - - public int hashCode() { - int ret = (int) (version ^ versionHash); - ret ^= sources.size(); - return ret; - } -} +package com.baidu.palo.load; + +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PartitionLoadInfo implements Writable { + private long version; + private long versionHash; + private List sources; + private boolean needLoad; + + public PartitionLoadInfo() { + this(new ArrayList()); + } + + public PartitionLoadInfo(List sources) { + this.version = -1L; + this.versionHash = 0L; + this.sources = sources; + this.needLoad = true; + } + + public void setVersion(long version) { + this.version = version; + } + + public long getVersion() { + return version; + } + + public void setVersionHash(long versionHash) { + this.versionHash = versionHash; + } + + public long getVersionHash() { + return versionHash; + } + + public List getSources() { + return sources; + } + 
+ public boolean isNeedLoad() { + return needLoad; + } + + public void setNeedLoad(boolean needLoad) { + this.needLoad = needLoad; + } + + public void write(DataOutput out) throws IOException { + out.writeLong(version); + out.writeLong(versionHash); + + int count = 0; + if (sources == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + count = sources.size(); + out.writeInt(count); + for (int i = 0; i < count; ++i) { + sources.get(i).write(out); + } + } + + out.writeBoolean(needLoad); + } + + public void readFields(DataInput in) throws IOException { + version = in.readLong(); + versionHash = in.readLong(); + int count = 0; + + if (in.readBoolean()) { + count = in.readInt(); + for (int i = 0; i < count; i++) { + Source source = new Source(); + source.readFields(in); + sources.add(source); + } + } + + needLoad = in.readBoolean(); + } + + @Override + public String toString() { + return "PartitionLoadInfo{version=" + version + ", versionHash=" + versionHash + + ", needLoad=" + needLoad + "}"; + } + + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof PartitionLoadInfo)) { + return false; + } + + PartitionLoadInfo info = (PartitionLoadInfo) obj; + + if (sources != info.sources) { + if (sources == null || info.sources == null) { + return false; + } + if (sources.size() != info.sources.size()) { + return false; + } + for (Source source : sources) { + if (!info.sources.contains(source)) { + return false; + } + } + } + + return version == info.version + && versionHash == info.versionHash + && needLoad == info.needLoad; + } + + public int hashCode() { + int ret = (int) (version ^ versionHash); + ret ^= sources.size(); + return ret; + } +} diff --git a/fe/src/com/baidu/palo/load/Source.java b/fe/src/com/baidu/palo/load/Source.java index c388d1af16..94179de3a7 100644 --- a/fe/src/com/baidu/palo/load/Source.java +++ b/fe/src/com/baidu/palo/load/Source.java @@ -13,278 +13,278 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.load;
-
-import com.baidu.palo.catalog.Catalog;
-import com.baidu.palo.common.FeMetaVersion;
-import com.baidu.palo.common.Pair;
-import com.baidu.palo.common.io.Text;
-import com.baidu.palo.common.io.Writable;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-public class Source implements Writable {
-    private static final String DEFAULT_COLUMN_SEPARATOR = "\t";
-    private static final String DEFAULT_LINE_DELIMITER = "\n";
-
-    private List<String> fileUrls;
-    private List<String> columnNames;
-    private String columnSeparator;
-    private String lineDelimiter;
-    private boolean isNegative;
-    private Map<String, Pair<String, List<String>>> columnToFunction;
-
-    public Source() {
-        this.fileUrls = new ArrayList<String>();
-        this.columnNames = new ArrayList<String>();
-        this.columnSeparator = DEFAULT_COLUMN_SEPARATOR;
-        this.lineDelimiter = DEFAULT_LINE_DELIMITER;
-        this.columnToFunction = Maps.newHashMap();
-    }
-
-    public Source(List<String> fileUrls, List<String> columnNames, String columnSeprator,
-                  String lineDelimiter, boolean isNegative) {
-        this.fileUrls = fileUrls;
-        if (fileUrls == null) {
-            this.fileUrls = new ArrayList<String>();
-        }
-        this.columnNames = columnNames;
-        if (columnNames == null) {
-            this.columnNames = new ArrayList<String>();
-        }
-        this.columnSeparator = columnSeprator;
-        this.lineDelimiter = lineDelimiter;
-        this.isNegative = isNegative;
-        this.columnToFunction = Maps.newHashMap();
-    }
-
-    public Source(List<String> fileUrls) {
-        this(fileUrls, null, DEFAULT_COLUMN_SEPARATOR, DEFAULT_LINE_DELIMITER, false);
-    }
-
-    public List<String> getFileUrls() {
-        return fileUrls;
-    }
-
-    public void setFileUrls(List<String> fileUrls) {
-        this.fileUrls = fileUrls;
-    }
-
-    public List<String> getColumnNames() {
-        return columnNames;
-    }
-
-    public void setColumnNames(List<String> columnNames) {
-        this.columnNames = columnNames;
-    }
-
-    public String getColumnSeparator() {
-        return columnSeparator;
-    }
-
-    public void setColumnSeparator(String columnSeparator) {
-        this.columnSeparator = columnSeparator;
-    }
-
-    public String getLineDelimiter() {
-        return lineDelimiter;
-    }
-
-    public void setLineDelimiter(String lineDelimiter) {
-        this.lineDelimiter = lineDelimiter;
-    }
-
-    public boolean isNegative() {
-        return isNegative;
-    }
-
-    public void setNegative(boolean isNegative) {
-        this.isNegative = isNegative;
-    }
-
-    public Map<String, Pair<String, List<String>>> getColumnToFunction() {
-        return columnToFunction;
-    }
-
-    public void setColumnToFunction(Map<String, Pair<String, List<String>>> columnToFunction) {
-        this.columnToFunction = columnToFunction;
-    }
-
-    public void write(DataOutput out) throws IOException {
-        int count = 0;
-        if (fileUrls == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            count = fileUrls.size();
-            out.writeInt(count);
-            for (String url : fileUrls) {
-                Text.writeString(out, url);
-            }
-        }
-
-        if (columnNames == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            count = columnNames.size();
-            out.writeInt(count);
-            for (String name : columnNames) {
-                Text.writeString(out, name);
-            }
-        }
-
-        Text.writeString(out, columnSeparator);
-        Text.writeString(out, lineDelimiter);
-        out.writeBoolean(isNegative);
-
-        if (columnToFunction == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            count = columnToFunction.size();
-            out.writeInt(count);
-            for (Entry<String, Pair<String, List<String>>> entry : columnToFunction.entrySet()) {
-                Text.writeString(out, entry.getKey());
-                Pair<String, List<String>> functionPair = entry.getValue();
-                Text.writeString(out, functionPair.first);
-                count = functionPair.second.size();
-                out.writeInt(count);
-                for (String arg : functionPair.second) {
-                    if (arg == null) {
-                        out.writeBoolean(false);
-                    } else {
-                        out.writeBoolean(true);
-                        Text.writeString(out, arg);
-                    }
-                }
-            }
-        }
-    }
-
-    public void readFields(DataInput in) throws IOException {
-        int count = 0;
-
-        if (in.readBoolean()) {
-            count = in.readInt();
-            for (int i = 0; i < count; i++) {
-                fileUrls.add(Text.readString(in).intern());
-            }
-        }
-
-        if (in.readBoolean()) {
-            count = in.readInt();
-            for (int i = 0; i < count; i++) {
-                columnNames.add(Text.readString(in).intern());
-            }
-        }
-
-        columnSeparator = Text.readString(in).intern();
-        lineDelimiter = Text.readString(in).intern();
-        isNegative = in.readBoolean();
-
-        if (in.readBoolean()) {
-            count = in.readInt();
-            for (int i = 0; i < count; i++) {
-                String column = Text.readString(in).intern();
-                String functionName = Text.readString(in).intern();
-                int argsNum = in.readInt();
-                List<String> args = Lists.newArrayList();
-                for (int j = 0; j < argsNum; j++) {
-                    if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_22) {
-                        if (in.readBoolean()) {
-                            args.add(Text.readString(in));
-                        }
-                    } else {
-                        args.add(Text.readString(in));
-                    }
-                }
-                columnToFunction.put(column, new Pair<String, List<String>>(functionName, args));
-            }
-        }
-    }
-
-    public boolean equals(Object obj) {
-        if (obj == this) {
-            return true;
-        }
-
-        if (!(obj instanceof Source)) {
-            return false;
-        }
-
-        Source source = (Source) obj;
-
-        // Check fileUrls
-        if (fileUrls != source.fileUrls) {
-            if (fileUrls == null || source.fileUrls == null) {
-                return false;
-            }
-            if (fileUrls.size() != source.fileUrls.size()) {
-                return false;
-            }
-            for (String url : fileUrls) {
-                if (!source.fileUrls.contains(url)) {
-                    return false;
-                }
-            }
-        }
-
-        // Check columnNames
-        if (columnNames != source.columnNames) {
-            if (columnNames == null || source.columnNames == null) {
-                return false;
-            }
-            if (columnNames.size() != source.columnNames.size()) {
-                return false;
-            }
-            for (String column : columnNames) {
-                if (!source.columnNames.contains(column)) {
-                    return false;
-                }
-            }
-        }
-
-        // columnToFunction
-        if (columnToFunction != source.columnToFunction) {
-            if (columnToFunction == null || source.columnToFunction == null) {
-                return false;
-            }
-            if (columnToFunction.size() != source.columnToFunction.size()) {
-                return false;
-            }
-            for (Entry<String, Pair<String, List<String>>> entry : columnToFunction.entrySet()) {
-                String column = entry.getKey();
-                if (!source.columnToFunction.containsKey(column)) {
-                    return false;
-                }
-                if (!entry.getValue().equals(source.columnToFunction.get(column))) {
-                    return false;
-                }
-            }
-        }
-
-        return columnSeparator.equals(source.columnSeparator)
-                && lineDelimiter.equals(source.lineDelimiter)
-                && isNegative == source.isNegative;
-    }
-
-    public int hashCode() {
-        if (fileUrls == null || columnNames == null) {
-            return -1;
-        }
-
-        int ret = fileUrls.size() ^ columnNames.size() ^ columnToFunction.size();
-        ret ^= columnSeparator.length();
-        ret ^= lineDelimiter.length();
-        return ret;
-    }
-}
+package com.baidu.palo.load;
+
+import com.baidu.palo.catalog.Catalog;
+import com.baidu.palo.common.FeMetaVersion;
+import com.baidu.palo.common.Pair;
+import com.baidu.palo.common.io.Text;
+import com.baidu.palo.common.io.Writable;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+public class Source implements Writable {
+    private static final String DEFAULT_COLUMN_SEPARATOR = "\t";
+    private static final String DEFAULT_LINE_DELIMITER = "\n";
+
+    private List<String> fileUrls;
+    private List<String> columnNames;
+    private String columnSeparator;
+    private String lineDelimiter;
+    private boolean isNegative;
+    private Map<String, Pair<String, List<String>>> columnToFunction;
+
+    public Source() {
+        this.fileUrls = new ArrayList<String>();
+        this.columnNames = new ArrayList<String>();
+        this.columnSeparator = DEFAULT_COLUMN_SEPARATOR;
+        this.lineDelimiter = DEFAULT_LINE_DELIMITER;
+        this.columnToFunction = Maps.newHashMap();
+    }
+
+    public Source(List<String> fileUrls, List<String> columnNames, String columnSeprator,
+                  String lineDelimiter, boolean isNegative) {
+        this.fileUrls = fileUrls;
+        if (fileUrls == null) {
+            this.fileUrls = new ArrayList<String>();
+        }
+        this.columnNames = columnNames;
+        if (columnNames == null) {
+            this.columnNames = new ArrayList<String>();
+        }
+        this.columnSeparator = columnSeprator;
+        this.lineDelimiter = lineDelimiter;
+        this.isNegative = isNegative;
+        this.columnToFunction = Maps.newHashMap();
+    }
+
+    public Source(List<String> fileUrls) {
+        this(fileUrls, null, DEFAULT_COLUMN_SEPARATOR, DEFAULT_LINE_DELIMITER, false);
+    }
+
+    public List<String> getFileUrls() {
+        return fileUrls;
+    }
+
+    public void setFileUrls(List<String> fileUrls) {
+        this.fileUrls = fileUrls;
+    }
+
+    public List<String> getColumnNames() {
+        return columnNames;
+    }
+
+    public void setColumnNames(List<String> columnNames) {
+        this.columnNames = columnNames;
+    }
+
+    public String getColumnSeparator() {
+        return columnSeparator;
+    }
+
+    public void setColumnSeparator(String columnSeparator) {
+        this.columnSeparator = columnSeparator;
+    }
+
+    public String getLineDelimiter() {
+        return lineDelimiter;
+    }
+
+    public void setLineDelimiter(String lineDelimiter) {
+        this.lineDelimiter = lineDelimiter;
+    }
+
+    public boolean isNegative() {
+        return isNegative;
+    }
+
+    public void setNegative(boolean isNegative) {
+        this.isNegative = isNegative;
+    }
+
+    public Map<String, Pair<String, List<String>>> getColumnToFunction() {
+        return columnToFunction;
+    }
+
+    public void setColumnToFunction(Map<String, Pair<String, List<String>>> columnToFunction) {
+        this.columnToFunction = columnToFunction;
+    }
+
+    public void write(DataOutput out) throws IOException {
+        int count = 0;
+        if (fileUrls == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            count = fileUrls.size();
+            out.writeInt(count);
+            for (String url : fileUrls) {
+                Text.writeString(out, url);
+            }
+        }
+
+        if (columnNames == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            count = columnNames.size();
+            out.writeInt(count);
+            for (String name : columnNames) {
+                Text.writeString(out, name);
+            }
+        }
+
+        Text.writeString(out, columnSeparator);
+        Text.writeString(out, lineDelimiter);
+        out.writeBoolean(isNegative);
+
+        if (columnToFunction == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            count = columnToFunction.size();
+            out.writeInt(count);
+            for (Entry<String, Pair<String, List<String>>> entry : columnToFunction.entrySet()) {
+                Text.writeString(out, entry.getKey());
+                Pair<String, List<String>> functionPair = entry.getValue();
+                Text.writeString(out, functionPair.first);
+                count = functionPair.second.size();
+                out.writeInt(count);
+                for (String arg : functionPair.second) {
+                    if (arg == null) {
+                        out.writeBoolean(false);
+                    } else {
+                        out.writeBoolean(true);
+                        Text.writeString(out, arg);
+                    }
+                }
+            }
+        }
+    }
+
+    public void readFields(DataInput in) throws IOException {
+        int count = 0;
+
+        if (in.readBoolean()) {
+            count = in.readInt();
+            for (int i = 0; i < count; i++) {
+                fileUrls.add(Text.readString(in).intern());
+            }
+        }
+
+        if (in.readBoolean()) {
+            count = in.readInt();
+            for (int i = 0; i < count; i++) {
+                columnNames.add(Text.readString(in).intern());
+            }
+        }
+
+        columnSeparator = Text.readString(in).intern();
+        lineDelimiter = Text.readString(in).intern();
+        isNegative = in.readBoolean();
+
+        if (in.readBoolean()) {
+            count = in.readInt();
+            for (int i = 0; i < count; i++) {
+                String column = Text.readString(in).intern();
+                String functionName = Text.readString(in).intern();
+                int argsNum = in.readInt();
+                List<String> args = Lists.newArrayList();
+                for (int j = 0; j < argsNum; j++) {
+                    if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_22) {
+                        if (in.readBoolean()) {
+                            args.add(Text.readString(in));
+                        }
+                    } else {
+                        args.add(Text.readString(in));
+                    }
+                }
+                columnToFunction.put(column, new Pair<String, List<String>>(functionName, args));
+            }
+        }
+    }
+
+    public boolean equals(Object obj) {
+        if (obj == this) {
+            return true;
+        }
+
+        if (!(obj instanceof Source)) {
+            return false;
+        }
+
+        Source source = (Source) obj;
+
+        // Check fileUrls
+        if (fileUrls != source.fileUrls) {
+            if (fileUrls == null || source.fileUrls == null) {
+                return false;
+            }
+            if (fileUrls.size() != source.fileUrls.size()) {
+                return false;
+            }
+            for (String url : fileUrls) {
+                if (!source.fileUrls.contains(url)) {
+                    return false;
+                }
+            }
+        }
+
+        // Check columnNames
+        if (columnNames != source.columnNames) {
+            if (columnNames == null || source.columnNames == null) {
+                return false;
+            }
+            if (columnNames.size() != source.columnNames.size()) {
+                return false;
+            }
+            for (String column : columnNames) {
+                if (!source.columnNames.contains(column)) {
+                    return false;
+                }
+            }
+        }
+
+        // columnToFunction
+        if (columnToFunction != source.columnToFunction) {
+            if (columnToFunction == null || source.columnToFunction == null) {
+                return false;
+            }
+            if (columnToFunction.size() != source.columnToFunction.size()) {
+                return false;
+            }
+            for (Entry<String, Pair<String, List<String>>> entry : columnToFunction.entrySet()) {
+                String column = entry.getKey();
+                if (!source.columnToFunction.containsKey(column)) {
+                    return false;
+                }
+                if (!entry.getValue().equals(source.columnToFunction.get(column))) {
+                    return false;
+                }
+            }
+        }
+
+        return columnSeparator.equals(source.columnSeparator)
+                && lineDelimiter.equals(source.lineDelimiter)
+                && isNegative == source.isNegative;
+    }
+
+    public int hashCode() {
+        if (fileUrls == null || columnNames == null) {
+            return -1;
+        }
+
+        int ret = fileUrls.size() ^ columnNames.size() ^ columnToFunction.size();
+        ret ^= columnSeparator.length();
+        ret ^= lineDelimiter.length();
+        return ret;
+    }
+}
diff --git a/fe/src/com/baidu/palo/load/TabletLoadInfo.java b/fe/src/com/baidu/palo/load/TabletLoadInfo.java
index a42daa7d77..2b5bd43d7e 100644
--- a/fe/src/com/baidu/palo/load/TabletLoadInfo.java
+++ b/fe/src/com/baidu/palo/load/TabletLoadInfo.java
@@ -13,109 +13,109 @@
 // specific language governing permissions and limitations
 // under the License.
-package com.baidu.palo.load; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.common.FeMetaVersion; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; - -public class TabletLoadInfo implements Writable { - private String filePath; - private long fileSize; - private Set sentReplicas; - - public TabletLoadInfo() { - this("", -1); - } - - public TabletLoadInfo(String filePath, long fileSize) { - this.filePath = filePath; - this.fileSize = fileSize; - this.sentReplicas = new HashSet(); - } - - public String getFilePath() { - return filePath; - } - - public long getFileSize() { - return fileSize; - } - - public boolean addSentReplica(long replicaId) { - sentReplicas.add(replicaId); - return true; - } - - public boolean isReplicaSent(long replicaId) { - return sentReplicas.contains(replicaId); - } - - public void write(DataOutput out) throws IOException { - if (filePath == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - Text.writeString(out, filePath); - out.writeLong(fileSize); - } - } - - public void readFields(DataInput in) throws IOException { - if (in.readBoolean()) { - filePath = Text.readString(in).intern(); - if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_8) { - fileSize = in.readLong(); - } - } else { - filePath = null; - fileSize = -1; - } - } - - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (!(obj instanceof TabletLoadInfo)) { - return false; - } - - TabletLoadInfo info = (TabletLoadInfo) obj; - - if (sentReplicas != info.sentReplicas) { - if (sentReplicas == null || info.sentReplicas == null) { - return false; - } - if (sentReplicas.size() != info.sentReplicas.size()) { - return false; - } - for (long id : sentReplicas) { - if (!info.sentReplicas.contains(id)) { - return false; - } - } - } - - if (filePath != info.filePath) { - if (filePath == null || info.filePath == null) { - return false; - } - } - - return filePath.equals(info.filePath); - } - - public int hashCode() { - int ret = filePath.length(); - return ret; - } -} +package com.baidu.palo.load; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.common.FeMetaVersion; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class TabletLoadInfo implements Writable { + private String filePath; + private long fileSize; + private Set sentReplicas; + + public TabletLoadInfo() { + this("", -1); + } + + public TabletLoadInfo(String filePath, long fileSize) { + this.filePath = filePath; + this.fileSize = fileSize; + this.sentReplicas = new HashSet(); + } + + public String getFilePath() { + return filePath; + } + + public long getFileSize() { + return fileSize; + } + + public boolean addSentReplica(long replicaId) { + sentReplicas.add(replicaId); + return true; + } + + public boolean isReplicaSent(long replicaId) { + return sentReplicas.contains(replicaId); + } + + public void write(DataOutput out) throws IOException { + if (filePath == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + Text.writeString(out, filePath); + out.writeLong(fileSize); + } + } + + public void readFields(DataInput in) throws IOException { + if (in.readBoolean()) { + filePath = 
Text.readString(in).intern(); + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_8) { + fileSize = in.readLong(); + } + } else { + filePath = null; + fileSize = -1; + } + } + + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof TabletLoadInfo)) { + return false; + } + + TabletLoadInfo info = (TabletLoadInfo) obj; + + if (sentReplicas != info.sentReplicas) { + if (sentReplicas == null || info.sentReplicas == null) { + return false; + } + if (sentReplicas.size() != info.sentReplicas.size()) { + return false; + } + for (long id : sentReplicas) { + if (!info.sentReplicas.contains(id)) { + return false; + } + } + } + + if (filePath != info.filePath) { + if (filePath == null || info.filePath == null) { + return false; + } + } + + return filePath.equals(info.filePath); + } + + public int hashCode() { + int ret = filePath.length(); + return ret; + } +} diff --git a/fe/src/com/baidu/palo/master/Checkpoint.java b/fe/src/com/baidu/palo/master/Checkpoint.java index 5b35b2d73e..9830844fbf 100644 --- a/fe/src/com/baidu/palo/master/Checkpoint.java +++ b/fe/src/com/baidu/palo/master/Checkpoint.java @@ -103,6 +103,7 @@ public class Checkpoint extends Daemon { long replayedJournalId = -1; // generate new image file + LOG.info("begin to generate new image: image.{}", replayedJournalId); catalog = Catalog.getCheckpoint(); catalog.setEditLog(editLog); try { diff --git a/fe/src/com/baidu/palo/master/MetaHelper.java b/fe/src/com/baidu/palo/master/MetaHelper.java index 0c15a1e668..ee2f09a8c1 100644 --- a/fe/src/com/baidu/palo/master/MetaHelper.java +++ b/fe/src/com/baidu/palo/master/MetaHelper.java @@ -13,85 +13,85 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.master; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.common.io.IOUtils; - -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.net.HttpURLConnection; -import java.net.URL; - -public class MetaHelper { - private static final String PART_SUFFIX = ".part"; - public static final String X_IMAGE_SIZE = "X-Image-Size"; - private static final int BUFFER_BYTES = 8 * 1024; - private static final int CHECKPOINT_LIMIT_BYTES = 30 * 1024 * 1024; - - public static File getMasterImageDir() { - String metaDir = Catalog.IMAGE_DIR; - return new File(metaDir); - } - - public static int getLimit() { - return CHECKPOINT_LIMIT_BYTES; - } - - // rename the .PART_SUFFIX file to filename - public static File complete(String filename, File dir) throws IOException { - File file = new File(dir, filename + MetaHelper.PART_SUFFIX); - File newFile = new File(dir, filename); - if (!file.renameTo(newFile)) { - throw new IOException("Complete file" + filename + " failed"); - } - return newFile; - } - - public static OutputStream getOutputStream(String filename, File dir) - throws FileNotFoundException { - File file = new File(dir, filename + MetaHelper.PART_SUFFIX); - return new FileOutputStream(file); - } - - // download file from remote node - public static void getRemoteFile(String urlStr, int timeout, OutputStream out) - throws IOException { - URL url = new URL(urlStr); - HttpURLConnection conn = null; - - try { - conn = (HttpURLConnection) url.openConnection(); - conn.setConnectTimeout(timeout); - conn.setReadTimeout(timeout); - - // Get image size - long imageSize = -1; - String 
imageSizeStr = conn.getHeaderField(X_IMAGE_SIZE); - if (imageSizeStr != null) { - imageSize = Long.parseLong(imageSizeStr); - } - - BufferedInputStream bin = new BufferedInputStream(conn.getInputStream()); - - // Do not limit speed in client side. - long bytes = IOUtils.copyBytes(bin, out, BUFFER_BYTES, CHECKPOINT_LIMIT_BYTES, true); - - if ((imageSize > 0) && (bytes != imageSize)) { - throw new IOException("Unexpected image size, expected: " + imageSize + ", actual: " + bytes); - } - } finally { - if (conn != null) { - conn.disconnect(); - } - if (out != null) { - out.close(); - } - } - } - -} +package com.baidu.palo.master; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.common.io.IOUtils; + +import java.io.BufferedInputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URL; + +public class MetaHelper { + private static final String PART_SUFFIX = ".part"; + public static final String X_IMAGE_SIZE = "X-Image-Size"; + private static final int BUFFER_BYTES = 8 * 1024; + private static final int CHECKPOINT_LIMIT_BYTES = 30 * 1024 * 1024; + + public static File getMasterImageDir() { + String metaDir = Catalog.IMAGE_DIR; + return new File(metaDir); + } + + public static int getLimit() { + return CHECKPOINT_LIMIT_BYTES; + } + + // rename the .PART_SUFFIX file to filename + public static File complete(String filename, File dir) throws IOException { + File file = new File(dir, filename + MetaHelper.PART_SUFFIX); + File newFile = new File(dir, filename); + if (!file.renameTo(newFile)) { + throw new IOException("Complete file" + filename + " failed"); + } + return newFile; + } + + public static OutputStream getOutputStream(String filename, File dir) + throws FileNotFoundException { + File file = new File(dir, filename + MetaHelper.PART_SUFFIX); + return new FileOutputStream(file); + } + + // download file from remote node + public static void getRemoteFile(String urlStr, int timeout, OutputStream out) + throws IOException { + URL url = new URL(urlStr); + HttpURLConnection conn = null; + + try { + conn = (HttpURLConnection) url.openConnection(); + conn.setConnectTimeout(timeout); + conn.setReadTimeout(timeout); + + // Get image size + long imageSize = -1; + String imageSizeStr = conn.getHeaderField(X_IMAGE_SIZE); + if (imageSizeStr != null) { + imageSize = Long.parseLong(imageSizeStr); + } + + BufferedInputStream bin = new BufferedInputStream(conn.getInputStream()); + + // Do not limit speed in client side. 
+ long bytes = IOUtils.copyBytes(bin, out, BUFFER_BYTES, CHECKPOINT_LIMIT_BYTES, true); + + if ((imageSize > 0) && (bytes != imageSize)) { + throw new IOException("Unexpected image size, expected: " + imageSize + ", actual: " + bytes); + } + } finally { + if (conn != null) { + conn.disconnect(); + } + if (out != null) { + out.close(); + } + } + } + +} diff --git a/fe/src/com/baidu/palo/mysql/privilege/PrivBitSet.java b/fe/src/com/baidu/palo/mysql/privilege/PrivBitSet.java index 47d20022eb..00680daf73 100644 --- a/fe/src/com/baidu/palo/mysql/privilege/PrivBitSet.java +++ b/fe/src/com/baidu/palo/mysql/privilege/PrivBitSet.java @@ -27,10 +27,10 @@ import java.io.IOException; import java.util.List; // ....0000000000 -// ^ ^ -// | | -// | -- first priv(0) -// |-------last priv(6) +// ^ ^ +// | | +// | -- first priv(0) +// |--------last priv(7) public class PrivBitSet implements Writable { private long set = 0; diff --git a/fe/src/com/baidu/palo/persist/CloneInfo.java b/fe/src/com/baidu/palo/persist/CloneInfo.java index fefb3fdc21..74cc0f1718 100644 --- a/fe/src/com/baidu/palo/persist/CloneInfo.java +++ b/fe/src/com/baidu/palo/persist/CloneInfo.java @@ -13,148 +13,148 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -@Deprecated -// (cmy 2015-07-22) -// donot use anymore. use ReplicaPersistInfo instead. -// remove later -public class CloneInfo implements Writable { - public enum CloneType { - CLONE, - DELETE - } - - private long dbId; - private long tableId; - private long partitionId; - private long indexId; - private long tabletId; - private long replicaId; - private long version; - private long versionHash; - private long dataSize; - private long rowCount; - private long backendId; - private CloneType type; - - - public CloneInfo() { - this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, CloneType.CLONE); - } - - public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, - long replicaId, CloneType type) { - this(dbId, tableId, partitionId, indexId, tabletId, replicaId, 0, 0, 0, 0, 0, type); - } - - public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, - long replicaId, long version, long versionHash, long dataSize, long rowCount, - long backendId, CloneType type) { - this.dbId = dbId; - this.tableId = tableId; - this.partitionId = partitionId; - this.indexId = indexId; - this.tabletId = tabletId; - this.replicaId = replicaId; - this.version = version; - this.versionHash = versionHash; - this.dataSize = dataSize; - this.rowCount = rowCount; - this.backendId = backendId; - this.type = type; - } - - public long getDbId() { - return this.dbId; - } - - public long getTableId() { - return this.tableId; - } - - public long getPartitionId() { - return this.partitionId; - } - - public long getIndexId() { - return this.indexId; - } - - public long getTabletId() { - return this.tabletId; - } - - public long getReplicaId() { - return this.replicaId; - } - - public long getVersion() { - return this.version; - } - - public long getVersionHash() { - return this.versionHash; - } - - public long getDataSize() { - return this.dataSize; - } - - public long getRowCount() { - return this.rowCount; - } - - public long getBackendId() { - return this.backendId; - } - - public CloneType getType() { - return this.type; - } - - 
@Override - public void write(DataOutput out) throws IOException { - out.writeLong(dbId); - out.writeLong(tableId); - out.writeLong(partitionId); - out.writeLong(indexId); - out.writeLong(tabletId); - out.writeLong(replicaId); - out.writeLong(version); - out.writeLong(versionHash); - out.writeLong(dataSize); - out.writeLong(rowCount); - out.writeLong(backendId); - Text.writeString(out, type.toString()); - } - - @Override - public void readFields(DataInput in) throws IOException { - dbId = in.readLong(); - tableId = in.readLong(); - partitionId = in.readLong(); - indexId = in.readLong(); - tabletId = in.readLong(); - replicaId = in.readLong(); - version = in.readLong(); - versionHash = in.readLong(); - dataSize = in.readLong(); - rowCount = in.readLong(); - backendId = in.readLong(); - type = CloneType.valueOf(Text.readString(in)); - } - - public static CloneInfo read(DataInput in) throws IOException { - CloneInfo cloneInfo = new CloneInfo(); - cloneInfo.readFields(in); - return cloneInfo; - } -} +package com.baidu.palo.persist; + +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +@Deprecated +// (cmy 2015-07-22) +// donot use anymore. use ReplicaPersistInfo instead. +// remove later +public class CloneInfo implements Writable { + public enum CloneType { + CLONE, + DELETE + } + + private long dbId; + private long tableId; + private long partitionId; + private long indexId; + private long tabletId; + private long replicaId; + private long version; + private long versionHash; + private long dataSize; + private long rowCount; + private long backendId; + private CloneType type; + + + public CloneInfo() { + this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, CloneType.CLONE); + } + + public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, + long replicaId, CloneType type) { + this(dbId, tableId, partitionId, indexId, tabletId, replicaId, 0, 0, 0, 0, 0, type); + } + + public CloneInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, + long replicaId, long version, long versionHash, long dataSize, long rowCount, + long backendId, CloneType type) { + this.dbId = dbId; + this.tableId = tableId; + this.partitionId = partitionId; + this.indexId = indexId; + this.tabletId = tabletId; + this.replicaId = replicaId; + this.version = version; + this.versionHash = versionHash; + this.dataSize = dataSize; + this.rowCount = rowCount; + this.backendId = backendId; + this.type = type; + } + + public long getDbId() { + return this.dbId; + } + + public long getTableId() { + return this.tableId; + } + + public long getPartitionId() { + return this.partitionId; + } + + public long getIndexId() { + return this.indexId; + } + + public long getTabletId() { + return this.tabletId; + } + + public long getReplicaId() { + return this.replicaId; + } + + public long getVersion() { + return this.version; + } + + public long getVersionHash() { + return this.versionHash; + } + + public long getDataSize() { + return this.dataSize; + } + + public long getRowCount() { + return this.rowCount; + } + + public long getBackendId() { + return this.backendId; + } + + public CloneType getType() { + return this.type; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeLong(dbId); + out.writeLong(tableId); + out.writeLong(partitionId); + out.writeLong(indexId); + out.writeLong(tabletId); + out.writeLong(replicaId); + out.writeLong(version); + 
out.writeLong(versionHash); + out.writeLong(dataSize); + out.writeLong(rowCount); + out.writeLong(backendId); + Text.writeString(out, type.toString()); + } + + @Override + public void readFields(DataInput in) throws IOException { + dbId = in.readLong(); + tableId = in.readLong(); + partitionId = in.readLong(); + indexId = in.readLong(); + tabletId = in.readLong(); + replicaId = in.readLong(); + version = in.readLong(); + versionHash = in.readLong(); + dataSize = in.readLong(); + rowCount = in.readLong(); + backendId = in.readLong(); + type = CloneType.valueOf(Text.readString(in)); + } + + public static CloneInfo read(DataInput in) throws IOException { + CloneInfo cloneInfo = new CloneInfo(); + cloneInfo.readFields(in); + return cloneInfo; + } +} diff --git a/fe/src/com/baidu/palo/persist/CreateTableInfo.java b/fe/src/com/baidu/palo/persist/CreateTableInfo.java index 830abeb2d8..cd109aaa0b 100644 --- a/fe/src/com/baidu/palo/persist/CreateTableInfo.java +++ b/fe/src/com/baidu/palo/persist/CreateTableInfo.java @@ -13,78 +13,78 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Table; -import com.baidu.palo.cluster.ClusterNamespace; -import com.baidu.palo.common.io.Writable; -import com.baidu.palo.system.SystemInfoService; -import com.baidu.palo.common.FeMetaVersion; -import com.baidu.palo.common.io.Text; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class CreateTableInfo implements Writable { - public static final Logger LOG = LoggerFactory.getLogger(CreateTableInfo.class); - - private String dbName; - private Table table; - - public CreateTableInfo() { - // for persist - } - - public CreateTableInfo(String dbName, Table table) { - this.dbName = dbName; - this.table = table; - } - - public String getDbName() { - return dbName; - } - - public Table getTable() { - return table; - } - - public void write(DataOutput out) throws IOException { - Text.writeString(out, dbName); - table.write(out); - } - - public void readFields(DataInput in) throws IOException { - if (Catalog.getCurrentCatalogJournalVersion() < FeMetaVersion.VERSION_30) { - dbName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, Text.readString(in)); - } else { - dbName = Text.readString(in); - } - - table = Table.read(in); - } - - public static CreateTableInfo read(DataInput in) throws IOException { - CreateTableInfo createTableInfo = new CreateTableInfo(); - createTableInfo.readFields(in); - return createTableInfo; - } - - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!(obj instanceof CreateTableInfo)) { - return false; - } - - CreateTableInfo info = (CreateTableInfo) obj; - - return (dbName.equals(info.dbName)) - && (table.equals(info.table)); - } -} +package com.baidu.palo.persist; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Table; +import com.baidu.palo.cluster.ClusterNamespace; +import com.baidu.palo.common.io.Writable; +import com.baidu.palo.system.SystemInfoService; +import com.baidu.palo.common.FeMetaVersion; +import com.baidu.palo.common.io.Text; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class CreateTableInfo implements Writable { + public static final Logger LOG = 
LoggerFactory.getLogger(CreateTableInfo.class); + + private String dbName; + private Table table; + + public CreateTableInfo() { + // for persist + } + + public CreateTableInfo(String dbName, Table table) { + this.dbName = dbName; + this.table = table; + } + + public String getDbName() { + return dbName; + } + + public Table getTable() { + return table; + } + + public void write(DataOutput out) throws IOException { + Text.writeString(out, dbName); + table.write(out); + } + + public void readFields(DataInput in) throws IOException { + if (Catalog.getCurrentCatalogJournalVersion() < FeMetaVersion.VERSION_30) { + dbName = ClusterNamespace.getFullName(SystemInfoService.DEFAULT_CLUSTER, Text.readString(in)); + } else { + dbName = Text.readString(in); + } + + table = Table.read(in); + } + + public static CreateTableInfo read(DataInput in) throws IOException { + CreateTableInfo createTableInfo = new CreateTableInfo(); + createTableInfo.readFields(in); + return createTableInfo; + } + + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof CreateTableInfo)) { + return false; + } + + CreateTableInfo info = (CreateTableInfo) obj; + + return (dbName.equals(info.dbName)) + && (table.equals(info.table)); + } +} diff --git a/fe/src/com/baidu/palo/persist/DatabaseInfo.java b/fe/src/com/baidu/palo/persist/DatabaseInfo.java index 83cb6e6138..2458bc334a 100644 --- a/fe/src/com/baidu/palo/persist/DatabaseInfo.java +++ b/fe/src/com/baidu/palo/persist/DatabaseInfo.java @@ -13,97 +13,97 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Database.DbState; -import com.baidu.palo.common.FeMetaVersion; -import com.baidu.palo.common.io.Text; -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class DatabaseInfo implements Writable { - - private String dbName; - private String newDbName; - private long quota; - private String clusterName; - private DbState dbState; - - public DatabaseInfo() { - // for persist - this.dbName = ""; - this.newDbName = ""; - this.quota = 0; - this.clusterName = ""; - this.dbState = DbState.NORMAL; - } - - public DatabaseInfo(String dbName, String newDbName, long quota) { - this.dbName = dbName; - this.newDbName = newDbName; - this.quota = quota; - this.clusterName = ""; - this.dbState = DbState.NORMAL; - } - - public String getDbName() { - return dbName; - } - - public String getNewDbName() { - return newDbName; - } - - public long getQuota() { - return quota; - } - - public static DatabaseInfo read(DataInput in) throws IOException { - DatabaseInfo dbInfo = new DatabaseInfo(); - dbInfo.readFields(in); - return dbInfo; - } - - @Override - public void write(DataOutput out) throws IOException { - Text.writeString(out, dbName); - Text.writeString(out, newDbName); - out.writeLong(quota); - Text.writeString(out, this.clusterName); - Text.writeString(out, this.dbState.name()); - } - - @Override - public void readFields(DataInput in) throws IOException { - this.dbName = Text.readString(in); - if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_10) { - newDbName = Text.readString(in); - } - this.quota = in.readLong(); - if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) { - this.clusterName = Text.readString(in); - this.dbState = DbState.valueOf(Text.readString(in)); - } - } - - public 
String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public DbState getDbState() { - return dbState; - } - - public void setDbState(DbState dbState) { - this.dbState = dbState; - } - -} +package com.baidu.palo.persist; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Database.DbState; +import com.baidu.palo.common.FeMetaVersion; +import com.baidu.palo.common.io.Text; +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class DatabaseInfo implements Writable { + + private String dbName; + private String newDbName; + private long quota; + private String clusterName; + private DbState dbState; + + public DatabaseInfo() { + // for persist + this.dbName = ""; + this.newDbName = ""; + this.quota = 0; + this.clusterName = ""; + this.dbState = DbState.NORMAL; + } + + public DatabaseInfo(String dbName, String newDbName, long quota) { + this.dbName = dbName; + this.newDbName = newDbName; + this.quota = quota; + this.clusterName = ""; + this.dbState = DbState.NORMAL; + } + + public String getDbName() { + return dbName; + } + + public String getNewDbName() { + return newDbName; + } + + public long getQuota() { + return quota; + } + + public static DatabaseInfo read(DataInput in) throws IOException { + DatabaseInfo dbInfo = new DatabaseInfo(); + dbInfo.readFields(in); + return dbInfo; + } + + @Override + public void write(DataOutput out) throws IOException { + Text.writeString(out, dbName); + Text.writeString(out, newDbName); + out.writeLong(quota); + Text.writeString(out, this.clusterName); + Text.writeString(out, this.dbState.name()); + } + + @Override + public void readFields(DataInput in) throws IOException { + this.dbName = Text.readString(in); + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_10) { + newDbName = Text.readString(in); + } + this.quota = in.readLong(); + if (Catalog.getCurrentCatalogJournalVersion() >= FeMetaVersion.VERSION_30) { + this.clusterName = Text.readString(in); + this.dbState = DbState.valueOf(Text.readString(in)); + } + } + + public String getClusterName() { + return clusterName; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public DbState getDbState() { + return dbState; + } + + public void setDbState(DbState dbState) { + this.dbState = dbState; + } + +} diff --git a/fe/src/com/baidu/palo/persist/DropInfo.java b/fe/src/com/baidu/palo/persist/DropInfo.java index d11b7bcc7c..9718126e83 100644 --- a/fe/src/com/baidu/palo/persist/DropInfo.java +++ b/fe/src/com/baidu/palo/persist/DropInfo.java @@ -13,83 +13,83 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.persist; - -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class DropInfo implements Writable { - private long dbId; - private long tableId; - - private long indexId; - - public DropInfo() { - } - - public DropInfo(long dbId, long tableId, long indexId) { - this.dbId = dbId; - this.tableId = tableId; - this.indexId = indexId; - } - - public long getDbId() { - return this.dbId; - } - - public long getTableId() { - return this.tableId; - } - - public long getIndexId() { - return this.indexId; - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeLong(dbId); - out.writeLong(tableId); - if (indexId == -1L) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeLong(indexId); - } - } - - @Override - public void readFields(DataInput in) throws IOException { - dbId = in.readLong(); - tableId = in.readLong(); - - boolean hasIndexId = in.readBoolean(); - if (hasIndexId) { - indexId = in.readLong(); - } else { - indexId = -1L; - } - } - - public static DropInfo read(DataInput in) throws IOException { - DropInfo dropInfo = new DropInfo(); - dropInfo.readFields(in); - return dropInfo; - } - - public boolean equals (Object obj) { - if (this == obj) { - return true; - } - - if (!(obj instanceof DropInfo)) { - return false; - } - - DropInfo info = (DropInfo) obj; - - return dbId == info.dbId && tableId == info.tableId; - } -} +package com.baidu.palo.persist; + +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class DropInfo implements Writable { + private long dbId; + private long tableId; + + private long indexId; + + public DropInfo() { + } + + public DropInfo(long dbId, long tableId, long indexId) { + this.dbId = dbId; + this.tableId = tableId; + this.indexId = indexId; + } + + public long getDbId() { + return this.dbId; + } + + public long getTableId() { + return this.tableId; + } + + public long getIndexId() { + return this.indexId; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeLong(dbId); + out.writeLong(tableId); + if (indexId == -1L) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeLong(indexId); + } + } + + @Override + public void readFields(DataInput in) throws IOException { + dbId = in.readLong(); + tableId = in.readLong(); + + boolean hasIndexId = in.readBoolean(); + if (hasIndexId) { + indexId = in.readLong(); + } else { + indexId = -1L; + } + } + + public static DropInfo read(DataInput in) throws IOException { + DropInfo dropInfo = new DropInfo(); + dropInfo.readFields(in); + return dropInfo; + } + + public boolean equals (Object obj) { + if (this == obj) { + return true; + } + + if (!(obj instanceof DropInfo)) { + return false; + } + + DropInfo info = (DropInfo) obj; + + return dbId == info.dbId && tableId == info.tableId; + } +} diff --git a/fe/src/com/baidu/palo/persist/EditLogInputStream.java b/fe/src/com/baidu/palo/persist/EditLogInputStream.java index ca8605b1b9..56f5f25bbb 100644 --- a/fe/src/com/baidu/palo/persist/EditLogInputStream.java +++ b/fe/src/com/baidu/palo/persist/EditLogInputStream.java @@ -13,29 +13,29 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.persist; - -import java.io.IOException; -import java.io.InputStream; - -/** - * A generic abstract class to support reading edits log data from persistent - * storage. - * - * It should stream bytes from the storage exactly as they were written into the - * #{@link EditLogOutputStream}. - */ -public abstract class EditLogInputStream extends InputStream { - abstract String getName(); - - public abstract int available() throws IOException; - - public abstract int read() throws IOException; - - public abstract int read(byte[] buffer, int offset, int len) throws IOException; - - public abstract void close() throws IOException; - - abstract long length() throws IOException; - -} +package com.baidu.palo.persist; + +import java.io.IOException; +import java.io.InputStream; + +/** + * A generic abstract class to support reading edits log data from persistent + * storage. + * + * It should stream bytes from the storage exactly as they were written into the + * #{@link EditLogOutputStream}. + */ +public abstract class EditLogInputStream extends InputStream { + abstract String getName(); + + public abstract int available() throws IOException; + + public abstract int read() throws IOException; + + public abstract int read(byte[] buffer, int offset, int len) throws IOException; + + public abstract void close() throws IOException; + + abstract long length() throws IOException; + +} diff --git a/fe/src/com/baidu/palo/persist/EditLogOutputStream.java b/fe/src/com/baidu/palo/persist/EditLogOutputStream.java index edff1dc057..89ed76483a 100644 --- a/fe/src/com/baidu/palo/persist/EditLogOutputStream.java +++ b/fe/src/com/baidu/palo/persist/EditLogOutputStream.java @@ -13,89 +13,89 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import com.baidu.palo.common.io.Writable; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * A generic abstract class to support journaling of edits logs into a - * persistent storage. - */ -public abstract class EditLogOutputStream extends OutputStream { - private long numSync; // number of sync(s) to disk - private long totalTimeSync; // total time to sync - - EditLogOutputStream() throws IOException { - numSync = 0; - totalTimeSync = 0; - } - - abstract String getName(); - - public abstract void write(int b) throws IOException; - - /** - * Write edits log record into the stream. The record is represented by - * operation name and an array of Writable arguments. - * - * @param op - * operation - * @param writable - * Writable argument - * @throws IOException - */ - public abstract void write(short op, Writable writable) throws IOException; - - abstract void create() throws IOException; - - public abstract void close() throws IOException; - - /** - * All data that has been written to the stream so far will be flushed. New - * data can be still written to the stream while flushing is performed. - */ - public abstract void setReadyToFlush() throws IOException; - - /** - * Flush and sync all data that is ready to be flush - * {@link #setReadyToFlush()} into underlying persistent store. - * - * @throws IOException - */ - protected abstract void flushAndSync() throws IOException; - - /** - * Flush data to persistent store. Collect sync metrics. 
- */ - public void flush() throws IOException { - numSync++; - long start = System.currentTimeMillis(); - flushAndSync(); - long end = System.currentTimeMillis(); - totalTimeSync += (end - start); - } - - /** - * Return the size of the current edits log. Length is used to check when it - * is large enough to start a checkpoint. Should be either 0 or the same as - * other streams. - */ - abstract long length() throws IOException; - - /** - * Return total time spent in {@link #flushAndSync()} - */ - long getTotalSyncTime() { - return totalTimeSync; - } - - /** - * Return number of calls to {@link #flushAndSync()} - */ - long getNumSync() { - return numSync; - } -} +package com.baidu.palo.persist; + +import com.baidu.palo.common.io.Writable; + +import java.io.IOException; +import java.io.OutputStream; + +/** + * A generic abstract class to support journaling of edits logs into a + * persistent storage. + */ +public abstract class EditLogOutputStream extends OutputStream { + private long numSync; // number of sync(s) to disk + private long totalTimeSync; // total time to sync + + EditLogOutputStream() throws IOException { + numSync = 0; + totalTimeSync = 0; + } + + abstract String getName(); + + public abstract void write(int b) throws IOException; + + /** + * Write edits log record into the stream. The record is represented by + * operation name and an array of Writable arguments. + * + * @param op + * operation + * @param writable + * Writable argument + * @throws IOException + */ + public abstract void write(short op, Writable writable) throws IOException; + + abstract void create() throws IOException; + + public abstract void close() throws IOException; + + /** + * All data that has been written to the stream so far will be flushed. New + * data can be still written to the stream while flushing is performed. + */ + public abstract void setReadyToFlush() throws IOException; + + /** + * Flush and sync all data that is ready to be flush + * {@link #setReadyToFlush()} into underlying persistent store. + * + * @throws IOException + */ + protected abstract void flushAndSync() throws IOException; + + /** + * Flush data to persistent store. Collect sync metrics. + */ + public void flush() throws IOException { + numSync++; + long start = System.currentTimeMillis(); + flushAndSync(); + long end = System.currentTimeMillis(); + totalTimeSync += (end - start); + } + + /** + * Return the size of the current edits log. Length is used to check when it + * is large enough to start a checkpoint. Should be either 0 or the same as + * other streams. + */ + abstract long length() throws IOException; + + /** + * Return total time spent in {@link #flushAndSync()} + */ + long getTotalSyncTime() { + return totalTimeSync; + } + + /** + * Return number of calls to {@link #flushAndSync()} + */ + long getNumSync() { + return numSync; + } +} diff --git a/fe/src/com/baidu/palo/persist/ReplicaPersistInfo.java b/fe/src/com/baidu/palo/persist/ReplicaPersistInfo.java index e56d353f64..a315465e99 100644 --- a/fe/src/com/baidu/palo/persist/ReplicaPersistInfo.java +++ b/fe/src/com/baidu/palo/persist/ReplicaPersistInfo.java @@ -13,250 +13,250 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.persist; - -import com.baidu.palo.common.io.Writable; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -public class ReplicaPersistInfo implements Writable { - // required - private long dbId; - private long tableId; - private long partitionId; - private long indexId; - private long tabletId; - - private long replicaId; - private long backendId; - - private long version; - private long versionHash; - private long dataSize; - private long rowCount; - - public static ReplicaPersistInfo createForAdd(long dbId, long tableId, long partitionId, long indexId, - long tabletId, long backendId, long replicaId, long version, - long versionHash, long dataSize, long rowCount) { - return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, - replicaId, version, versionHash, dataSize, rowCount); - } - - /* - * this for delete stmt operation - */ - public static ReplicaPersistInfo createForCondDelete(long indexId, long tabletId, long replicaId, long version, - long versionHash, long dataSize, long rowCount) { - return new ReplicaPersistInfo(-1L, -1L, -1L, indexId, tabletId, -1L, - replicaId, version, versionHash, dataSize, rowCount); - } - - /* - * this for remove replica from meta - */ - public static ReplicaPersistInfo createForDelete(long dbId, long tableId, long partitionId, long indexId, - long tabletId, long backendId) { - return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, - -1L, -1L, -1L, -1L, -1L); - } - - public static ReplicaPersistInfo createForClone(long dbId, long tableId, long partitionId, long indexId, - long tabletId, long backendId, long replicaId, long version, - long versionHash, long dataSize, long rowCount) { - return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, replicaId, - version, versionHash, dataSize, rowCount); - } - - public static ReplicaPersistInfo createForLoad(long tableId, long partitionId, long indexId, long tabletId, - long replicaId, long version, long versionHash, long dataSize, - long rowCount) { - return new ReplicaPersistInfo(-1L, tableId, partitionId, indexId, tabletId, -1L, - replicaId, version, versionHash, dataSize, rowCount); - } - - public static ReplicaPersistInfo createForRollup(long indexId, long tabletId, long backendId, long version, - long versionHash, long dataSize, long rowCount) { - return new ReplicaPersistInfo(-1L, -1L, -1L, indexId, tabletId, backendId, -1L, - version, versionHash, dataSize, rowCount); - } - - public static ReplicaPersistInfo createForSchemaChange(long partitionId, long indexId, long tabletId, - long backendId, long version, long versionHash, - long dataSize, long rowCount) { - return new ReplicaPersistInfo(-1L, -1L, partitionId, indexId, tabletId, backendId, -1L, version, - versionHash, dataSize, rowCount); - } - - public static ReplicaPersistInfo createForClearRollupInfo(long dbId, long tableId, long partitionId, long indexId) { - return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, -1L, -1L, -1L, -1L, -1L, -1L, -1L); - } - - public ReplicaPersistInfo() { - } - - private ReplicaPersistInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, long backendId, - long replicaId, long version, long versionHash, long dataSize, long rowCount) { - this.dbId = dbId; - this.tableId = tableId; - this.partitionId = partitionId; - this.indexId = indexId; - this.tabletId = tabletId; - this.backendId = backendId; - - this.replicaId = replicaId; - - 
this.version = version; - this.versionHash = versionHash; - this.dataSize = dataSize; - this.rowCount = rowCount; - } - - public void setReplicaId(long replicaId) { - this.replicaId = replicaId; - } - - public void setBackendId(long backendId) { - this.backendId = backendId; - } - - public void setVersion(long version) { - this.version = version; - } - - public void setVersionHash(long versionHash) { - this.versionHash = versionHash; - } - - public void setDataSize(long dataSize) { - this.dataSize = dataSize; - } - - public void setRowCount(long rowCount) { - this.rowCount = rowCount; - } - - public long getDbId() { - return dbId; - } - - public long getTableId() { - return tableId; - } - - public long getPartitionId() { - return partitionId; - } - - public long getIndexId() { - return indexId; - } - - public long getTabletId() { - return tabletId; - } - - public long getReplicaId() { - return replicaId; - } - - public long getBackendId() { - return backendId; - } - - public long getVersion() { - return version; - } - - public long getVersionHash() { - return versionHash; - } - - public long getDataSize() { - return dataSize; - } - - public long getRowCount() { - return rowCount; - } - - public static ReplicaPersistInfo read(DataInput in) throws IOException { - ReplicaPersistInfo replicaInfo = new ReplicaPersistInfo(); - replicaInfo.readFields(in); - return replicaInfo; - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeLong(dbId); - out.writeLong(tableId); - out.writeLong(partitionId); - out.writeLong(indexId); - out.writeLong(tabletId); - out.writeLong(backendId); - out.writeLong(replicaId); - out.writeLong(version); - out.writeLong(versionHash); - out.writeLong(dataSize); - out.writeLong(rowCount); - } - - @Override - public void readFields(DataInput in) throws IOException { - dbId = in.readLong(); - tableId = in.readLong(); - partitionId = in.readLong(); - indexId = in.readLong(); - tabletId = in.readLong(); - backendId = in.readLong(); - replicaId = in.readLong(); - version = in.readLong(); - versionHash = in.readLong(); - dataSize = in.readLong(); - rowCount = in.readLong(); - } - - @Override - public boolean equals(Object obj) { - if (obj == this) { - return true; - } - - if (!(obj instanceof ReplicaPersistInfo)) { - return false; - } - - ReplicaPersistInfo info = (ReplicaPersistInfo) obj; - - return backendId == info.backendId - && replicaId == info.replicaId - && tabletId == info.tabletId - && indexId == info.indexId - && partitionId == info.partitionId - && tableId == info.tableId - && dbId == info.dbId - && version == info.version - && versionHash == info.versionHash - && dataSize == info.dataSize - && rowCount == info.rowCount; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("table id: ").append(tableId); - sb.append(" partition id: ").append(partitionId); - sb.append(" index id: ").append(indexId); - sb.append(" index id: ").append(indexId); - sb.append(" tablet id: ").append(tabletId); - sb.append(" backend id: ").append(backendId); - sb.append(" replica id: ").append(replicaId); - sb.append(" version: ").append(version); - sb.append(" version hash: ").append(versionHash); - sb.append(" data size: ").append(dataSize); - sb.append(" row count: ").append(rowCount); - - return sb.toString(); - } -} +package com.baidu.palo.persist; + +import com.baidu.palo.common.io.Writable; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +public class 
ReplicaPersistInfo implements Writable { + // required + private long dbId; + private long tableId; + private long partitionId; + private long indexId; + private long tabletId; + + private long replicaId; + private long backendId; + + private long version; + private long versionHash; + private long dataSize; + private long rowCount; + + public static ReplicaPersistInfo createForAdd(long dbId, long tableId, long partitionId, long indexId, + long tabletId, long backendId, long replicaId, long version, + long versionHash, long dataSize, long rowCount) { + return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, + replicaId, version, versionHash, dataSize, rowCount); + } + + /* + * this for delete stmt operation + */ + public static ReplicaPersistInfo createForCondDelete(long indexId, long tabletId, long replicaId, long version, + long versionHash, long dataSize, long rowCount) { + return new ReplicaPersistInfo(-1L, -1L, -1L, indexId, tabletId, -1L, + replicaId, version, versionHash, dataSize, rowCount); + } + + /* + * this for remove replica from meta + */ + public static ReplicaPersistInfo createForDelete(long dbId, long tableId, long partitionId, long indexId, + long tabletId, long backendId) { + return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, + -1L, -1L, -1L, -1L, -1L); + } + + public static ReplicaPersistInfo createForClone(long dbId, long tableId, long partitionId, long indexId, + long tabletId, long backendId, long replicaId, long version, + long versionHash, long dataSize, long rowCount) { + return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, tabletId, backendId, replicaId, + version, versionHash, dataSize, rowCount); + } + + public static ReplicaPersistInfo createForLoad(long tableId, long partitionId, long indexId, long tabletId, + long replicaId, long version, long versionHash, long dataSize, + long rowCount) { + return new ReplicaPersistInfo(-1L, tableId, partitionId, indexId, tabletId, -1L, + replicaId, version, versionHash, dataSize, rowCount); + } + + public static ReplicaPersistInfo createForRollup(long indexId, long tabletId, long backendId, long version, + long versionHash, long dataSize, long rowCount) { + return new ReplicaPersistInfo(-1L, -1L, -1L, indexId, tabletId, backendId, -1L, + version, versionHash, dataSize, rowCount); + } + + public static ReplicaPersistInfo createForSchemaChange(long partitionId, long indexId, long tabletId, + long backendId, long version, long versionHash, + long dataSize, long rowCount) { + return new ReplicaPersistInfo(-1L, -1L, partitionId, indexId, tabletId, backendId, -1L, version, + versionHash, dataSize, rowCount); + } + + public static ReplicaPersistInfo createForClearRollupInfo(long dbId, long tableId, long partitionId, long indexId) { + return new ReplicaPersistInfo(dbId, tableId, partitionId, indexId, -1L, -1L, -1L, -1L, -1L, -1L, -1L); + } + + public ReplicaPersistInfo() { + } + + private ReplicaPersistInfo(long dbId, long tableId, long partitionId, long indexId, long tabletId, long backendId, + long replicaId, long version, long versionHash, long dataSize, long rowCount) { + this.dbId = dbId; + this.tableId = tableId; + this.partitionId = partitionId; + this.indexId = indexId; + this.tabletId = tabletId; + this.backendId = backendId; + + this.replicaId = replicaId; + + this.version = version; + this.versionHash = versionHash; + this.dataSize = dataSize; + this.rowCount = rowCount; + } + + public void setReplicaId(long replicaId) { + this.replicaId 
= replicaId; + } + + public void setBackendId(long backendId) { + this.backendId = backendId; + } + + public void setVersion(long version) { + this.version = version; + } + + public void setVersionHash(long versionHash) { + this.versionHash = versionHash; + } + + public void setDataSize(long dataSize) { + this.dataSize = dataSize; + } + + public void setRowCount(long rowCount) { + this.rowCount = rowCount; + } + + public long getDbId() { + return dbId; + } + + public long getTableId() { + return tableId; + } + + public long getPartitionId() { + return partitionId; + } + + public long getIndexId() { + return indexId; + } + + public long getTabletId() { + return tabletId; + } + + public long getReplicaId() { + return replicaId; + } + + public long getBackendId() { + return backendId; + } + + public long getVersion() { + return version; + } + + public long getVersionHash() { + return versionHash; + } + + public long getDataSize() { + return dataSize; + } + + public long getRowCount() { + return rowCount; + } + + public static ReplicaPersistInfo read(DataInput in) throws IOException { + ReplicaPersistInfo replicaInfo = new ReplicaPersistInfo(); + replicaInfo.readFields(in); + return replicaInfo; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeLong(dbId); + out.writeLong(tableId); + out.writeLong(partitionId); + out.writeLong(indexId); + out.writeLong(tabletId); + out.writeLong(backendId); + out.writeLong(replicaId); + out.writeLong(version); + out.writeLong(versionHash); + out.writeLong(dataSize); + out.writeLong(rowCount); + } + + @Override + public void readFields(DataInput in) throws IOException { + dbId = in.readLong(); + tableId = in.readLong(); + partitionId = in.readLong(); + indexId = in.readLong(); + tabletId = in.readLong(); + backendId = in.readLong(); + replicaId = in.readLong(); + version = in.readLong(); + versionHash = in.readLong(); + dataSize = in.readLong(); + rowCount = in.readLong(); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + + if (!(obj instanceof ReplicaPersistInfo)) { + return false; + } + + ReplicaPersistInfo info = (ReplicaPersistInfo) obj; + + return backendId == info.backendId + && replicaId == info.replicaId + && tabletId == info.tabletId + && indexId == info.indexId + && partitionId == info.partitionId + && tableId == info.tableId + && dbId == info.dbId + && version == info.version + && versionHash == info.versionHash + && dataSize == info.dataSize + && rowCount == info.rowCount; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("table id: ").append(tableId); + sb.append(" partition id: ").append(partitionId); + sb.append(" index id: ").append(indexId); + sb.append(" index id: ").append(indexId); + sb.append(" tablet id: ").append(tabletId); + sb.append(" backend id: ").append(backendId); + sb.append(" replica id: ").append(replicaId); + sb.append(" version: ").append(version); + sb.append(" version hash: ").append(versionHash); + sb.append(" data size: ").append(dataSize); + sb.append(" row count: ").append(rowCount); + + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/persist/StorageInfo.java b/fe/src/com/baidu/palo/persist/StorageInfo.java index 032384a42a..5923794fff 100644 --- a/fe/src/com/baidu/palo/persist/StorageInfo.java +++ b/fe/src/com/baidu/palo/persist/StorageInfo.java @@ -13,48 +13,48 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.persist; - -/** - * This class is designed for sending storage information from master to standby master. - * StorageInfo is easier to serialize to a Json String than class Storage - */ -public class StorageInfo { - private int clusterID; - private long imageSeq; - private long editsSeq; - - public StorageInfo() { - this(-1, 0, 0); - } - - public StorageInfo(int clusterID, long imageSeq, long editsSeq) { - this.clusterID = clusterID; - this.editsSeq = editsSeq; - this.imageSeq = imageSeq; - } - - public int getClusterID() { - return clusterID; - } - - public void setClusterID(int clusterID) { - this.clusterID = clusterID; - } - - public long getEditsSeq() { - return editsSeq; - } - - public void setEditsSeq(long editsSeq) { - this.editsSeq = editsSeq; - } - - public long getImageSeq() { - return imageSeq; - } - - public void setImageSeq(long imageSeq) { - this.imageSeq = imageSeq; - } -} +package com.baidu.palo.persist; + +/** + * This class is designed for sending storage information from master to standby master. + * StorageInfo is easier to serialize to a Json String than class Storage + */ +public class StorageInfo { + private int clusterID; + private long imageSeq; + private long editsSeq; + + public StorageInfo() { + this(-1, 0, 0); + } + + public StorageInfo(int clusterID, long imageSeq, long editsSeq) { + this.clusterID = clusterID; + this.editsSeq = editsSeq; + this.imageSeq = imageSeq; + } + + public int getClusterID() { + return clusterID; + } + + public void setClusterID(int clusterID) { + this.clusterID = clusterID; + } + + public long getEditsSeq() { + return editsSeq; + } + + public void setEditsSeq(long editsSeq) { + this.editsSeq = editsSeq; + } + + public long getImageSeq() { + return imageSeq; + } + + public void setImageSeq(long imageSeq) { + this.imageSeq = imageSeq; + } +} diff --git a/fe/src/com/baidu/palo/qe/DdlExecutor.java b/fe/src/com/baidu/palo/qe/DdlExecutor.java index dbd1ec4512..941c715e36 100644 --- a/fe/src/com/baidu/palo/qe/DdlExecutor.java +++ b/fe/src/com/baidu/palo/qe/DdlExecutor.java @@ -52,6 +52,7 @@ import com.baidu.palo.analysis.RevokeStmt; import com.baidu.palo.analysis.SetUserPropertyStmt; import com.baidu.palo.analysis.SyncStmt; import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.common.Config; import com.baidu.palo.common.DdlException; import com.baidu.palo.load.LoadJob.EtlJobType; @@ -89,6 +90,10 @@ public class DdlExecutor { if (loadStmt.getBrokerDesc() != null) { jobType = EtlJobType.BROKER; } else { + if (Config.disable_hadoop_load) { + throw new DdlException("Load job by hadoop cluster is disabled." + + " Try use broker load. See 'help broker load;'"); + } jobType = EtlJobType.HADOOP; } catalog.getLoadInstance().addLoadJob(loadStmt, jobType, System.currentTimeMillis()); diff --git a/fe/src/com/baidu/palo/qe/JournalObservable.java b/fe/src/com/baidu/palo/qe/JournalObservable.java index cbd256652d..ba5958f106 100644 --- a/fe/src/com/baidu/palo/qe/JournalObservable.java +++ b/fe/src/com/baidu/palo/qe/JournalObservable.java @@ -13,98 +13,98 @@ // specific language governing permissions and limitations // under the License. 
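The Javadoc of StorageInfo above notes that the class exists to be serialized to a JSON string when the master ships storage state to a standby master. A minimal sketch of that round trip, assuming a JSON library such as Gson is available on the FE classpath (the actual serialization code is not part of this section):

    import com.google.gson.Gson;

    public class StorageInfoJsonSketch {
        public static void main(String[] args) {
            Gson gson = new Gson();
            StorageInfo info = new StorageInfo(/* clusterID */ 10001, /* imageSeq */ 25, /* editsSeq */ 30);

            String json = gson.toJson(info);            // e.g. {"clusterID":10001,"imageSeq":25,"editsSeq":30}
            StorageInfo back = gson.fromJson(json, StorageInfo.class);

            System.out.println(json);
            System.out.println(back.getImageSeq());     // 25
        }
    }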
-package com.baidu.palo.qe; - -import java.util.concurrent.TimeUnit; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.common.DdlException; - -import com.google.common.collect.Multiset; -import com.google.common.collect.TreeMultiset; - -public class JournalObservable { - private static final Logger LOG = LogManager.getLogger(JournalObservable.class); - private Multiset obs; - - public JournalObservable() { - obs = TreeMultiset.create(); - } - - private synchronized void addObserver(JournalObserver o) { - if (o == null) { - throw new NullPointerException(); - } - - obs.add(o); - LOG.debug("JournalObservable addObserver=[" + o + "] the size is " + obs.size()); - } - - private synchronized void deleteObserver(JournalObserver o) { - obs.remove(o); - LOG.debug("JournalObservable deleteObserver=[" + o + "] the size is " + obs.size()); - } - - public void waitOn(Long journalVersion, int timeoutMs) throws DdlException { - long replayedJournalId = Catalog.getInstance().getReplayedJournalId(); - if (replayedJournalId >= journalVersion || timeoutMs <= 0) { - LOG.debug("follower no need to sync getReplayedJournalId={}, journalVersion={}, timeoutMs={}", - replayedJournalId, journalVersion, timeoutMs); - return; - } else { - LOG.info("waiting observer to replay journal from {} to {}", replayedJournalId, journalVersion); - JournalObserver observer = new JournalObserver(journalVersion); - addObserver(observer); - try { - boolean ok = observer.getLatch().await(timeoutMs, TimeUnit.MILLISECONDS); - if (!ok) { - throw new DdlException("Execute timeout, the command may be succeed, you'd better retry"); - } - } catch (InterruptedException e) { - throw new DdlException("Interrupted exception happens, " - + "the command may be succeed, you'd better retry"); - } finally { - deleteObserver(observer); - } - } - } - - // return min pos which is bigger than value - public static int upperBound(Object[] array, int size, Long value) { - int left = 0; - int right = size - 1; - while (left < right) { - int middle = left + ((right - left) >> 1); - if (value >= ((JournalObserver) array[middle]).getTargetJournalVersion()) { - left = middle + 1; - } else { - right = middle - 1; - } - } - if (right == -1) { - return 0; - } - Long rightValue = ((JournalObserver) array[right]).getTargetJournalVersion(); - return value >= rightValue ? 
right + 1 : right; - } - - public void notifyObservers(Long journalId) { - Object[] arrLocal; - int size; - synchronized (this) { - size = obs.size(); - arrLocal = obs.toArray(); - } - - int pos = upperBound(arrLocal, size, journalId); - LOG.debug("notify observers: journal: {}, pos: {}, size: {}, obs: {}", journalId, pos, size, obs); - - for (int i = 0; i < pos; i ++) { - JournalObserver observer = ((JournalObserver) arrLocal[i]); - observer.update(); - } - } -} +package com.baidu.palo.qe; + +import java.util.concurrent.TimeUnit; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.common.DdlException; + +import com.google.common.collect.Multiset; +import com.google.common.collect.TreeMultiset; + +public class JournalObservable { + private static final Logger LOG = LogManager.getLogger(JournalObservable.class); + private Multiset obs; + + public JournalObservable() { + obs = TreeMultiset.create(); + } + + private synchronized void addObserver(JournalObserver o) { + if (o == null) { + throw new NullPointerException(); + } + + obs.add(o); + LOG.debug("JournalObservable addObserver=[" + o + "] the size is " + obs.size()); + } + + private synchronized void deleteObserver(JournalObserver o) { + obs.remove(o); + LOG.debug("JournalObservable deleteObserver=[" + o + "] the size is " + obs.size()); + } + + public void waitOn(Long journalVersion, int timeoutMs) throws DdlException { + long replayedJournalId = Catalog.getInstance().getReplayedJournalId(); + if (replayedJournalId >= journalVersion || timeoutMs <= 0) { + LOG.debug("follower no need to sync getReplayedJournalId={}, journalVersion={}, timeoutMs={}", + replayedJournalId, journalVersion, timeoutMs); + return; + } else { + LOG.info("waiting observer to replay journal from {} to {}", replayedJournalId, journalVersion); + JournalObserver observer = new JournalObserver(journalVersion); + addObserver(observer); + try { + boolean ok = observer.getLatch().await(timeoutMs, TimeUnit.MILLISECONDS); + if (!ok) { + throw new DdlException("Execute timeout, the command may be succeed, you'd better retry"); + } + } catch (InterruptedException e) { + throw new DdlException("Interrupted exception happens, " + + "the command may be succeed, you'd better retry"); + } finally { + deleteObserver(observer); + } + } + } + + // return min pos which is bigger than value + public static int upperBound(Object[] array, int size, Long value) { + int left = 0; + int right = size - 1; + while (left < right) { + int middle = left + ((right - left) >> 1); + if (value >= ((JournalObserver) array[middle]).getTargetJournalVersion()) { + left = middle + 1; + } else { + right = middle - 1; + } + } + if (right == -1) { + return 0; + } + Long rightValue = ((JournalObserver) array[right]).getTargetJournalVersion(); + return value >= rightValue ? 
right + 1 : right; + } + + public void notifyObservers(Long journalId) { + Object[] arrLocal; + int size; + synchronized (this) { + size = obs.size(); + arrLocal = obs.toArray(); + } + + int pos = upperBound(arrLocal, size, journalId); + LOG.debug("notify observers: journal: {}, pos: {}, size: {}, obs: {}", journalId, pos, size, obs); + + for (int i = 0; i < pos; i ++) { + JournalObserver observer = ((JournalObserver) arrLocal[i]); + observer.update(); + } + } +} diff --git a/fe/src/com/baidu/palo/qe/JournalObserver.java b/fe/src/com/baidu/palo/qe/JournalObserver.java index 62eddac72a..342cadaf4e 100644 --- a/fe/src/com/baidu/palo/qe/JournalObserver.java +++ b/fe/src/com/baidu/palo/qe/JournalObserver.java @@ -13,73 +13,73 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.qe; - -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicLong; - -public class JournalObserver implements Comparable { - private static AtomicLong idGen = new AtomicLong(0L); - - private Long id; - private Long targetJournalVersion; - private CountDownLatch latch; - - public JournalObserver(Long targetJournalVersion) { - this.id = idGen.getAndIncrement(); - this.targetJournalVersion = targetJournalVersion; - this.latch = new CountDownLatch(1); - } - - @Override - public int compareTo(JournalObserver jo) { - if (this.targetJournalVersion < jo.targetJournalVersion) { - return -1; - } else if (this.targetJournalVersion > jo.targetJournalVersion) { - return 1; - } else { - if (this.id < jo.id) { - return -1; - } else if (this.id > jo.id) { - return 1; - } else { - return 0; - } - } - } - - @Override - public boolean equals(Object obj) { - if (this.hashCode() != obj.hashCode()) { - return false; - } - if (!(obj instanceof JournalObserver)) { - return false; - } - - JournalObserver obs = ((JournalObserver) obj); - return this.targetJournalVersion == obs.targetJournalVersion && this.id == obs.id; - } - - @Override - public int hashCode() { - return this.id.hashCode(); - } - - @Override - public String toString() { - return "target: " + targetJournalVersion; - } - - public void update() { - latch.countDown(); - } - - public Long getTargetJournalVersion() { - return targetJournalVersion; - } - - public CountDownLatch getLatch() { - return latch; - } -} +package com.baidu.palo.qe; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +public class JournalObserver implements Comparable { + private static AtomicLong idGen = new AtomicLong(0L); + + private Long id; + private Long targetJournalVersion; + private CountDownLatch latch; + + public JournalObserver(Long targetJournalVersion) { + this.id = idGen.getAndIncrement(); + this.targetJournalVersion = targetJournalVersion; + this.latch = new CountDownLatch(1); + } + + @Override + public int compareTo(JournalObserver jo) { + if (this.targetJournalVersion < jo.targetJournalVersion) { + return -1; + } else if (this.targetJournalVersion > jo.targetJournalVersion) { + return 1; + } else { + if (this.id < jo.id) { + return -1; + } else if (this.id > jo.id) { + return 1; + } else { + return 0; + } + } + } + + @Override + public boolean equals(Object obj) { + if (this.hashCode() != obj.hashCode()) { + return false; + } + if (!(obj instanceof JournalObserver)) { + return false; + } + + JournalObserver obs = ((JournalObserver) obj); + return this.targetJournalVersion == obs.targetJournalVersion && this.id == obs.id; + } + + @Override + public int hashCode() { 
+ return this.id.hashCode(); + } + + @Override + public String toString() { + return "target: " + targetJournalVersion; + } + + public void update() { + latch.countDown(); + } + + public Long getTargetJournalVersion() { + return targetJournalVersion; + } + + public CountDownLatch getLatch() { + return latch; + } +} diff --git a/fe/src/com/baidu/palo/qe/MasterOpExecutor.java b/fe/src/com/baidu/palo/qe/MasterOpExecutor.java index 93089102cb..779975997d 100644 --- a/fe/src/com/baidu/palo/qe/MasterOpExecutor.java +++ b/fe/src/com/baidu/palo/qe/MasterOpExecutor.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.qe; - +package com.baidu.palo.qe; + import com.baidu.palo.analysis.RedirectStatus; import com.baidu.palo.common.ClientPool; import com.baidu.palo.thrift.FrontendService; @@ -26,105 +26,105 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.thrift.transport.TTransportException; -import java.nio.ByteBuffer; - -public class MasterOpExecutor { - private static final Logger LOG = LogManager.getLogger(MasterOpExecutor.class); - - private final String originStmt; - private final ConnectContext ctx; - private TMasterOpResult result; - - private int waitTimeoutMs; - // the total time of thrift connectTime add readTime and writeTime - private int thriftTimeoutMs; - - public MasterOpExecutor(String originStmt, ConnectContext ctx, RedirectStatus status) { - this.originStmt = originStmt; - this.ctx = ctx; - if (status.isNeedToWaitJournalSync()) { - this.waitTimeoutMs = ctx.getSessionVariable().getQueryTimeoutS() * 1000; - } else { - this.waitTimeoutMs = 0; - } - this.thriftTimeoutMs = ctx.getSessionVariable().getQueryTimeoutS() * 1000; - } - - public void execute() throws Exception { - forward(); - LOG.info("forwarding to master get result max journal id: {}", result.maxJournalId); - ctx.getCatalog().getJournalObservable().waitOn(result.maxJournalId, waitTimeoutMs); - } - - // Send request to Master - private void forward() throws Exception { - String masterHost = ctx.getCatalog().getMasterIp(); - int masterRpcPort = ctx.getCatalog().getMasterRpcPort(); - TNetworkAddress thriftAddress = new TNetworkAddress(masterHost, masterRpcPort); - - FrontendService.Client client = null; - try { - client = ClientPool.frontendPool.borrowObject(thriftAddress, thriftTimeoutMs); - } catch (Exception e) { - // may throw NullPointerException. 
add err msg - throw new Exception("Failed to get master client.", e); - } - TMasterOpRequest params = new TMasterOpRequest(); - params.setCluster(ctx.getClusterName()); - params.setSql(originStmt); - params.setUser(ctx.getQualifiedUser()); - params.setDb(ctx.getDatabase()); - params.setResourceInfo(ctx.toResourceCtx()); +import java.nio.ByteBuffer; + +public class MasterOpExecutor { + private static final Logger LOG = LogManager.getLogger(MasterOpExecutor.class); + + private final String originStmt; + private final ConnectContext ctx; + private TMasterOpResult result; + + private int waitTimeoutMs; + // the total time of thrift connectTime add readTime and writeTime + private int thriftTimeoutMs; + + public MasterOpExecutor(String originStmt, ConnectContext ctx, RedirectStatus status) { + this.originStmt = originStmt; + this.ctx = ctx; + if (status.isNeedToWaitJournalSync()) { + this.waitTimeoutMs = ctx.getSessionVariable().getQueryTimeoutS() * 1000; + } else { + this.waitTimeoutMs = 0; + } + this.thriftTimeoutMs = ctx.getSessionVariable().getQueryTimeoutS() * 1000; + } + + public void execute() throws Exception { + forward(); + LOG.info("forwarding to master get result max journal id: {}", result.maxJournalId); + ctx.getCatalog().getJournalObservable().waitOn(result.maxJournalId, waitTimeoutMs); + } + + // Send request to Master + private void forward() throws Exception { + String masterHost = ctx.getCatalog().getMasterIp(); + int masterRpcPort = ctx.getCatalog().getMasterRpcPort(); + TNetworkAddress thriftAddress = new TNetworkAddress(masterHost, masterRpcPort); + + FrontendService.Client client = null; + try { + client = ClientPool.frontendPool.borrowObject(thriftAddress, thriftTimeoutMs); + } catch (Exception e) { + // may throw NullPointerException. 
add err msg + throw new Exception("Failed to get master client.", e); + } + TMasterOpRequest params = new TMasterOpRequest(); + params.setCluster(ctx.getClusterName()); + params.setSql(originStmt); + params.setUser(ctx.getQualifiedUser()); + params.setDb(ctx.getDatabase()); + params.setResourceInfo(ctx.toResourceCtx()); params.setExecMemLimit(ctx.getSessionVariable().getMaxExecMemByte()); params.setQueryTimeout(ctx.getSessionVariable().getQueryTimeoutS()); params.setUser_ip(ctx.getRemoteIP()); - - LOG.info("Forward statement {} to Master {}", ctx.getStmtId(), thriftAddress); - - boolean isReturnToPool = false; - try { - result = client.forward(params); - isReturnToPool = true; - } catch (TTransportException e) { - boolean ok = ClientPool.frontendPool.reopen(client, thriftTimeoutMs); - if (!ok) { - throw e; - } - if (e.getType() == TTransportException.TIMED_OUT) { - throw e; - } else { - result = client.forward(params); - isReturnToPool = true; - } - } finally { - if (isReturnToPool) { - ClientPool.frontendPool.returnObject(thriftAddress, client); - } else { - ClientPool.frontendPool.invalidateObject(thriftAddress, client); - } - } - } - - public ByteBuffer getOutputPacket() { - if (result == null) { - return null; - } - return result.packet; - } - - public ShowResultSet getProxyResultSet() { - if (result == null) { - return null; - } - if (result.isSetResultSet()) { - return new ShowResultSet(result.resultSet); - } else { - return null; - } - } - - public void setResult(TMasterOpResult result) { - this.result = result; - } -} - + + LOG.info("Forward statement {} to Master {}", ctx.getStmtId(), thriftAddress); + + boolean isReturnToPool = false; + try { + result = client.forward(params); + isReturnToPool = true; + } catch (TTransportException e) { + boolean ok = ClientPool.frontendPool.reopen(client, thriftTimeoutMs); + if (!ok) { + throw e; + } + if (e.getType() == TTransportException.TIMED_OUT) { + throw e; + } else { + result = client.forward(params); + isReturnToPool = true; + } + } finally { + if (isReturnToPool) { + ClientPool.frontendPool.returnObject(thriftAddress, client); + } else { + ClientPool.frontendPool.invalidateObject(thriftAddress, client); + } + } + } + + public ByteBuffer getOutputPacket() { + if (result == null) { + return null; + } + return result.packet; + } + + public ShowResultSet getProxyResultSet() { + if (result == null) { + return null; + } + if (result.isSetResultSet()) { + return new ShowResultSet(result.resultSet); + } else { + return null; + } + } + + public void setResult(TMasterOpResult result) { + this.result = result; + } +} + diff --git a/fe/src/com/baidu/palo/qe/SimpleScheduler.java b/fe/src/com/baidu/palo/qe/SimpleScheduler.java index 33b758ad75..77c077d1b8 100644 --- a/fe/src/com/baidu/palo/qe/SimpleScheduler.java +++ b/fe/src/com/baidu/palo/qe/SimpleScheduler.java @@ -1,204 +1,204 @@ -// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
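JournalObservable.upperBound(), shown above, is the piece that decides which waiting observers notifyObservers() may release: observers are kept sorted by target journal version, and upperBound() returns the first position whose target is strictly greater than the replayed journal id. A small worked example (not part of the patch); the expected output is 3, i.e. the observers waiting for versions 5, 7 and 7 are woken while the one waiting for 9 keeps blocking in waitOn():

    public class UpperBoundExample {
        public static void main(String[] args) {
            JournalObserver[] observers = {
                    new JournalObserver(5L),
                    new JournalObserver(7L),
                    new JournalObserver(7L),
                    new JournalObserver(9L)
            };
            // replayed journal id is 7
            System.out.println(JournalObservable.upperBound(observers, observers.length, 7L));
        }
    }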
- -package com.baidu.palo.qe; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.common.FeConstants; -import com.baidu.palo.common.Reference; -import com.baidu.palo.system.Backend; -import com.baidu.palo.system.SystemInfoService; -import com.baidu.palo.thrift.TNetworkAddress; -import com.baidu.palo.thrift.TScanRangeLocation; - -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public class SimpleScheduler { - private static AtomicLong nextId = new AtomicLong(0); - private static final Logger LOG = LogManager.getLogger(SimpleScheduler.class); - - private static Map blacklistBackends = Maps.newHashMap(); - private static Lock lock = new ReentrantLock(); - private static UpdateBlacklistThread updateBlacklistThread; - - static { - updateBlacklistThread = new UpdateBlacklistThread(); - updateBlacklistThread.start(); - } - - public static TNetworkAddress getHost(long backendId, - List locations, - ImmutableMap backends, - Reference backendIdRef) { - if (locations == null || backends == null) { - return null; - } - LOG.debug("getHost backendID={}, backendSize={}", backendId, backends.size()); - Backend backend = backends.get(backendId); - lock.lock(); - try { - if (backend != null && backend.isAlive() && !blacklistBackends.containsKey(backendId)) { - backendIdRef.setRef(backendId); - return new TNetworkAddress(backend.getHost(), backend.getBePort()); - } else { - for (TScanRangeLocation location : locations) { - if (location.backend_id == backendId) { - continue; - } - // choose the first alive backend(in analysis stage, the locations are random) - Backend candidateBackend = backends.get(location.backend_id); - if (candidateBackend != null && candidateBackend.isAlive() - && !blacklistBackends.containsKey(location.backend_id)) { - backendIdRef.setRef(location.backend_id); - return new TNetworkAddress(candidateBackend.getHost(), candidateBackend.getBePort()); - } - } - } - } finally { - lock.unlock(); - } - // no backend returned - return null; - } - - public static TNetworkAddress getHost(ImmutableMap backends, - Reference backendIdRef) { - if (backends == null) { - return null; - } - int backendSize = backends.size(); - if (backendSize == 0) { - return null; - } - long id = nextId.getAndIncrement() % backendSize; - - List idToBackendId = Lists.newArrayList(); - idToBackendId.addAll(backends.keySet()); - Long backendId = idToBackendId.get((int) id); - Backend backend = backends.get(backendId); - - if (backend != null && backend.isAlive() && !blacklistBackends.containsKey(backendId)) { - backendIdRef.setRef(backendId); - return new TNetworkAddress(backend.getHost(), backend.getBePort()); - } else { - long candidateId = id + 1; // get next candidate id - for (int i = 0; i < backendSize; i ++, candidateId ++) { - LOG.debug("i={} candidatedId={}", i, candidateId); - if (candidateId >= backendSize) { - candidateId = 0; - } - if (candidateId == id) { - continue; - } - Long candidatebackendId = idToBackendId.get((int) candidateId); - LOG.debug("candidatebackendId={}", candidatebackendId); - Backend candidateBackend = backends.get(candidatebackendId); - if (candidateBackend != null && candidateBackend.isAlive() - 
&& !blacklistBackends.containsKey(candidatebackendId)) { - backendIdRef.setRef(candidatebackendId); - return new TNetworkAddress(candidateBackend.getHost(), candidateBackend.getBePort()); - } - } - } - // no backend returned - return null; - } - - public static void updateBlacklistBackends(Long backendID) { - if (backendID == null) { - return; - } - lock.lock(); - try { - int tryTime = FeConstants.heartbeat_interval_second + 1; - blacklistBackends.put(backendID, tryTime); - LOG.warn("add black list " + backendID); - } finally { - lock.unlock(); - } - } - - private static class UpdateBlacklistThread implements Runnable { - private static final Logger LOG = LogManager.getLogger(UpdateBlacklistThread.class); - private static Thread thread; - - public UpdateBlacklistThread() { - thread = new Thread(this, "UpdateBlacklistThread"); - thread.setDaemon(true); - } - - public void start() { - thread.start(); - } - - @Override - public void run() { - LOG.debug("UpdateBlacklistThread is start to run"); - while (true) { - try { - Thread.sleep(1000L); - SystemInfoService clusterInfoService = Catalog.getCurrentSystemInfo(); - LOG.debug("UpdateBlacklistThread retry begin"); - lock.lock(); - try { - Iterator> iterator = blacklistBackends.entrySet().iterator(); - while (iterator.hasNext()) { - Map.Entry entry = iterator.next(); - Long backendId = entry.getKey(); - - // remove from blacklist if - // 1. backend does not exist antmore - // 2. backend is alive - if (clusterInfoService.getBackend(backendId) == null - || clusterInfoService.checkBackendAvailable(backendId)) { - iterator.remove(); - LOG.debug("remove backendID {} which is alive", backendId); - } else { - // 3. max try time is reach - Integer retryTimes = entry.getValue(); - retryTimes = retryTimes - 1; - if (retryTimes <= 0) { - iterator.remove(); - LOG.warn("remove backendID {}. reach max try time", backendId); - } else { - entry.setValue(retryTimes); - LOG.debug("blacklistBackends backendID={} retryTimes={}", backendId, retryTimes); - } - } - } - } finally { - lock.unlock(); - LOG.debug("UpdateBlacklistThread retry end"); - } - - } catch (Throwable ex) { - LOG.warn("blacklist thread exception" + ex); - } - } - } - } -} +// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
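A hedged sketch (not part of the patch) of how a coordinator is expected to drive the SimpleScheduler class above: getHost() hands back an address plus the chosen backend id, and a failed RPC pushes that id into the blacklist so later getHost() calls skip it until UpdateBlacklistThread sees the backend available again or its retry budget (heartbeat_interval_second + 1 ticks) runs out. getIdToBackend(), sendFragment() and the no-arg Reference constructor are assumptions used only for illustration; they are not shown in this section.

    import com.baidu.palo.catalog.Catalog;
    import com.baidu.palo.common.Reference;
    import com.baidu.palo.qe.SimpleScheduler;
    import com.baidu.palo.thrift.TNetworkAddress;

    public class SchedulerUsageSketch {
        static TNetworkAddress pickAndSend() throws Exception {
            Reference<Long> chosenBackendId = new Reference<Long>();
            TNetworkAddress addr = SimpleScheduler.getHost(
                    Catalog.getCurrentSystemInfo().getIdToBackend(),   // assumed to return ImmutableMap<Long, Backend>
                    chosenBackendId);
            if (addr == null) {
                throw new Exception("no available backend");
            }
            try {
                sendFragment(addr);                                     // illustrative RPC to the chosen backend
            } catch (Exception e) {
                // mark the backend as suspicious; the next getHost() call will avoid it
                SimpleScheduler.updateBlacklistBackends(chosenBackendId.getRef());
                throw e;
            }
            return addr;
        }

        private static void sendFragment(TNetworkAddress addr) {
            // placeholder for the real plan-fragment RPC
        }
    }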
+ +package com.baidu.palo.qe; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.common.FeConstants; +import com.baidu.palo.common.Reference; +import com.baidu.palo.system.Backend; +import com.baidu.palo.system.SystemInfoService; +import com.baidu.palo.thrift.TNetworkAddress; +import com.baidu.palo.thrift.TScanRangeLocation; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +public class SimpleScheduler { + private static AtomicLong nextId = new AtomicLong(0); + private static final Logger LOG = LogManager.getLogger(SimpleScheduler.class); + + private static Map blacklistBackends = Maps.newHashMap(); + private static Lock lock = new ReentrantLock(); + private static UpdateBlacklistThread updateBlacklistThread; + + static { + updateBlacklistThread = new UpdateBlacklistThread(); + updateBlacklistThread.start(); + } + + public static TNetworkAddress getHost(long backendId, + List locations, + ImmutableMap backends, + Reference backendIdRef) { + if (locations == null || backends == null) { + return null; + } + LOG.debug("getHost backendID={}, backendSize={}", backendId, backends.size()); + Backend backend = backends.get(backendId); + lock.lock(); + try { + if (backend != null && backend.isAlive() && !blacklistBackends.containsKey(backendId)) { + backendIdRef.setRef(backendId); + return new TNetworkAddress(backend.getHost(), backend.getBePort()); + } else { + for (TScanRangeLocation location : locations) { + if (location.backend_id == backendId) { + continue; + } + // choose the first alive backend(in analysis stage, the locations are random) + Backend candidateBackend = backends.get(location.backend_id); + if (candidateBackend != null && candidateBackend.isAlive() + && !blacklistBackends.containsKey(location.backend_id)) { + backendIdRef.setRef(location.backend_id); + return new TNetworkAddress(candidateBackend.getHost(), candidateBackend.getBePort()); + } + } + } + } finally { + lock.unlock(); + } + // no backend returned + return null; + } + + public static TNetworkAddress getHost(ImmutableMap backends, + Reference backendIdRef) { + if (backends == null) { + return null; + } + int backendSize = backends.size(); + if (backendSize == 0) { + return null; + } + long id = nextId.getAndIncrement() % backendSize; + + List idToBackendId = Lists.newArrayList(); + idToBackendId.addAll(backends.keySet()); + Long backendId = idToBackendId.get((int) id); + Backend backend = backends.get(backendId); + + if (backend != null && backend.isAlive() && !blacklistBackends.containsKey(backendId)) { + backendIdRef.setRef(backendId); + return new TNetworkAddress(backend.getHost(), backend.getBePort()); + } else { + long candidateId = id + 1; // get next candidate id + for (int i = 0; i < backendSize; i ++, candidateId ++) { + LOG.debug("i={} candidatedId={}", i, candidateId); + if (candidateId >= backendSize) { + candidateId = 0; + } + if (candidateId == id) { + continue; + } + Long candidatebackendId = idToBackendId.get((int) candidateId); + LOG.debug("candidatebackendId={}", candidatebackendId); + Backend candidateBackend = backends.get(candidatebackendId); + if (candidateBackend != null && candidateBackend.isAlive() + 
&& !blacklistBackends.containsKey(candidatebackendId)) { + backendIdRef.setRef(candidatebackendId); + return new TNetworkAddress(candidateBackend.getHost(), candidateBackend.getBePort()); + } + } + } + // no backend returned + return null; + } + + public static void updateBlacklistBackends(Long backendID) { + if (backendID == null) { + return; + } + lock.lock(); + try { + int tryTime = FeConstants.heartbeat_interval_second + 1; + blacklistBackends.put(backendID, tryTime); + LOG.warn("add black list " + backendID); + } finally { + lock.unlock(); + } + } + + private static class UpdateBlacklistThread implements Runnable { + private static final Logger LOG = LogManager.getLogger(UpdateBlacklistThread.class); + private static Thread thread; + + public UpdateBlacklistThread() { + thread = new Thread(this, "UpdateBlacklistThread"); + thread.setDaemon(true); + } + + public void start() { + thread.start(); + } + + @Override + public void run() { + LOG.debug("UpdateBlacklistThread is start to run"); + while (true) { + try { + Thread.sleep(1000L); + SystemInfoService clusterInfoService = Catalog.getCurrentSystemInfo(); + LOG.debug("UpdateBlacklistThread retry begin"); + lock.lock(); + try { + Iterator> iterator = blacklistBackends.entrySet().iterator(); + while (iterator.hasNext()) { + Map.Entry entry = iterator.next(); + Long backendId = entry.getKey(); + + // remove from blacklist if + // 1. backend does not exist antmore + // 2. backend is alive + if (clusterInfoService.getBackend(backendId) == null + || clusterInfoService.checkBackendAvailable(backendId)) { + iterator.remove(); + LOG.debug("remove backendID {} which is alive", backendId); + } else { + // 3. max try time is reach + Integer retryTimes = entry.getValue(); + retryTimes = retryTimes - 1; + if (retryTimes <= 0) { + iterator.remove(); + LOG.warn("remove backendID {}. 
reach max try time", backendId); + } else { + entry.setValue(retryTimes); + LOG.debug("blacklistBackends backendID={} retryTimes={}", backendId, retryTimes); + } + } + } + } finally { + lock.unlock(); + LOG.debug("UpdateBlacklistThread retry end"); + } + + } catch (Throwable ex) { + LOG.warn("blacklist thread exception" + ex); + } + } + } + } +} diff --git a/fe/src/com/baidu/palo/service/FrontendServiceImpl.java b/fe/src/com/baidu/palo/service/FrontendServiceImpl.java index c6a9f0ce78..c06cb2c91e 100644 --- a/fe/src/com/baidu/palo/service/FrontendServiceImpl.java +++ b/fe/src/com/baidu/palo/service/FrontendServiceImpl.java @@ -23,6 +23,7 @@ import com.baidu.palo.catalog.Table; import com.baidu.palo.cluster.ClusterNamespace; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.AuditLog; +import com.baidu.palo.common.AuthorizationException; import com.baidu.palo.common.CaseSensibility; import com.baidu.palo.common.Config; import com.baidu.palo.common.DdlException; @@ -458,6 +459,7 @@ public class FrontendServiceImpl implements FrontendService.Iface { public TFeResult loadCheck(TLoadCheckRequest request) throws TException { LOG.info("Load check request is {}", request); + TStatus status = new TStatus(TStatusCode.OK); TFeResult result = new TFeResult(FrontendServiceVersion.V1, status); String cluster; @@ -469,8 +471,14 @@ public class FrontendServiceImpl implements FrontendService.Iface { final String dbFullName = ClusterNamespace.getFullName(cluster, request.db); - request.setUser(request.user); - request.setDb(dbFullName); + try { + checkPasswordAndPrivs(cluster, request.user, request.passwd, request.db, request.tbl, request.user_ip, + PrivPredicate.LOAD); + } catch (AuthorizationException e) { + status.setStatus_code(TStatusCode.ANALYSIS_ERROR); + status.setError_msgs(Lists.newArrayList(e.getMessage())); + return result; + } if (request.isSetLabel()) { // Only single table will be set label @@ -491,4 +499,25 @@ public class FrontendServiceImpl implements FrontendService.Iface { return result; } + + private void checkPasswordAndPrivs(String cluster, String user, String passwd, String db, String tbl, + String clientIp, PrivPredicate predicate) throws AuthorizationException { + + final String fullUserName = ClusterNamespace.getFullName(cluster, user); + final String fullDbName = ClusterNamespace.getFullName(cluster, db); + + if (!Catalog.getCurrentCatalog().getAuth().checkPlainPassword(fullUserName, + clientIp, + passwd)) { + throw new AuthorizationException("Access denied for " + + fullUserName + "@" + clientIp); + } + + if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(clientIp, fullDbName, + fullUserName, tbl, predicate)) { + throw new AuthorizationException( + "Access denied; you need (at least one of) the LOAD privilege(s) for this operation"); + } + } } + diff --git a/fe/src/com/baidu/palo/system/BackendEvent.java b/fe/src/com/baidu/palo/system/BackendEvent.java index a8f981fb58..e92246e880 100644 --- a/fe/src/com/baidu/palo/system/BackendEvent.java +++ b/fe/src/com/baidu/palo/system/BackendEvent.java @@ -13,46 +13,46 @@ // specific language governing permissions and limitations // under the License. 
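The loadCheck() change above means a mini load request is now authenticated before any label or table handling: checkPasswordAndPrivs() first verifies the plaintext password against the cluster-qualified user name and client IP, then checks LOAD privilege on the target table, and loadCheck() maps a failure to an ANALYSIS_ERROR status so the error travels back inside the TFeResult rather than as a Thrift exception. A condensed sketch of the two checks (the same calls as in the hunk above; the variable names are illustrative):

    String fullUserName = ClusterNamespace.getFullName(cluster, user);   // cluster-qualified user name
    String fullDbName = ClusterNamespace.getFullName(cluster, db);

    // 1. the plaintext password carried by the mini load request must match
    if (!Catalog.getCurrentCatalog().getAuth().checkPlainPassword(fullUserName, clientIp, passwd)) {
        throw new AuthorizationException("Access denied for " + fullUserName + "@" + clientIp);
    }

    // 2. the user must hold LOAD privilege on the target table
    if (!Catalog.getCurrentCatalog().getAuth().checkTblPriv(clientIp, fullDbName, fullUserName, tbl,
            PrivPredicate.LOAD)) {
        throw new AuthorizationException(
                "Access denied; you need (at least one of) the LOAD privilege(s) for this operation");
    }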
-package com.baidu.palo.system; - -import org.apache.commons.lang.StringUtils; - -public class BackendEvent { - - public enum BackendEventType { - BACKEND_DOWN, - BACKEND_DROPPED, - BACKEND_DECOMMISSION - } - - private final BackendEventType type; - private final String message; - private final Long[] backendIds; - - public BackendEvent(BackendEventType type, String message, Long...backendIds) { - this.type = type; - this.message = message; - this.backendIds = backendIds; - } - - public BackendEventType getType() { - return type; - } - - public String getMessage() { - return message; - } - - public Long[] getBackendIds() { - return backendIds; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Backend[").append(StringUtils.join(backendIds, ", ")).append("]"); - sb.append(" Type: ").append(type.name()); - sb.append(" Msg: ").append(message); - return sb.toString(); - } -} +package com.baidu.palo.system; + +import org.apache.commons.lang.StringUtils; + +public class BackendEvent { + + public enum BackendEventType { + BACKEND_DOWN, + BACKEND_DROPPED, + BACKEND_DECOMMISSION + } + + private final BackendEventType type; + private final String message; + private final Long[] backendIds; + + public BackendEvent(BackendEventType type, String message, Long...backendIds) { + this.type = type; + this.message = message; + this.backendIds = backendIds; + } + + public BackendEventType getType() { + return type; + } + + public String getMessage() { + return message; + } + + public Long[] getBackendIds() { + return backendIds; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Backend[").append(StringUtils.join(backendIds, ", ")).append("]"); + sb.append(" Type: ").append(type.name()); + sb.append(" Msg: ").append(message); + return sb.toString(); + } +} diff --git a/fe/src/com/baidu/palo/system/SystemInfoObserver.java b/fe/src/com/baidu/palo/system/SystemInfoObserver.java index 57a466a767..a1fd5570ab 100644 --- a/fe/src/com/baidu/palo/system/SystemInfoObserver.java +++ b/fe/src/com/baidu/palo/system/SystemInfoObserver.java @@ -13,35 +13,35 @@ // specific language governing permissions and limitations // under the License. 
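BackendEvent above is a plain value object; SystemInfoObserver, re-added just below, consumes it through Guava's @Subscribe mechanism. A minimal sketch (not part of the patch) of wiring the two together, assuming a Guava EventBus is the delivery channel; the bus that actually posts these events is not shown in this section.

    import com.google.common.eventbus.EventBus;
    import com.google.common.eventbus.Subscribe;

    public class BackendEventSketch {
        public static void main(String[] args) {
            EventBus eventBus = new EventBus("backend-events");   // assumption: owned by SystemInfoService

            eventBus.register(new SystemInfoObserver("demo-observer") {
                @Subscribe
                @Override
                public void listen(BackendEvent event) {
                    // BackendEvent.toString() prints the backend ids, event type and message
                    System.out.println("observed: " + event);
                }
            });

            eventBus.post(new BackendEvent(BackendEvent.BackendEventType.BACKEND_DOWN,
                    "heartbeat lost", 10001L, 10002L));
        }
    }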
-package com.baidu.palo.system; - -import com.google.common.eventbus.DeadEvent; -import com.google.common.eventbus.Subscribe; - -public abstract class SystemInfoObserver { - - private String name; - - public SystemInfoObserver() { - } - - public SystemInfoObserver(String name) { - this.name = name; - } - - public void setName(String name) { - this.name = name; - } - - public String getName() { - return name; - } - - @Subscribe - public void listen(DeadEvent deadEvent) { - // do nothing - } - - @Subscribe - public abstract void listen(BackendEvent backendEvent); -} +package com.baidu.palo.system; + +import com.google.common.eventbus.DeadEvent; +import com.google.common.eventbus.Subscribe; + +public abstract class SystemInfoObserver { + + private String name; + + public SystemInfoObserver() { + } + + public SystemInfoObserver(String name) { + this.name = name; + } + + public void setName(String name) { + this.name = name; + } + + public String getName() { + return name; + } + + @Subscribe + public void listen(DeadEvent deadEvent) { + // do nothing + } + + @Subscribe + public abstract void listen(BackendEvent backendEvent); +} diff --git a/fe/src/com/baidu/palo/task/AgentBatchTask.java b/fe/src/com/baidu/palo/task/AgentBatchTask.java index 0514feab02..de005ef219 100644 --- a/fe/src/com/baidu/palo/task/AgentBatchTask.java +++ b/fe/src/com/baidu/palo/task/AgentBatchTask.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.ClientPool; import com.baidu.palo.system.Backend; @@ -44,203 +44,203 @@ import org.apache.logging.log4j.Logger; import java.util.HashMap; import java.util.LinkedList; import java.util.List; -import java.util.Map; - -/* - * This class group tasks by backend - */ -public class AgentBatchTask implements Runnable { - private static final Logger LOG = LogManager.getLogger(AgentBatchTask.class); - - // backendId -> AgentTask List - private Map> backendIdToTasks; - - public AgentBatchTask() { - this.backendIdToTasks = new HashMap>(); - } - - public AgentBatchTask(AgentTask singleTask) { - this(); - addTask(singleTask); - } - - public void addTask(AgentTask agentTask) { - if (agentTask == null) { - return; - } - long backendId = agentTask.getBackendId(); - if (backendIdToTasks.containsKey(backendId)) { - List tasks = backendIdToTasks.get(backendId); - tasks.add(agentTask); - } else { - List tasks = new LinkedList(); - tasks.add(agentTask); - backendIdToTasks.put(backendId, tasks); - } - } - - public List getAllTasks() { - List tasks = new LinkedList(); - for (Long backendId : this.backendIdToTasks.keySet()) { - tasks.addAll(this.backendIdToTasks.get(backendId)); - } - return tasks; - } - - public int getTaskNum() { - int num = 0; - for (List tasks : backendIdToTasks.values()) { - num += tasks.size(); - } - return num; - } - - @Override - public void run() { - for (Long backendId : this.backendIdToTasks.keySet()) { - BackendService.Client client = null; - TNetworkAddress address = null; - boolean ok = false; - try { - Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId); - if (backend == null || !backend.isAlive()) { - continue; - } - List tasks = this.backendIdToTasks.get(backendId); - // create AgentClient - address = new TNetworkAddress(backend.getHost(), backend.getBePort()); - client = ClientPool.backendPool.borrowObject(address); - - List agentTaskRequests = new LinkedList(); - for 
(AgentTask task : tasks) { - agentTaskRequests.add(toAgentTaskRequest(task)); - } - client.submit_tasks(agentTaskRequests); - - if (LOG.isDebugEnabled()) { - for (AgentTask task : tasks) { - LOG.debug("send task: type[{}], backend[{}], signature[{}]", - task.getTaskType(), backendId, task.getSignature()); - } - } - - ok = true; - } catch (Exception e) { - LOG.warn("task exec error. backend[{}]", backendId, e); - } finally { - if (ok) { - ClientPool.backendPool.returnObject(address, client); - } else { - ClientPool.backendPool.invalidateObject(address, client); - } - } - } // end for backend - } - - private TAgentTaskRequest toAgentTaskRequest(AgentTask task) { - TAgentTaskRequest tAgentTaskRequest = new TAgentTaskRequest(); - tAgentTaskRequest.setProtocol_version(TAgentServiceVersion.V1); - tAgentTaskRequest.setSignature(task.getSignature()); - - TTaskType taskType = task.getTaskType(); - tAgentTaskRequest.setTask_type(taskType); - switch (taskType) { - case CREATE: { - CreateReplicaTask createReplicaTask = (CreateReplicaTask) task; - TCreateTabletReq request = createReplicaTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setCreate_tablet_req(request); - return tAgentTaskRequest; - } - case DROP: { - DropReplicaTask dropReplicaTask = (DropReplicaTask) task; - TDropTabletReq request = dropReplicaTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setDrop_tablet_req(request); - return tAgentTaskRequest; - } - case PUSH: { - PushTask pushTask = (PushTask) task; - TPushReq request = pushTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setPush_req(request); - if (pushTask.getPushType() == TPushType.LOAD || pushTask.getPushType() == TPushType.LOAD_DELETE) { - tAgentTaskRequest.setResource_info(pushTask.getResourceInfo()); - } - tAgentTaskRequest.setPriority(pushTask.getPriority()); - return tAgentTaskRequest; - } - case CLONE: { - CloneTask cloneTask = (CloneTask) task; - TCloneReq request = cloneTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setClone_req(request); - return tAgentTaskRequest; - } - case ROLLUP: { - CreateRollupTask rollupTask = (CreateRollupTask) task; - TAlterTabletReq request = rollupTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setAlter_tablet_req(request); - tAgentTaskRequest.setResource_info(rollupTask.getResourceInfo()); - return tAgentTaskRequest; - } - case SCHEMA_CHANGE: { - SchemaChangeTask schemaChangeTask = (SchemaChangeTask) task; - TAlterTabletReq request = schemaChangeTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setAlter_tablet_req(request); - tAgentTaskRequest.setResource_info(schemaChangeTask.getResourceInfo()); - return tAgentTaskRequest; - } - case CANCEL_DELETE: { - CancelDeleteTask cancelDeleteTask = (CancelDeleteTask) task; - TCancelDeleteDataReq request = cancelDeleteTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setCancel_delete_data_req(request); - return tAgentTaskRequest; - } - case STORAGE_MEDIUM_MIGRATE: { - StorageMediaMigrationTask migrationTask = (StorageMediaMigrationTask) task; - TStorageMediumMigrateReq request = migrationTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setStorage_medium_migrate_req(request); - return tAgentTaskRequest; - } - case CHECK_CONSISTENCY: { - CheckConsistencyTask checkConsistencyTask = (CheckConsistencyTask) task; - TCheckConsistencyReq request = checkConsistencyTask.toThrift(); - LOG.debug(request.toString()); - 
tAgentTaskRequest.setCheck_consistency_req(request); - return tAgentTaskRequest; - } - case MAKE_SNAPSHOT: { - SnapshotTask snapshotTask = (SnapshotTask) task; - TSnapshotRequest request = snapshotTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setSnapshot_req(request); - return tAgentTaskRequest; - } - case RELEASE_SNAPSHOT: { - ReleaseSnapshotTask releaseSnapshotTask = (ReleaseSnapshotTask) task; - TReleaseSnapshotRequest request = releaseSnapshotTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setRelease_snapshot_req(request); - return tAgentTaskRequest; - } - case UPLOAD: { - UploadTask uploadTask = (UploadTask) task; - TUploadReq request = uploadTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setUpload_req(request); - return tAgentTaskRequest; - } +import java.util.Map; + +/* + * This class group tasks by backend + */ +public class AgentBatchTask implements Runnable { + private static final Logger LOG = LogManager.getLogger(AgentBatchTask.class); + + // backendId -> AgentTask List + private Map> backendIdToTasks; + + public AgentBatchTask() { + this.backendIdToTasks = new HashMap>(); + } + + public AgentBatchTask(AgentTask singleTask) { + this(); + addTask(singleTask); + } + + public void addTask(AgentTask agentTask) { + if (agentTask == null) { + return; + } + long backendId = agentTask.getBackendId(); + if (backendIdToTasks.containsKey(backendId)) { + List tasks = backendIdToTasks.get(backendId); + tasks.add(agentTask); + } else { + List tasks = new LinkedList(); + tasks.add(agentTask); + backendIdToTasks.put(backendId, tasks); + } + } + + public List getAllTasks() { + List tasks = new LinkedList(); + for (Long backendId : this.backendIdToTasks.keySet()) { + tasks.addAll(this.backendIdToTasks.get(backendId)); + } + return tasks; + } + + public int getTaskNum() { + int num = 0; + for (List tasks : backendIdToTasks.values()) { + num += tasks.size(); + } + return num; + } + + @Override + public void run() { + for (Long backendId : this.backendIdToTasks.keySet()) { + BackendService.Client client = null; + TNetworkAddress address = null; + boolean ok = false; + try { + Backend backend = Catalog.getCurrentSystemInfo().getBackend(backendId); + if (backend == null || !backend.isAlive()) { + continue; + } + List tasks = this.backendIdToTasks.get(backendId); + // create AgentClient + address = new TNetworkAddress(backend.getHost(), backend.getBePort()); + client = ClientPool.backendPool.borrowObject(address); + + List agentTaskRequests = new LinkedList(); + for (AgentTask task : tasks) { + agentTaskRequests.add(toAgentTaskRequest(task)); + } + client.submit_tasks(agentTaskRequests); + + if (LOG.isDebugEnabled()) { + for (AgentTask task : tasks) { + LOG.debug("send task: type[{}], backend[{}], signature[{}]", + task.getTaskType(), backendId, task.getSignature()); + } + } + + ok = true; + } catch (Exception e) { + LOG.warn("task exec error. 
backend[{}]", backendId, e); + } finally { + if (ok) { + ClientPool.backendPool.returnObject(address, client); + } else { + ClientPool.backendPool.invalidateObject(address, client); + } + } + } // end for backend + } + + private TAgentTaskRequest toAgentTaskRequest(AgentTask task) { + TAgentTaskRequest tAgentTaskRequest = new TAgentTaskRequest(); + tAgentTaskRequest.setProtocol_version(TAgentServiceVersion.V1); + tAgentTaskRequest.setSignature(task.getSignature()); + + TTaskType taskType = task.getTaskType(); + tAgentTaskRequest.setTask_type(taskType); + switch (taskType) { + case CREATE: { + CreateReplicaTask createReplicaTask = (CreateReplicaTask) task; + TCreateTabletReq request = createReplicaTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setCreate_tablet_req(request); + return tAgentTaskRequest; + } + case DROP: { + DropReplicaTask dropReplicaTask = (DropReplicaTask) task; + TDropTabletReq request = dropReplicaTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setDrop_tablet_req(request); + return tAgentTaskRequest; + } + case PUSH: { + PushTask pushTask = (PushTask) task; + TPushReq request = pushTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setPush_req(request); + if (pushTask.getPushType() == TPushType.LOAD || pushTask.getPushType() == TPushType.LOAD_DELETE) { + tAgentTaskRequest.setResource_info(pushTask.getResourceInfo()); + } + tAgentTaskRequest.setPriority(pushTask.getPriority()); + return tAgentTaskRequest; + } + case CLONE: { + CloneTask cloneTask = (CloneTask) task; + TCloneReq request = cloneTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setClone_req(request); + return tAgentTaskRequest; + } + case ROLLUP: { + CreateRollupTask rollupTask = (CreateRollupTask) task; + TAlterTabletReq request = rollupTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setAlter_tablet_req(request); + tAgentTaskRequest.setResource_info(rollupTask.getResourceInfo()); + return tAgentTaskRequest; + } + case SCHEMA_CHANGE: { + SchemaChangeTask schemaChangeTask = (SchemaChangeTask) task; + TAlterTabletReq request = schemaChangeTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setAlter_tablet_req(request); + tAgentTaskRequest.setResource_info(schemaChangeTask.getResourceInfo()); + return tAgentTaskRequest; + } + case CANCEL_DELETE: { + CancelDeleteTask cancelDeleteTask = (CancelDeleteTask) task; + TCancelDeleteDataReq request = cancelDeleteTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setCancel_delete_data_req(request); + return tAgentTaskRequest; + } + case STORAGE_MEDIUM_MIGRATE: { + StorageMediaMigrationTask migrationTask = (StorageMediaMigrationTask) task; + TStorageMediumMigrateReq request = migrationTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setStorage_medium_migrate_req(request); + return tAgentTaskRequest; + } + case CHECK_CONSISTENCY: { + CheckConsistencyTask checkConsistencyTask = (CheckConsistencyTask) task; + TCheckConsistencyReq request = checkConsistencyTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setCheck_consistency_req(request); + return tAgentTaskRequest; + } + case MAKE_SNAPSHOT: { + SnapshotTask snapshotTask = (SnapshotTask) task; + TSnapshotRequest request = snapshotTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setSnapshot_req(request); + return tAgentTaskRequest; + } + case RELEASE_SNAPSHOT: { + ReleaseSnapshotTask releaseSnapshotTask = 
(ReleaseSnapshotTask) task; + TReleaseSnapshotRequest request = releaseSnapshotTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setRelease_snapshot_req(request); + return tAgentTaskRequest; + } + case UPLOAD: { + UploadTask uploadTask = (UploadTask) task; + TUploadReq request = uploadTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setUpload_req(request); + return tAgentTaskRequest; + } case DOWNLOAD: { DownloadTask downloadTask = (DownloadTask) task; - TDownloadReq request = downloadTask.toThrift(); - LOG.debug(request.toString()); - tAgentTaskRequest.setDownload_req(request); - return tAgentTaskRequest; + TDownloadReq request = downloadTask.toThrift(); + LOG.debug(request.toString()); + tAgentTaskRequest.setDownload_req(request); + return tAgentTaskRequest; } case MOVE: { DirMoveTask dirMoveTask = (DirMoveTask) task; @@ -248,9 +248,9 @@ public class AgentBatchTask implements Runnable { LOG.debug(request.toString()); tAgentTaskRequest.setMove_dir_req(request); return tAgentTaskRequest; - } - default: - return null; - } - } -} + } + default: + return null; + } + } +} diff --git a/fe/src/com/baidu/palo/task/AgentTask.java b/fe/src/com/baidu/palo/task/AgentTask.java index a3662f40ad..159076c937 100644 --- a/fe/src/com/baidu/palo/task/AgentTask.java +++ b/fe/src/com/baidu/palo/task/AgentTask.java @@ -13,89 +13,89 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.thrift.TResourceInfo; -import com.baidu.palo.thrift.TTaskType; - -public abstract class AgentTask { - private long signature; - private long backendId; - private TTaskType taskType; - - protected long dbId; - protected long tableId; - protected long partitionId; - protected long indexId; - protected long tabletId; - - protected TResourceInfo resourceInfo; - - protected int failedTimes; - - public AgentTask(TResourceInfo resourceInfo, long backendId, long signature, TTaskType taskType, - long dbId, long tableId, long partitionId, long indexId, long tabletId) { - this.backendId = backendId; - this.signature = signature; - this.taskType = taskType; - - this.dbId = dbId; - this.tableId = tableId; - this.partitionId = partitionId; - this.indexId = indexId; - this.tabletId = tabletId; - - this.resourceInfo = resourceInfo; - - this.failedTimes = 0; - } - - public long getSignature() { - return this.signature; - } - - public long getBackendId() { - return this.backendId; - } - - public TTaskType getTaskType() { - return this.taskType; - } - - public long getDbId() { - return dbId; - } - - public long getTableId() { - return tableId; - } - - public long getPartitionId() { - return partitionId; - } - - public long getIndexId() { - return indexId; - } - - public long getTabletId() { - return tabletId; - } - - public TResourceInfo getResourceInfo() { - return resourceInfo; - } - - public void failed() { - ++this.failedTimes; - } - - public int getFailedTimes() { - return this.failedTimes; - } - - @Override - public String toString() { - return "[" + taskType + "], signature: " + signature + ", backendId: " + backendId + ", tablet id: " + tabletId; - } -} +import com.baidu.palo.thrift.TTaskType; + +public abstract class AgentTask { + private long signature; + private long backendId; + private TTaskType taskType; + + protected long dbId; + protected long tableId; + protected long partitionId; + protected long indexId; + protected long tabletId; + + protected TResourceInfo resourceInfo; + + 
protected int failedTimes; + + public AgentTask(TResourceInfo resourceInfo, long backendId, long signature, TTaskType taskType, + long dbId, long tableId, long partitionId, long indexId, long tabletId) { + this.backendId = backendId; + this.signature = signature; + this.taskType = taskType; + + this.dbId = dbId; + this.tableId = tableId; + this.partitionId = partitionId; + this.indexId = indexId; + this.tabletId = tabletId; + + this.resourceInfo = resourceInfo; + + this.failedTimes = 0; + } + + public long getSignature() { + return this.signature; + } + + public long getBackendId() { + return this.backendId; + } + + public TTaskType getTaskType() { + return this.taskType; + } + + public long getDbId() { + return dbId; + } + + public long getTableId() { + return tableId; + } + + public long getPartitionId() { + return partitionId; + } + + public long getIndexId() { + return indexId; + } + + public long getTabletId() { + return tabletId; + } + + public TResourceInfo getResourceInfo() { + return resourceInfo; + } + + public void failed() { + ++this.failedTimes; + } + + public int getFailedTimes() { + return this.failedTimes; + } + + @Override + public String toString() { + return "[" + taskType + "], signature: " + signature + ", backendId: " + backendId + ", tablet id: " + tabletId; + } +} diff --git a/fe/src/com/baidu/palo/task/AgentTaskExecutor.java b/fe/src/com/baidu/palo/task/AgentTaskExecutor.java index a54cc142b1..9f290527ca 100644 --- a/fe/src/com/baidu/palo/task/AgentTaskExecutor.java +++ b/fe/src/com/baidu/palo/task/AgentTaskExecutor.java @@ -13,19 +13,19 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -public class AgentTaskExecutor { - private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool(); - - public AgentTaskExecutor() { - } - - public static void submit(AgentBatchTask task) { - EXECUTOR.submit(task); - } - -} +package com.baidu.palo.task; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public class AgentTaskExecutor { + private static final ExecutorService EXECUTOR = Executors.newCachedThreadPool(); + + public AgentTaskExecutor() { + } + + public static void submit(AgentBatchTask task) { + EXECUTOR.submit(task); + } + +} diff --git a/fe/src/com/baidu/palo/task/AgentTaskQueue.java b/fe/src/com/baidu/palo/task/AgentTaskQueue.java index 253c23db96..96cf2fbd4a 100644 --- a/fe/src/com/baidu/palo/task/AgentTaskQueue.java +++ b/fe/src/com/baidu/palo/task/AgentTaskQueue.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
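A hedged sketch (not part of the patch) of how the task classes above and the AgentTaskQueue below fit together: a task is remembered in AgentTaskQueue so it can later be diffed against backend reports or removed, grouped per backend by AgentBatchTask, and shipped asynchronously by AgentTaskExecutor. DemoTask is purely illustrative; real callers construct concrete subclasses such as CreateReplicaTask or PushTask, whose constructors are not shown in this section, and the ids used here are dummies that assume a running catalog.

    import com.baidu.palo.task.AgentBatchTask;
    import com.baidu.palo.task.AgentTask;
    import com.baidu.palo.task.AgentTaskExecutor;
    import com.baidu.palo.task.AgentTaskQueue;
    import com.baidu.palo.thrift.TTaskType;

    public class AgentTaskLifecycleSketch {
        // illustrative stand-in for a concrete AgentTask subclass
        static class DemoTask extends AgentTask {
            DemoTask(long backendId, long signature) {
                // resourceInfo null; dbId, tableId, partitionId, indexId, tabletId are dummies
                super(null, backendId, signature, TTaskType.DROP, 1L, 2L, 3L, 4L, 5L);
            }
        }

        public static void main(String[] args) {
            AgentTask task = new DemoTask(10001L, 5L);

            AgentTaskQueue.addTask(task);                        // remembered so it can be re-sent or cancelled later
            AgentBatchTask batchTask = new AgentBatchTask(task); // groups tasks by backend id
            AgentTaskExecutor.submit(batchTask);                 // one submit_tasks RPC per live backend, on a cached pool

            // once the backend reports the task as finished:
            AgentTaskQueue.removeTask(task.getBackendId(), task.getTaskType(), task.getSignature());
        }
    }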
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.thrift.TPushType; import com.baidu.palo.thrift.TTaskType; @@ -30,82 +30,82 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; - -/** - * Task queue - */ -public class AgentTaskQueue { - private static final Logger LOG = LogManager.getLogger(AgentTaskQueue.class); - - // backend id -> (task type -> (signature -> agent task)) - private static Table> tasks = HashBasedTable.create(); - private static int taskNum = 0; - - public static synchronized boolean addTask(AgentTask task) { - long backendId = task.getBackendId(); - TTaskType type = task.getTaskType(); - - Map signatureMap = tasks.get(backendId, type); - if (signatureMap == null) { - signatureMap = Maps.newHashMap(); - tasks.put(backendId, type, signatureMap); - } - - long signature = task.getSignature(); - if (signatureMap.containsKey(signature)) { - return false; - } - signatureMap.put(signature, task); - ++taskNum; - LOG.debug("add task: type[{}], backend[{}], signature[{}]", type, backendId, signature); - if (type == TTaskType.PUSH) { - PushTask pushTask = (PushTask) task; - LOG.debug("push task info: version[{}], version hash[{}]", - pushTask.getVersion(), pushTask.getVersionHash()); - } - return true; - } - - public static synchronized void removeTask(long backendId, TTaskType type, long signature) { - if (!tasks.contains(backendId, type)) { - return; - } - - Map signatureMap = tasks.get(backendId, type); - if (!signatureMap.containsKey(signature)) { - return; - } - signatureMap.remove(signature); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); - --taskNum; - } - - /* - * we cannot define a push task with only 'backendId', 'signature' and 'TTaskType' - * add version, versionHash and TPushType to help - */ - public static synchronized void removePushTask(long backendId, long signature, long version, long versionHash, - TPushType pushType) { - if (!tasks.contains(backendId, TTaskType.PUSH)) { - return; - } - - Map signatureMap = tasks.get(backendId, TTaskType.PUSH); - AgentTask task = signatureMap.get(signature); - if (task == null) { - return; - } - - PushTask pushTask = (PushTask) task; - if (pushTask.getVersion() != version || pushTask.getVersionHash() != versionHash - || pushTask.getPushType() != pushType) { - return; - } - - signatureMap.remove(signature); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", TTaskType.PUSH, backendId, signature); - --taskNum; +import java.util.Set; + +/** + * Task queue + */ +public class AgentTaskQueue { + private static final Logger LOG = LogManager.getLogger(AgentTaskQueue.class); + + // backend id -> (task type -> (signature -> agent task)) + private static Table> tasks = HashBasedTable.create(); + private static int taskNum = 0; + + public static synchronized boolean addTask(AgentTask task) { + long backendId = task.getBackendId(); + TTaskType type = task.getTaskType(); + + Map signatureMap = tasks.get(backendId, type); + if (signatureMap == null) { + signatureMap = Maps.newHashMap(); + tasks.put(backendId, type, signatureMap); + } + + long signature = task.getSignature(); + if (signatureMap.containsKey(signature)) { + return false; + } + signatureMap.put(signature, task); + ++taskNum; + LOG.debug("add task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + if (type == TTaskType.PUSH) { + PushTask pushTask = (PushTask) task; + LOG.debug("push task info: 
version[{}], version hash[{}]", + pushTask.getVersion(), pushTask.getVersionHash()); + } + return true; + } + + public static synchronized void removeTask(long backendId, TTaskType type, long signature) { + if (!tasks.contains(backendId, type)) { + return; + } + + Map signatureMap = tasks.get(backendId, type); + if (!signatureMap.containsKey(signature)) { + return; + } + signatureMap.remove(signature); + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + --taskNum; + } + + /* + * we cannot define a push task with only 'backendId', 'signature' and 'TTaskType' + * add version, versionHash and TPushType to help + */ + public static synchronized void removePushTask(long backendId, long signature, long version, long versionHash, + TPushType pushType) { + if (!tasks.contains(backendId, TTaskType.PUSH)) { + return; + } + + Map signatureMap = tasks.get(backendId, TTaskType.PUSH); + AgentTask task = signatureMap.get(signature); + if (task == null) { + return; + } + + PushTask pushTask = (PushTask) task; + if (pushTask.getVersion() != version || pushTask.getVersionHash() != versionHash + || pushTask.getPushType() != pushType) { + return; + } + + signatureMap.remove(signature); + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", TTaskType.PUSH, backendId, signature); + --taskNum; } public static synchronized void removeTaskOfType(TTaskType type, long signature) { @@ -114,120 +114,120 @@ public class AgentTaskQueue { for (Map innerMap : map.values()) { innerMap.remove(signature); } - } - - public static synchronized AgentTask getTask(long backendId, TTaskType type, long signature) { - if (!tasks.contains(backendId, type)) { - return null; - } - - Map signatureMap = tasks.get(backendId, type); - return signatureMap.get(signature); - } - - public static synchronized List getDiffTasks(long backendId, Map> runningTasks) { - List diffTasks = new ArrayList(); - if (!tasks.containsRow(backendId)) { - return diffTasks; - } - - Map> backendAllTasks = tasks.row(backendId); - for (Map.Entry> entry : backendAllTasks.entrySet()) { - TTaskType taskType = entry.getKey(); - Map tasks = entry.getValue(); - Set excludeSignatures = new HashSet(); - if (runningTasks.containsKey(taskType)) { - excludeSignatures = runningTasks.get(taskType); - } - - for (Map.Entry taskEntry : tasks.entrySet()) { - long signature = taskEntry.getKey(); - AgentTask task = taskEntry.getValue(); - if (!excludeSignatures.contains(signature)) { - diffTasks.add(task); - } - } // end for tasks - } // end for backendAllTasks - - return diffTasks; - } - - public static synchronized void removeReplicaRelatedTasks(long backendId, long signature) { - if (!tasks.containsRow(backendId)) { - return; - } - - Map> backendTasks = tasks.row(backendId); - for (TTaskType type : TTaskType.values()) { - if (backendTasks.containsKey(type)) { - Map typeTasks = backendTasks.get(type); - if (typeTasks.containsKey(signature)) { - typeTasks.remove(signature); - LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); - --taskNum; - } - } - } // end for types - } - - // only for test now - public static synchronized void clearAllTasks() { - tasks.clear(); - taskNum = 0; - } - - public static synchronized int getTaskNum() { - return taskNum; - } - - public static synchronized int getTaskNum(long backendId, TTaskType type, boolean isFailed) { - int taskNum = 0; - if (backendId != -1) { - Map taskMap = tasks.get(backendId, type); - if (taskMap != null) { - if (isFailed) { - for 
(AgentTask task : taskMap.values()) { - if (task.getFailedTimes() > 0) { - ++taskNum; - } - } - } else { - taskNum += taskMap.size(); - } - } - } else { - Map> taskMap = tasks.column(type); - if (taskMap != null) { - for (Map signatureMap : taskMap.values()) { - if (isFailed) { - for (AgentTask task : signatureMap.values()) { - if (task.getFailedTimes() > 0) { - ++taskNum; - } - } - } else { - taskNum += signatureMap.size(); - } - } - } - } - - LOG.info("get task num with type[{}] in backend[{}]: {}. isFailed: {}", - type.name(), backendId, taskNum, isFailed); - return taskNum; - } - - public static synchronized List getFailedTask(long backendId, TTaskType type) { - Map taskMap = tasks.get(backendId, type); - List tasks = Lists.newArrayList(); - if (taskMap != null) { - for (AgentTask task : taskMap.values()) { - if (task.getFailedTimes() > 0) { - tasks.add(task); - } - } - } - return tasks; - } -} - + } + + public static synchronized AgentTask getTask(long backendId, TTaskType type, long signature) { + if (!tasks.contains(backendId, type)) { + return null; + } + + Map signatureMap = tasks.get(backendId, type); + return signatureMap.get(signature); + } + + public static synchronized List getDiffTasks(long backendId, Map> runningTasks) { + List diffTasks = new ArrayList(); + if (!tasks.containsRow(backendId)) { + return diffTasks; + } + + Map> backendAllTasks = tasks.row(backendId); + for (Map.Entry> entry : backendAllTasks.entrySet()) { + TTaskType taskType = entry.getKey(); + Map tasks = entry.getValue(); + Set excludeSignatures = new HashSet(); + if (runningTasks.containsKey(taskType)) { + excludeSignatures = runningTasks.get(taskType); + } + + for (Map.Entry taskEntry : tasks.entrySet()) { + long signature = taskEntry.getKey(); + AgentTask task = taskEntry.getValue(); + if (!excludeSignatures.contains(signature)) { + diffTasks.add(task); + } + } // end for tasks + } // end for backendAllTasks + + return diffTasks; + } + + public static synchronized void removeReplicaRelatedTasks(long backendId, long signature) { + if (!tasks.containsRow(backendId)) { + return; + } + + Map> backendTasks = tasks.row(backendId); + for (TTaskType type : TTaskType.values()) { + if (backendTasks.containsKey(type)) { + Map typeTasks = backendTasks.get(type); + if (typeTasks.containsKey(signature)) { + typeTasks.remove(signature); + LOG.debug("remove task: type[{}], backend[{}], signature[{}]", type, backendId, signature); + --taskNum; + } + } + } // end for types + } + + // only for test now + public static synchronized void clearAllTasks() { + tasks.clear(); + taskNum = 0; + } + + public static synchronized int getTaskNum() { + return taskNum; + } + + public static synchronized int getTaskNum(long backendId, TTaskType type, boolean isFailed) { + int taskNum = 0; + if (backendId != -1) { + Map taskMap = tasks.get(backendId, type); + if (taskMap != null) { + if (isFailed) { + for (AgentTask task : taskMap.values()) { + if (task.getFailedTimes() > 0) { + ++taskNum; + } + } + } else { + taskNum += taskMap.size(); + } + } + } else { + Map> taskMap = tasks.column(type); + if (taskMap != null) { + for (Map signatureMap : taskMap.values()) { + if (isFailed) { + for (AgentTask task : signatureMap.values()) { + if (task.getFailedTimes() > 0) { + ++taskNum; + } + } + } else { + taskNum += signatureMap.size(); + } + } + } + } + + LOG.info("get task num with type[{}] in backend[{}]: {}. 
isFailed: {}", + type.name(), backendId, taskNum, isFailed); + return taskNum; + } + + public static synchronized List getFailedTask(long backendId, TTaskType type) { + Map taskMap = tasks.get(backendId, type); + List tasks = Lists.newArrayList(); + if (taskMap != null) { + for (AgentTask task : taskMap.values()) { + if (task.getFailedTimes() > 0) { + tasks.add(task); + } + } + } + return tasks; + } +} + diff --git a/fe/src/com/baidu/palo/task/CancelDeleteTask.java b/fe/src/com/baidu/palo/task/CancelDeleteTask.java index f428d310fb..6c948c0521 100644 --- a/fe/src/com/baidu/palo/task/CancelDeleteTask.java +++ b/fe/src/com/baidu/palo/task/CancelDeleteTask.java @@ -13,39 +13,39 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.thrift.TCancelDeleteDataReq; -import com.baidu.palo.thrift.TTaskType; - -public class CancelDeleteTask extends AgentTask { - private int schemaHash; - private long version; - private long versionHash; - - public CancelDeleteTask(long backendId, long dbId, long tableId, long partitionId, long indexId, - long tabletId, int schemaHash, long version, long versionHash) { - super(null, backendId, tabletId, TTaskType.CANCEL_DELETE, dbId, tableId, partitionId, indexId, tabletId); - - this.schemaHash = schemaHash; - this.version = version; - this.versionHash = versionHash; - } - - public TCancelDeleteDataReq toThrift() { - TCancelDeleteDataReq request = new TCancelDeleteDataReq(tabletId, schemaHash, version, versionHash); - return request; - } - - public int getSchemaHash() { - return schemaHash; - } - - public long getVersion() { - return version; - } - - public long getVersionHash() { - return versionHash; - } -} +import com.baidu.palo.thrift.TTaskType; + +public class CancelDeleteTask extends AgentTask { + private int schemaHash; + private long version; + private long versionHash; + + public CancelDeleteTask(long backendId, long dbId, long tableId, long partitionId, long indexId, + long tabletId, int schemaHash, long version, long versionHash) { + super(null, backendId, tabletId, TTaskType.CANCEL_DELETE, dbId, tableId, partitionId, indexId, tabletId); + + this.schemaHash = schemaHash; + this.version = version; + this.versionHash = versionHash; + } + + public TCancelDeleteDataReq toThrift() { + TCancelDeleteDataReq request = new TCancelDeleteDataReq(tabletId, schemaHash, version, versionHash); + return request; + } + + public int getSchemaHash() { + return schemaHash; + } + + public long getVersion() { + return version; + } + + public long getVersionHash() { + return versionHash; + } +} diff --git a/fe/src/com/baidu/palo/task/CloneTask.java b/fe/src/com/baidu/palo/task/CloneTask.java index 71c43576a6..e5e57fea27 100644 --- a/fe/src/com/baidu/palo/task/CloneTask.java +++ b/fe/src/com/baidu/palo/task/CloneTask.java @@ -13,41 +13,41 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.thrift.TBackend; import com.baidu.palo.thrift.TCloneReq; import com.baidu.palo.thrift.TStorageMedium; import com.baidu.palo.thrift.TTaskType; -import java.util.List; - -public class CloneTask extends AgentTask { - - private int schemaHash; - private List srcBackends; +import java.util.List; + +public class CloneTask extends AgentTask { + + private int schemaHash; + private List srcBackends; private TStorageMedium storageMedium; long committedVersion; - long committedVersionHash; - - public CloneTask(long backendId, long dbId, long tableId, long partitionId, long indexId, + long committedVersionHash; + + public CloneTask(long backendId, long dbId, long tableId, long partitionId, long indexId, long tabletId, int schemaHash, List srcBackends, TStorageMedium storageMedium, - long committedVersion, long committedVersionHash) { - super(null, backendId, tabletId, TTaskType.CLONE, dbId, tableId, partitionId, indexId, tabletId); - this.schemaHash = schemaHash; - this.srcBackends = srcBackends; + long committedVersion, long committedVersionHash) { + super(null, backendId, tabletId, TTaskType.CLONE, dbId, tableId, partitionId, indexId, tabletId); + this.schemaHash = schemaHash; + this.srcBackends = srcBackends; this.storageMedium = storageMedium; this.committedVersion = committedVersion; - this.committedVersionHash = committedVersionHash; - } - - public int getSchemaHash() { - return schemaHash; - } - - public TStorageMedium getStorageMedium() { - return storageMedium; + this.committedVersionHash = committedVersionHash; + } + + public int getSchemaHash() { + return schemaHash; + } + + public TStorageMedium getStorageMedium() { + return storageMedium; } public long getCommittedVersion() { @@ -64,5 +64,5 @@ public class CloneTask extends AgentTask { request.setCommitted_version(committedVersion); request.setCommitted_version_hash(committedVersionHash); return request; - } -} + } +} diff --git a/fe/src/com/baidu/palo/task/CreateReplicaTask.java b/fe/src/com/baidu/palo/task/CreateReplicaTask.java index ded2303cf4..1f1a2b632f 100644 --- a/fe/src/com/baidu/palo/task/CreateReplicaTask.java +++ b/fe/src/com/baidu/palo/task/CreateReplicaTask.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.catalog.Column; import com.baidu.palo.catalog.KeysType; import com.baidu.palo.common.MarkedCountDownLatch; @@ -30,64 +30,64 @@ import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.List; -import java.util.Set; - -public class CreateReplicaTask extends AgentTask { - private static final Logger LOG = LogManager.getLogger(CreateReplicaTask.class); - - private short shortKeyColumnCount; - private int schemaHash; - - private long version; - private long versionHash; - - private KeysType keysType; - private TStorageType storageType; - private TStorageMedium storageMedium; - - private List columns; - - // bloom filter columns - private Set bfColumns; - private double bfFpp; - - // used for synchronous process +import java.util.Set; + +public class CreateReplicaTask extends AgentTask { + private static final Logger LOG = LogManager.getLogger(CreateReplicaTask.class); + + private short shortKeyColumnCount; + private int schemaHash; + + private long version; + private long versionHash; + + private KeysType keysType; + private TStorageType storageType; + private TStorageMedium storageMedium; + + private List columns; + + // bloom filter columns + private Set bfColumns; + private double bfFpp; + + // used for synchronous process private MarkedCountDownLatch latch; - private boolean inRestoreMode = false; - - public CreateReplicaTask(long backendId, long dbId, long tableId, long partitionId, long indexId, long tabletId, - short shortKeyColumnCount, int schemaHash, long version, long versionHash, - KeysType keysType, TStorageType storageType, - TStorageMedium storageMedium, List columns, - Set bfColumns, double bfFpp, MarkedCountDownLatch latch) { - super(null, backendId, tabletId, TTaskType.CREATE, dbId, tableId, partitionId, indexId, tabletId); - - this.shortKeyColumnCount = shortKeyColumnCount; - this.schemaHash = schemaHash; - - this.version = version; - this.versionHash = versionHash; - - this.keysType = keysType; - this.storageType = storageType; - this.storageMedium = storageMedium; - - this.columns = columns; - - this.bfColumns = bfColumns; - this.bfFpp = bfFpp; - - this.latch = latch; - } - - public void countDownLatch(long backendId, long tabletId) { - if (this.latch != null) { - if (latch.markedCountDown(backendId, tabletId)) { - LOG.debug("CreateReplicaTask current latch count: {}, backend: {}, tablet:{}", - latch.getCount(), backendId, tabletId); - } - } + private boolean inRestoreMode = false; + + public CreateReplicaTask(long backendId, long dbId, long tableId, long partitionId, long indexId, long tabletId, + short shortKeyColumnCount, int schemaHash, long version, long versionHash, + KeysType keysType, TStorageType storageType, + TStorageMedium storageMedium, List columns, + Set bfColumns, double bfFpp, MarkedCountDownLatch latch) { + super(null, backendId, tabletId, TTaskType.CREATE, dbId, tableId, partitionId, indexId, tabletId); + + this.shortKeyColumnCount = shortKeyColumnCount; + this.schemaHash = schemaHash; + + this.version = version; + this.versionHash = versionHash; + + this.keysType = keysType; + this.storageType = storageType; + this.storageMedium = storageMedium; + + this.columns = columns; + + this.bfColumns = bfColumns; + this.bfFpp = bfFpp; + + this.latch = latch; + } + + public void countDownLatch(long backendId, long tabletId) { + if (this.latch != null) { + if (latch.markedCountDown(backendId, tabletId)) { + LOG.debug("CreateReplicaTask current latch 
count: {}, backend: {}, tablet:{}", + latch.getCount(), backendId, tabletId); + } + } } public void setLatch(MarkedCountDownLatch latch) { @@ -96,42 +96,42 @@ public class CreateReplicaTask extends AgentTask { public void setInRestoreMode(boolean inRestoreMode) { this.inRestoreMode = inRestoreMode; - } - - public TCreateTabletReq toThrift() { - TCreateTabletReq createTabletReq = new TCreateTabletReq(); - createTabletReq.setTablet_id(tabletId); - - TTabletSchema tSchema = new TTabletSchema(); - tSchema.setShort_key_column_count(shortKeyColumnCount); - tSchema.setSchema_hash(schemaHash); - tSchema.setKeys_type(keysType.toThrift()); - tSchema.setStorage_type(storageType); - - List tColumns = new ArrayList(); - for (Column column : columns) { - TColumn tColumn = column.toThrift(); - // is bloom filter column - if (bfColumns != null && bfColumns.contains(column.getName())) { - tColumn.setIs_bloom_filter_column(true); - } - tColumns.add(tColumn); - } - tSchema.setColumns(tColumns); - - if (bfColumns != null) { - tSchema.setBloom_filter_fpp(bfFpp); - } - createTabletReq.setTablet_schema(tSchema); - - createTabletReq.setVersion(version); - createTabletReq.setVersion_hash(versionHash); - - createTabletReq.setStorage_medium(storageMedium); + } + + public TCreateTabletReq toThrift() { + TCreateTabletReq createTabletReq = new TCreateTabletReq(); + createTabletReq.setTablet_id(tabletId); + + TTabletSchema tSchema = new TTabletSchema(); + tSchema.setShort_key_column_count(shortKeyColumnCount); + tSchema.setSchema_hash(schemaHash); + tSchema.setKeys_type(keysType.toThrift()); + tSchema.setStorage_type(storageType); + + List tColumns = new ArrayList(); + for (Column column : columns) { + TColumn tColumn = column.toThrift(); + // is bloom filter column + if (bfColumns != null && bfColumns.contains(column.getName())) { + tColumn.setIs_bloom_filter_column(true); + } + tColumns.add(tColumn); + } + tSchema.setColumns(tColumns); + + if (bfColumns != null) { + tSchema.setBloom_filter_fpp(bfFpp); + } + createTabletReq.setTablet_schema(tSchema); + + createTabletReq.setVersion(version); + createTabletReq.setVersion_hash(versionHash); + + createTabletReq.setStorage_medium(storageMedium); if (inRestoreMode) { createTabletReq.setIn_restore_mode(true); - } - - return createTabletReq; - } -} + } + + return createTabletReq; + } +} diff --git a/fe/src/com/baidu/palo/task/CreateRollupTask.java b/fe/src/com/baidu/palo/task/CreateRollupTask.java index f51c8d0daa..8b1746e7bd 100644 --- a/fe/src/com/baidu/palo/task/CreateRollupTask.java +++ b/fe/src/com/baidu/palo/task/CreateRollupTask.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.catalog.Column; import com.baidu.palo.thrift.TAlterTabletReq; import com.baidu.palo.thrift.TColumn; @@ -27,120 +27,120 @@ import com.baidu.palo.thrift.TTaskType; import java.util.ArrayList; import java.util.List; -import java.util.Set; - -public class CreateRollupTask extends AgentTask { - - private long baseTableId; - private long baseTabletId; - - private long rollupReplicaId; - - private int rollupSchemaHash; - private int baseSchemaHash; - - private short shortKeyColumnCount; - private TStorageType storageType; - private TKeysType keysType; - - private List rollupColumns; - - // bloom filter columns - private Set bfColumns; - private double bfFpp; - - public CreateRollupTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, - long partitionId, long rollupIndexId, long baseIndexId, long rollupTabletId, - long baseTabletId, long rollupReplicaId, short shortKeyColumnCount, - int rollupSchemaHash, int baseSchemaHash, TStorageType storageType, - List rollupColumns, Set bfColumns, double bfFpp, TKeysType keysType) { +import java.util.Set; + +public class CreateRollupTask extends AgentTask { + + private long baseTableId; + private long baseTabletId; + + private long rollupReplicaId; + + private int rollupSchemaHash; + private int baseSchemaHash; + + private short shortKeyColumnCount; + private TStorageType storageType; + private TKeysType keysType; + + private List rollupColumns; + + // bloom filter columns + private Set bfColumns; + private double bfFpp; + + public CreateRollupTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, + long partitionId, long rollupIndexId, long baseIndexId, long rollupTabletId, + long baseTabletId, long rollupReplicaId, short shortKeyColumnCount, + int rollupSchemaHash, int baseSchemaHash, TStorageType storageType, + List rollupColumns, Set bfColumns, double bfFpp, TKeysType keysType) { super(resourceInfo, backendId, rollupTabletId, TTaskType.ROLLUP, dbId, tableId, partitionId, rollupIndexId, - rollupTabletId); - - this.baseTableId = baseIndexId; - this.baseTabletId = baseTabletId; - this.rollupReplicaId = rollupReplicaId; - - this.rollupSchemaHash = rollupSchemaHash; - this.baseSchemaHash = baseSchemaHash; - - this.shortKeyColumnCount = shortKeyColumnCount; - this.storageType = storageType; - this.keysType = keysType; - - this.rollupColumns = rollupColumns; - - this.bfColumns = bfColumns; - this.bfFpp = bfFpp; - } - - public TAlterTabletReq toThrift() { - TAlterTabletReq tAlterTabletReq = new TAlterTabletReq(); - tAlterTabletReq.setBase_tablet_id(baseTabletId); - tAlterTabletReq.setBase_schema_hash(baseSchemaHash); - - // make 1 TCreateTableReq - TCreateTabletReq createTabletReq = new TCreateTabletReq(); - createTabletReq.setTablet_id(tabletId); - - // no need to set version - // schema - TTabletSchema tSchema = new TTabletSchema(); - tSchema.setShort_key_column_count(shortKeyColumnCount); - tSchema.setSchema_hash(rollupSchemaHash); - tSchema.setStorage_type(storageType); - tSchema.setKeys_type(keysType); - - List tColumns = new ArrayList(); - for (Column column : rollupColumns) { - TColumn tColumn = column.toThrift(); - // is bloom filter column - if (bfColumns != null && bfColumns.contains(column.getName())) { - tColumn.setIs_bloom_filter_column(true); - } - tColumns.add(tColumn); - } - tSchema.setColumns(tColumns); - - if (bfColumns != null) { - tSchema.setBloom_filter_fpp(bfFpp); - } - createTabletReq.setTablet_schema(tSchema); - - 
tAlterTabletReq.setNew_tablet_req(createTabletReq); - - return tAlterTabletReq; - } - - public long getBaseTableId() { - return baseTableId; - } - - public long getBaseTabletId() { - return baseTabletId; - } - - public long getRollupReplicaId() { - return rollupReplicaId; - } - - public int getRollupSchemaHash() { - return rollupSchemaHash; - } - - public int getBaseSchemaHash() { - return baseSchemaHash; - } - - public short getShortKeyColumnCount() { - return shortKeyColumnCount; - } - - public TStorageType getStorageType() { - return storageType; - } - - public List getRollupColumns() { - return rollupColumns; - } -} + rollupTabletId); + + this.baseTableId = baseIndexId; + this.baseTabletId = baseTabletId; + this.rollupReplicaId = rollupReplicaId; + + this.rollupSchemaHash = rollupSchemaHash; + this.baseSchemaHash = baseSchemaHash; + + this.shortKeyColumnCount = shortKeyColumnCount; + this.storageType = storageType; + this.keysType = keysType; + + this.rollupColumns = rollupColumns; + + this.bfColumns = bfColumns; + this.bfFpp = bfFpp; + } + + public TAlterTabletReq toThrift() { + TAlterTabletReq tAlterTabletReq = new TAlterTabletReq(); + tAlterTabletReq.setBase_tablet_id(baseTabletId); + tAlterTabletReq.setBase_schema_hash(baseSchemaHash); + + // make 1 TCreateTableReq + TCreateTabletReq createTabletReq = new TCreateTabletReq(); + createTabletReq.setTablet_id(tabletId); + + // no need to set version + // schema + TTabletSchema tSchema = new TTabletSchema(); + tSchema.setShort_key_column_count(shortKeyColumnCount); + tSchema.setSchema_hash(rollupSchemaHash); + tSchema.setStorage_type(storageType); + tSchema.setKeys_type(keysType); + + List tColumns = new ArrayList(); + for (Column column : rollupColumns) { + TColumn tColumn = column.toThrift(); + // is bloom filter column + if (bfColumns != null && bfColumns.contains(column.getName())) { + tColumn.setIs_bloom_filter_column(true); + } + tColumns.add(tColumn); + } + tSchema.setColumns(tColumns); + + if (bfColumns != null) { + tSchema.setBloom_filter_fpp(bfFpp); + } + createTabletReq.setTablet_schema(tSchema); + + tAlterTabletReq.setNew_tablet_req(createTabletReq); + + return tAlterTabletReq; + } + + public long getBaseTableId() { + return baseTableId; + } + + public long getBaseTabletId() { + return baseTabletId; + } + + public long getRollupReplicaId() { + return rollupReplicaId; + } + + public int getRollupSchemaHash() { + return rollupSchemaHash; + } + + public int getBaseSchemaHash() { + return baseSchemaHash; + } + + public short getShortKeyColumnCount() { + return shortKeyColumnCount; + } + + public TStorageType getStorageType() { + return storageType; + } + + public List getRollupColumns() { + return rollupColumns; + } +} diff --git a/fe/src/com/baidu/palo/task/DropReplicaTask.java b/fe/src/com/baidu/palo/task/DropReplicaTask.java index 52eba4fa83..fadcd962e9 100644 --- a/fe/src/com/baidu/palo/task/DropReplicaTask.java +++ b/fe/src/com/baidu/palo/task/DropReplicaTask.java @@ -13,28 +13,28 @@ // specific language governing permissions and limitations // under the License. 
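Both CreateReplicaTask.toThrift() and CreateRollupTask.toThrift() above share the same column-conversion pattern: every schema column is converted to its thrift form, and any column named in bfColumns is flagged as a bloom-filter column before the schema is attached to the request. A simplified, standalone sketch of that pattern (placeholder types instead of the generated TColumn):

// Sketch of the bloom-filter-column marking done in toThrift();
// TColumnLike is a stand-in for the thrift-generated TColumn.
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

class BloomFilterColumnSketch {
    static class TColumnLike {
        String name;
        boolean isBloomFilterColumn;
        TColumnLike(String name) { this.name = name; }
    }

    static List<TColumnLike> toThriftColumns(List<String> columnNames, Set<String> bfColumns) {
        List<TColumnLike> tColumns = new ArrayList<>();
        for (String name : columnNames) {
            TColumnLike tColumn = new TColumnLike(name);
            // mirrors tColumn.setIs_bloom_filter_column(true) in the patch
            if (bfColumns != null && bfColumns.contains(name)) {
                tColumn.isBloomFilterColumn = true;
            }
            tColumns.add(tColumn);
        }
        return tColumns;
    }
}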
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.thrift.TDropTabletReq; -import com.baidu.palo.thrift.TTaskType; - -public class DropReplicaTask extends AgentTask { - private int schemaHash; // set -1L as unknown - - public DropReplicaTask(long backendId, long tabletId, int schemaHash) { - super(null, backendId, tabletId, TTaskType.DROP, -1L, -1L, -1L, -1L, tabletId); - this.schemaHash = schemaHash; - } - - public TDropTabletReq toThrift() { - TDropTabletReq request = new TDropTabletReq(tabletId); - if (this.schemaHash != -1) { - request.setSchema_hash(schemaHash); - } - return request; - } - - public int getSchemaHash() { - return schemaHash; - } -} +import com.baidu.palo.thrift.TTaskType; + +public class DropReplicaTask extends AgentTask { + private int schemaHash; // set -1L as unknown + + public DropReplicaTask(long backendId, long tabletId, int schemaHash) { + super(null, backendId, tabletId, TTaskType.DROP, -1L, -1L, -1L, -1L, tabletId); + this.schemaHash = schemaHash; + } + + public TDropTabletReq toThrift() { + TDropTabletReq request = new TDropTabletReq(tabletId); + if (this.schemaHash != -1) { + request.setSchema_hash(schemaHash); + } + return request; + } + + public int getSchemaHash() { + return schemaHash; + } +} diff --git a/fe/src/com/baidu/palo/task/HadoopLoadEtlTask.java b/fe/src/com/baidu/palo/task/HadoopLoadEtlTask.java index 0e8806569e..655373508f 100644 --- a/fe/src/com/baidu/palo/task/HadoopLoadEtlTask.java +++ b/fe/src/com/baidu/palo/task/HadoopLoadEtlTask.java @@ -13,100 +13,100 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - -import com.baidu.palo.common.LoadException; -import com.baidu.palo.common.Pair; -import com.baidu.palo.load.DppConfig; -import com.baidu.palo.load.DppScheduler; -import com.baidu.palo.load.EtlStatus; -import com.baidu.palo.load.LoadJob; -import com.google.common.collect.Maps; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import java.util.List; -import java.util.Map; - -public class HadoopLoadEtlTask extends LoadEtlTask { - private static final Logger LOG = LogManager.getLogger(HadoopLoadEtlTask.class); - private static final String MAP_COMPLETION = "map() completion"; - private static final String REDUCE_COMPLETION = "reduce() completion"; - private static final double FILTER_RATIO_DELTA = 0.02; - - public HadoopLoadEtlTask(LoadJob job) { - super(job); - } - - @Override - protected boolean updateJobEtlStatus() { - // get etl status - DppScheduler dppScheduler = new DppScheduler(job.getHadoopDppConfig()); - EtlStatus status = dppScheduler.getEtlJobStatus(job.getHadoopEtlJobId()); - LOG.info("job status: {}. 
job: {}", status, job.toString()); - - // update load job etl status - job.setEtlJobStatus(status); - return true; - } - - @Override - protected void processEtlRunning() throws LoadException { - Map stats = job.getEtlJobStatus().getStats(); - boolean isMapCompleted = false; - if (stats.containsKey(MAP_COMPLETION) && stats.containsKey(REDUCE_COMPLETION)) { - float mapProgress = Float.parseFloat(stats.get(MAP_COMPLETION)); - float reduceProgress = Float.parseFloat(stats.get(REDUCE_COMPLETION)); - int progress = (int) (100 * (mapProgress + reduceProgress) / 2); - if (progress >= 100) { - // hadoop job status result: - // [map() completion] and [reduce() completion] are not accurate, - // etl job state must be depend on [job state] - progress = 99; - } - job.setProgress(progress); - - if (mapProgress >= 1) { - isMapCompleted = true; - } - } - - // check data quality when map complete - if (isMapCompleted) { - // [map() completion] is not accurate - double maxFilterRatio = job.getMaxFilterRatio() + FILTER_RATIO_DELTA; - if (!checkDataQuality(maxFilterRatio)) { - throw new LoadException(QUALITY_FAIL_MSG); - } - } - } - - @Override - protected Map> getFilePathMap() throws LoadException { - DppConfig dppConfig = job.getHadoopDppConfig(); - // get etl files - DppScheduler dppScheduler = new DppScheduler(dppConfig); - long dbId = job.getDbId(); - String loadLabel = job.getLabel(); - String outputPath = DppScheduler.getEtlOutputPath(dppConfig.getFsDefaultName(), dppConfig.getOutputPath(), - dbId, loadLabel, job.getHadoopEtlOutputDir()); - Map fileMap = dppScheduler.getEtlFiles(outputPath); - if (fileMap == null) { - throw new LoadException("get etl files error"); - } - - // create file map - Map> filePathMap = Maps.newHashMap(); - String httpServer = String.format("http://%s:%s", dppConfig.getNameNodeHost(), dppConfig.getHttpPort()); - String ugi = String.format("ugi=%s", dppConfig.getHadoopJobUgiStr()); - for (Map.Entry entry : fileMap.entrySet()) { - String filePath = entry.getKey(); - String partitionIndexBucket = getPartitionIndexBucketString(filePath); - filePath = String.format("%s/data%s?%s", httpServer, filePath, ugi); - filePathMap.put(partitionIndexBucket, Pair.create(filePath, entry.getValue())); - } - - return filePathMap; - } -} +package com.baidu.palo.task; + +import com.baidu.palo.common.LoadException; +import com.baidu.palo.common.Pair; +import com.baidu.palo.load.DppConfig; +import com.baidu.palo.load.DppScheduler; +import com.baidu.palo.load.EtlStatus; +import com.baidu.palo.load.LoadJob; +import com.google.common.collect.Maps; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.util.List; +import java.util.Map; + +public class HadoopLoadEtlTask extends LoadEtlTask { + private static final Logger LOG = LogManager.getLogger(HadoopLoadEtlTask.class); + private static final String MAP_COMPLETION = "map() completion"; + private static final String REDUCE_COMPLETION = "reduce() completion"; + private static final double FILTER_RATIO_DELTA = 0.02; + + public HadoopLoadEtlTask(LoadJob job) { + super(job); + } + + @Override + protected boolean updateJobEtlStatus() { + // get etl status + DppScheduler dppScheduler = new DppScheduler(job.getHadoopDppConfig()); + EtlStatus status = dppScheduler.getEtlJobStatus(job.getHadoopEtlJobId()); + LOG.info("job status: {}. 
job: {}", status, job.toString()); + + // update load job etl status + job.setEtlJobStatus(status); + return true; + } + + @Override + protected void processEtlRunning() throws LoadException { + Map stats = job.getEtlJobStatus().getStats(); + boolean isMapCompleted = false; + if (stats.containsKey(MAP_COMPLETION) && stats.containsKey(REDUCE_COMPLETION)) { + float mapProgress = Float.parseFloat(stats.get(MAP_COMPLETION)); + float reduceProgress = Float.parseFloat(stats.get(REDUCE_COMPLETION)); + int progress = (int) (100 * (mapProgress + reduceProgress) / 2); + if (progress >= 100) { + // hadoop job status result: + // [map() completion] and [reduce() completion] are not accurate, + // etl job state must be depend on [job state] + progress = 99; + } + job.setProgress(progress); + + if (mapProgress >= 1) { + isMapCompleted = true; + } + } + + // check data quality when map complete + if (isMapCompleted) { + // [map() completion] is not accurate + double maxFilterRatio = job.getMaxFilterRatio() + FILTER_RATIO_DELTA; + if (!checkDataQuality(maxFilterRatio)) { + throw new LoadException(QUALITY_FAIL_MSG); + } + } + } + + @Override + protected Map> getFilePathMap() throws LoadException { + DppConfig dppConfig = job.getHadoopDppConfig(); + // get etl files + DppScheduler dppScheduler = new DppScheduler(dppConfig); + long dbId = job.getDbId(); + String loadLabel = job.getLabel(); + String outputPath = DppScheduler.getEtlOutputPath(dppConfig.getFsDefaultName(), dppConfig.getOutputPath(), + dbId, loadLabel, job.getHadoopEtlOutputDir()); + Map fileMap = dppScheduler.getEtlFiles(outputPath); + if (fileMap == null) { + throw new LoadException("get etl files error"); + } + + // create file map + Map> filePathMap = Maps.newHashMap(); + String httpServer = String.format("http://%s:%s", dppConfig.getNameNodeHost(), dppConfig.getHttpPort()); + String ugi = String.format("ugi=%s", dppConfig.getHadoopJobUgiStr()); + for (Map.Entry entry : fileMap.entrySet()) { + String filePath = entry.getKey(); + String partitionIndexBucket = getPartitionIndexBucketString(filePath); + filePath = String.format("%s/data%s?%s", httpServer, filePath, ugi); + filePathMap.put(partitionIndexBucket, Pair.create(filePath, entry.getValue())); + } + + return filePathMap; + } +} diff --git a/fe/src/com/baidu/palo/task/LoadEtlTask.java b/fe/src/com/baidu/palo/task/LoadEtlTask.java index d059e377a3..2c422d306b 100644 --- a/fe/src/com/baidu/palo/task/LoadEtlTask.java +++ b/fe/src/com/baidu/palo/task/LoadEtlTask.java @@ -1,336 +1,336 @@ -// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package com.baidu.palo.task; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.catalog.DistributionInfo; -import com.baidu.palo.catalog.DistributionInfo.DistributionInfoType; -import com.baidu.palo.catalog.MaterializedIndex; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.Partition; -import com.baidu.palo.catalog.Tablet; -import com.baidu.palo.common.LoadException; -import com.baidu.palo.common.MetaNotFoundException; -import com.baidu.palo.common.Pair; -import com.baidu.palo.load.FailMsg.CancelType; -import com.baidu.palo.load.Load; -import com.baidu.palo.load.LoadChecker; -import com.baidu.palo.load.LoadJob; -import com.baidu.palo.load.LoadJob.JobState; -import com.baidu.palo.load.PartitionLoadInfo; -import com.baidu.palo.load.TableLoadInfo; -import com.baidu.palo.load.TabletLoadInfo; -import com.baidu.palo.thrift.TEtlState; - -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.util.Map; -import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; - -public abstract class LoadEtlTask extends MasterTask { - private static final Logger LOG = LogManager.getLogger(LoadEtlTask.class); - - protected static final String QUALITY_FAIL_MSG = "quality not good enough to cancel"; - public static final String DPP_NORMAL_ALL = "dpp.norm.ALL"; - public static final String DPP_ABNORMAL_ALL = "dpp.abnorm.ALL"; - - protected final LoadJob job; - protected final Load load; - protected Database db; - - public LoadEtlTask(LoadJob job) { - super(); - this.job = job; - this.signature = job.getId(); - this.load = Catalog.getInstance().getLoadInstance(); - } - - protected String getErrorMsg() { - return "etl job fail"; - } - - @Override - protected void exec() { - // check job state - if (job.getState() != JobState.ETL) { - return; - } - - // check timeout - if (LoadChecker.checkTimeout(job)) { - load.cancelLoadJob(job, CancelType.TIMEOUT, "etl timeout to cancel"); - return; - } - - // check db - long dbId = job.getDbId(); - db = Catalog.getInstance().getDb(dbId); - if (db == null) { - load.cancelLoadJob(job, CancelType.ETL_RUN_FAIL, "db does not exist. id: " + dbId); - return; - } - - // update etl job status - if (job.getProgress() != 100) { - try { - updateEtlStatus(); - } catch (LoadException e) { - CancelType cancelType = CancelType.ETL_RUN_FAIL; - if (e.getMessage().equals(QUALITY_FAIL_MSG)) { - cancelType = CancelType.ETL_QUALITY_UNSATISFIED; - } - LOG.debug("update etl status fail, msg: {}. 
job: {}", e.getMessage(), job.toString()); - load.cancelLoadJob(job, cancelType, e.getMessage()); - return; - } - } - - // check partition is loading - if (job.getProgress() == 100) { - tryUpdateLoading(); - } - } - - private void updateEtlStatus() throws LoadException { - if (!updateJobEtlStatus()) { - throw new LoadException("update job etl status fail"); - } - - TEtlState state = job.getEtlJobStatus().getState(); - switch (state) { - case FINISHED: - processEtlFinished(); - break; - case CANCELLED: - throw new LoadException(getErrorMsg()); - case RUNNING: - processEtlRunning(); - break; - default: - LOG.warn("wrong etl job state: {}", state.name()); - break; - } - } - - private void processEtlFinished() throws LoadException { - // check data quality when etl finished - if (!checkDataQuality(job.getMaxFilterRatio())) { - throw new LoadException(QUALITY_FAIL_MSG); - } - - // get etl file map - Map> filePathMap = getFilePathMap(); - - // init tablet load info - Map idToTabletLoadInfo = getTabletLoadInfos(filePathMap); - job.setIdToTabletLoadInfo(idToTabletLoadInfo); - - // update job - job.setProgress(100); - job.setEtlFinishTimeMs(System.currentTimeMillis()); - job.getEtlJobStatus().setFileMap(null); - LOG.info("etl job finished. job: {}", job); - } - - private void tryUpdateLoading() { - // check job has loading partitions - Map idToTableLoadInfo = job.getIdToTableLoadInfo(); - Set partitionIds = Sets.newHashSet(); - for (TableLoadInfo tableLoadInfo : idToTableLoadInfo.values()) { - Map idToPartitionLoadInfo = tableLoadInfo.getIdToPartitionLoadInfo(); - for (Entry entry : idToPartitionLoadInfo.entrySet()) { - PartitionLoadInfo partitionLoadInfo = entry.getValue(); - if (partitionLoadInfo.isNeedLoad()) { - partitionIds.add(entry.getKey()); - } - } - } - if (!load.addLoadingPartitions(partitionIds)) { - LOG.info("load job has unfinished loading partitions. job: {}, job partitions: {}", job, partitionIds); - return; - } - - // new version and version hash - try { - for (Entry tableEntry : idToTableLoadInfo.entrySet()) { - long tableId = tableEntry.getKey(); - OlapTable table = null; - db.readLock(); - try { - table = (OlapTable) db.getTable(tableId); - } finally { - db.readUnlock(); - } - if (table == null) { - throw new MetaNotFoundException("table does not exist. id: " + tableId); - } - - TableLoadInfo tableLoadInfo = tableEntry.getValue(); - Map idToPartitionLoadInfo = tableLoadInfo.getIdToPartitionLoadInfo(); - for (Map.Entry entry : idToPartitionLoadInfo.entrySet()) { - long partitionId = entry.getKey(); - PartitionLoadInfo partitionLoadInfo = entry.getValue(); - if (!partitionLoadInfo.isNeedLoad()) { - continue; - } - - db.readLock(); - try { - Partition partition = table.getPartition(partitionId); - if (partition == null) { - throw new MetaNotFoundException("partition does not exist. 
id: " + partitionId); - } - - partitionLoadInfo.setVersion(partition.getCommittedVersion() + 1); - partitionLoadInfo.setVersionHash(Math.abs(new Random().nextLong())); - } finally { - db.readUnlock(); - } - - LOG.info("load job id: {}, label: {}, partition info: {}-{}-{}, partition load info: {}", - job.getId(), job.getLabel(), db.getId(), tableId, partitionId, partitionLoadInfo); - } - } - } catch (MetaNotFoundException e) { - // remove loading partitions - load.removeLoadingPartitions(partitionIds); - load.cancelLoadJob(job, CancelType.ETL_RUN_FAIL, e.getMessage()); - return; - } - - // update job to loading - if (load.updateLoadJobState(job, JobState.LOADING)) { - LOG.info("update job state to loading success. job: {}", job); - } else { - // remove loading partitions - load.removeLoadingPartitions(partitionIds); - } - } - - protected String getPartitionIndexBucketString(String filePath) throws LoadException { - String fileName = filePath.substring(filePath.lastIndexOf("/") + 1); - // label.partitionId.indexId.bucket - String[] fileNameArr = fileName.split("\\."); - if (fileNameArr.length != 4) { - throw new LoadException("etl file name format error, name: " + fileName); - } - - String partitionIndexBucket = fileName.substring(fileName.indexOf(".") + 1); - return partitionIndexBucket; - } - - protected Map getTabletLoadInfos(Map> filePathMap) - throws LoadException { - Map idToTabletLoadInfo = Maps.newHashMap(); - boolean hasLoadFiles = false; - - // create tablet load info - Map idToTableLoadInfo = job.getIdToTableLoadInfo(); - for (Entry tableEntry : idToTableLoadInfo.entrySet()) { - long tableId = tableEntry.getKey(); - OlapTable table = null; - db.readLock(); - try { - table = (OlapTable) db.getTable(tableId); - } finally { - db.readUnlock(); - } - if (table == null) { - throw new LoadException("table does not exist. id: " + tableId); - } - - TableLoadInfo tableLoadInfo = tableEntry.getValue(); - for (Entry partitionEntry : tableLoadInfo.getIdToPartitionLoadInfo().entrySet()) { - long partitionId = partitionEntry.getKey(); - boolean needLoad = false; - db.readLock(); - try { - Partition partition = table.getPartition(partitionId); - if (partition == null) { - throw new LoadException("partition does not exist. id: " + partitionId); - } - - DistributionInfo distributionInfo = partition.getDistributionInfo(); - DistributionInfoType distributionType = distributionInfo.getType(); - if (distributionType != DistributionInfoType.RANDOM - && distributionType != DistributionInfoType.HASH) { - throw new LoadException("unknown distribution type. 
type: " + distributionType.name()); - } - - for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { - long indexId = materializedIndex.getId(); - int tabletIndex = 0; - for (Tablet tablet : materializedIndex.getTablets()) { - long bucket = tabletIndex++; - String tableViewBucket = String.format("%d.%d.%d", partitionId, indexId, bucket); - String filePath = null; - long fileSize = -1; - if (filePathMap.containsKey(tableViewBucket)) { - Pair filePair = filePathMap.get(tableViewBucket); - filePath = filePair.first; - fileSize = filePair.second; - - needLoad = true; - hasLoadFiles = true; - } - - TabletLoadInfo tabletLoadInfo = new TabletLoadInfo(filePath, fileSize); - idToTabletLoadInfo.put(tablet.getId(), tabletLoadInfo); - } - } - - // partition might have no load data - partitionEntry.getValue().setNeedLoad(needLoad); - } finally { - db.readUnlock(); - } - } - } - - // all partitions might have no load data - if (!hasLoadFiles) { - throw new LoadException("all partitions have no load data"); - } - - return idToTabletLoadInfo; - } - - protected boolean checkDataQuality(double maxFilterRatio) { - Map counters = job.getEtlJobStatus().getCounters(); - if (!counters.containsKey(DPP_NORMAL_ALL) || !counters.containsKey(DPP_ABNORMAL_ALL)) { - return true; - } - - long normalNum = Long.parseLong(counters.get(DPP_NORMAL_ALL)); - long abnormalNum = Long.parseLong(counters.get(DPP_ABNORMAL_ALL)); - if (abnormalNum > (abnormalNum + normalNum) * maxFilterRatio) { - return false; - } - - return true; - } - - protected abstract boolean updateJobEtlStatus(); - protected abstract void processEtlRunning() throws LoadException; - protected abstract Map> getFilePathMap() throws LoadException; -} +// Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.baidu.palo.task; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.catalog.DistributionInfo; +import com.baidu.palo.catalog.DistributionInfo.DistributionInfoType; +import com.baidu.palo.catalog.MaterializedIndex; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.Partition; +import com.baidu.palo.catalog.Tablet; +import com.baidu.palo.common.LoadException; +import com.baidu.palo.common.MetaNotFoundException; +import com.baidu.palo.common.Pair; +import com.baidu.palo.load.FailMsg.CancelType; +import com.baidu.palo.load.Load; +import com.baidu.palo.load.LoadChecker; +import com.baidu.palo.load.LoadJob; +import com.baidu.palo.load.LoadJob.JobState; +import com.baidu.palo.load.PartitionLoadInfo; +import com.baidu.palo.load.TableLoadInfo; +import com.baidu.palo.load.TabletLoadInfo; +import com.baidu.palo.thrift.TEtlState; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; +import java.util.Set; + +public abstract class LoadEtlTask extends MasterTask { + private static final Logger LOG = LogManager.getLogger(LoadEtlTask.class); + + protected static final String QUALITY_FAIL_MSG = "quality not good enough to cancel"; + public static final String DPP_NORMAL_ALL = "dpp.norm.ALL"; + public static final String DPP_ABNORMAL_ALL = "dpp.abnorm.ALL"; + + protected final LoadJob job; + protected final Load load; + protected Database db; + + public LoadEtlTask(LoadJob job) { + super(); + this.job = job; + this.signature = job.getId(); + this.load = Catalog.getInstance().getLoadInstance(); + } + + protected String getErrorMsg() { + return "etl job fail"; + } + + @Override + protected void exec() { + // check job state + if (job.getState() != JobState.ETL) { + return; + } + + // check timeout + if (LoadChecker.checkTimeout(job)) { + load.cancelLoadJob(job, CancelType.TIMEOUT, "etl timeout to cancel"); + return; + } + + // check db + long dbId = job.getDbId(); + db = Catalog.getInstance().getDb(dbId); + if (db == null) { + load.cancelLoadJob(job, CancelType.ETL_RUN_FAIL, "db does not exist. id: " + dbId); + return; + } + + // update etl job status + if (job.getProgress() != 100) { + try { + updateEtlStatus(); + } catch (LoadException e) { + CancelType cancelType = CancelType.ETL_RUN_FAIL; + if (e.getMessage().equals(QUALITY_FAIL_MSG)) { + cancelType = CancelType.ETL_QUALITY_UNSATISFIED; + } + LOG.debug("update etl status fail, msg: {}. 
job: {}", e.getMessage(), job.toString()); + load.cancelLoadJob(job, cancelType, e.getMessage()); + return; + } + } + + // check partition is loading + if (job.getProgress() == 100) { + tryUpdateLoading(); + } + } + + private void updateEtlStatus() throws LoadException { + if (!updateJobEtlStatus()) { + throw new LoadException("update job etl status fail"); + } + + TEtlState state = job.getEtlJobStatus().getState(); + switch (state) { + case FINISHED: + processEtlFinished(); + break; + case CANCELLED: + throw new LoadException(getErrorMsg()); + case RUNNING: + processEtlRunning(); + break; + default: + LOG.warn("wrong etl job state: {}", state.name()); + break; + } + } + + private void processEtlFinished() throws LoadException { + // check data quality when etl finished + if (!checkDataQuality(job.getMaxFilterRatio())) { + throw new LoadException(QUALITY_FAIL_MSG); + } + + // get etl file map + Map> filePathMap = getFilePathMap(); + + // init tablet load info + Map idToTabletLoadInfo = getTabletLoadInfos(filePathMap); + job.setIdToTabletLoadInfo(idToTabletLoadInfo); + + // update job + job.setProgress(100); + job.setEtlFinishTimeMs(System.currentTimeMillis()); + job.getEtlJobStatus().setFileMap(null); + LOG.info("etl job finished. job: {}", job); + } + + private void tryUpdateLoading() { + // check job has loading partitions + Map idToTableLoadInfo = job.getIdToTableLoadInfo(); + Set partitionIds = Sets.newHashSet(); + for (TableLoadInfo tableLoadInfo : idToTableLoadInfo.values()) { + Map idToPartitionLoadInfo = tableLoadInfo.getIdToPartitionLoadInfo(); + for (Entry entry : idToPartitionLoadInfo.entrySet()) { + PartitionLoadInfo partitionLoadInfo = entry.getValue(); + if (partitionLoadInfo.isNeedLoad()) { + partitionIds.add(entry.getKey()); + } + } + } + if (!load.addLoadingPartitions(partitionIds)) { + LOG.info("load job has unfinished loading partitions. job: {}, job partitions: {}", job, partitionIds); + return; + } + + // new version and version hash + try { + for (Entry tableEntry : idToTableLoadInfo.entrySet()) { + long tableId = tableEntry.getKey(); + OlapTable table = null; + db.readLock(); + try { + table = (OlapTable) db.getTable(tableId); + } finally { + db.readUnlock(); + } + if (table == null) { + throw new MetaNotFoundException("table does not exist. id: " + tableId); + } + + TableLoadInfo tableLoadInfo = tableEntry.getValue(); + Map idToPartitionLoadInfo = tableLoadInfo.getIdToPartitionLoadInfo(); + for (Map.Entry entry : idToPartitionLoadInfo.entrySet()) { + long partitionId = entry.getKey(); + PartitionLoadInfo partitionLoadInfo = entry.getValue(); + if (!partitionLoadInfo.isNeedLoad()) { + continue; + } + + db.readLock(); + try { + Partition partition = table.getPartition(partitionId); + if (partition == null) { + throw new MetaNotFoundException("partition does not exist. 
id: " + partitionId); + } + + partitionLoadInfo.setVersion(partition.getCommittedVersion() + 1); + partitionLoadInfo.setVersionHash(Math.abs(new Random().nextLong())); + } finally { + db.readUnlock(); + } + + LOG.info("load job id: {}, label: {}, partition info: {}-{}-{}, partition load info: {}", + job.getId(), job.getLabel(), db.getId(), tableId, partitionId, partitionLoadInfo); + } + } + } catch (MetaNotFoundException e) { + // remove loading partitions + load.removeLoadingPartitions(partitionIds); + load.cancelLoadJob(job, CancelType.ETL_RUN_FAIL, e.getMessage()); + return; + } + + // update job to loading + if (load.updateLoadJobState(job, JobState.LOADING)) { + LOG.info("update job state to loading success. job: {}", job); + } else { + // remove loading partitions + load.removeLoadingPartitions(partitionIds); + } + } + + protected String getPartitionIndexBucketString(String filePath) throws LoadException { + String fileName = filePath.substring(filePath.lastIndexOf("/") + 1); + // label.partitionId.indexId.bucket + String[] fileNameArr = fileName.split("\\."); + if (fileNameArr.length != 4) { + throw new LoadException("etl file name format error, name: " + fileName); + } + + String partitionIndexBucket = fileName.substring(fileName.indexOf(".") + 1); + return partitionIndexBucket; + } + + protected Map getTabletLoadInfos(Map> filePathMap) + throws LoadException { + Map idToTabletLoadInfo = Maps.newHashMap(); + boolean hasLoadFiles = false; + + // create tablet load info + Map idToTableLoadInfo = job.getIdToTableLoadInfo(); + for (Entry tableEntry : idToTableLoadInfo.entrySet()) { + long tableId = tableEntry.getKey(); + OlapTable table = null; + db.readLock(); + try { + table = (OlapTable) db.getTable(tableId); + } finally { + db.readUnlock(); + } + if (table == null) { + throw new LoadException("table does not exist. id: " + tableId); + } + + TableLoadInfo tableLoadInfo = tableEntry.getValue(); + for (Entry partitionEntry : tableLoadInfo.getIdToPartitionLoadInfo().entrySet()) { + long partitionId = partitionEntry.getKey(); + boolean needLoad = false; + db.readLock(); + try { + Partition partition = table.getPartition(partitionId); + if (partition == null) { + throw new LoadException("partition does not exist. id: " + partitionId); + } + + DistributionInfo distributionInfo = partition.getDistributionInfo(); + DistributionInfoType distributionType = distributionInfo.getType(); + if (distributionType != DistributionInfoType.RANDOM + && distributionType != DistributionInfoType.HASH) { + throw new LoadException("unknown distribution type. 
type: " + distributionType.name()); + } + + for (MaterializedIndex materializedIndex : partition.getMaterializedIndices()) { + long indexId = materializedIndex.getId(); + int tabletIndex = 0; + for (Tablet tablet : materializedIndex.getTablets()) { + long bucket = tabletIndex++; + String tableViewBucket = String.format("%d.%d.%d", partitionId, indexId, bucket); + String filePath = null; + long fileSize = -1; + if (filePathMap.containsKey(tableViewBucket)) { + Pair filePair = filePathMap.get(tableViewBucket); + filePath = filePair.first; + fileSize = filePair.second; + + needLoad = true; + hasLoadFiles = true; + } + + TabletLoadInfo tabletLoadInfo = new TabletLoadInfo(filePath, fileSize); + idToTabletLoadInfo.put(tablet.getId(), tabletLoadInfo); + } + } + + // partition might have no load data + partitionEntry.getValue().setNeedLoad(needLoad); + } finally { + db.readUnlock(); + } + } + } + + // all partitions might have no load data + if (!hasLoadFiles) { + throw new LoadException("all partitions have no load data"); + } + + return idToTabletLoadInfo; + } + + protected boolean checkDataQuality(double maxFilterRatio) { + Map counters = job.getEtlJobStatus().getCounters(); + if (!counters.containsKey(DPP_NORMAL_ALL) || !counters.containsKey(DPP_ABNORMAL_ALL)) { + return true; + } + + long normalNum = Long.parseLong(counters.get(DPP_NORMAL_ALL)); + long abnormalNum = Long.parseLong(counters.get(DPP_ABNORMAL_ALL)); + if (abnormalNum > (abnormalNum + normalNum) * maxFilterRatio) { + return false; + } + + return true; + } + + protected abstract boolean updateJobEtlStatus(); + protected abstract void processEtlRunning() throws LoadException; + protected abstract Map> getFilePathMap() throws LoadException; +} diff --git a/fe/src/com/baidu/palo/task/LoadPendingTask.java b/fe/src/com/baidu/palo/task/LoadPendingTask.java index d9e9871d30..6d979c0493 100644 --- a/fe/src/com/baidu/palo/task/LoadPendingTask.java +++ b/fe/src/com/baidu/palo/task/LoadPendingTask.java @@ -13,94 +13,96 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Database; -import com.baidu.palo.load.EtlSubmitResult; -import com.baidu.palo.load.FailMsg.CancelType; -import com.baidu.palo.load.Load; -import com.baidu.palo.load.LoadChecker; -import com.baidu.palo.load.LoadJob; -import com.baidu.palo.load.LoadJob.JobState; -import com.baidu.palo.thrift.TStatusCode; - -import org.apache.commons.lang.StringUtils; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -import java.util.List; - -public abstract class LoadPendingTask extends MasterTask { - private static final Logger LOG = LogManager.getLogger(LoadPendingTask.class); - private static final int RETRY_NUM = 5; - - protected final LoadJob job; - protected final Load load; - protected Database db; - - public LoadPendingTask(LoadJob job) { - this.job = job; - this.signature = job.getId(); - this.load = Catalog.getInstance().getLoadInstance(); - } - - @Override - protected void exec() { - // check job state - if (job.getState() != JobState.PENDING) { - return; - } - - // check timeout - if (LoadChecker.checkTimeout(job)) { - load.cancelLoadJob(job, CancelType.TIMEOUT, "pending timeout to cancel"); - return; - } - - // get db - long dbId = job.getDbId(); - db = Catalog.getInstance().getDb(dbId); - if (db == null) { - load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, "db does not exist. 
id: " + dbId); - return; - } - - // create etl request - try { - createEtlRequest(); - } catch (Exception e) { - LOG.info("create etl request failed.{}", e); - load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, "create job request fail. " + e.getMessage()); - return; - } - - // submit etl job and retry 5 times if error - EtlSubmitResult result = null; - int retry = 0; - while (retry < RETRY_NUM) { - result = submitEtlJob(retry); - if (result != null) { - if (result.getStatus().getStatus_code() == TStatusCode.OK) { - if (load.updateLoadJobState(job, JobState.ETL)) { - LOG.info("submit etl job success. job: {}", job); - return; - } - } - } - ++retry; - } - - String failMsg = "submit etl job fail"; - if (result != null) { - List failMsgs = result.getStatus().getError_msgs(); - failMsg = StringUtils.join(failMsgs, ";"); - } - load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, failMsg); - LOG.warn("submit etl job fail. job: {}", job); - } - - protected abstract void createEtlRequest() throws Exception; - - protected abstract EtlSubmitResult submitEtlJob(int retry); -} +package com.baidu.palo.task; + +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Database; +import com.baidu.palo.load.EtlSubmitResult; +import com.baidu.palo.load.FailMsg.CancelType; +import com.baidu.palo.load.Load; +import com.baidu.palo.load.LoadChecker; +import com.baidu.palo.load.LoadJob; +import com.baidu.palo.load.LoadJob.JobState; +import com.baidu.palo.thrift.TStatusCode; + +import com.google.common.base.Joiner; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +import java.util.List; + +public abstract class LoadPendingTask extends MasterTask { + private static final Logger LOG = LogManager.getLogger(LoadPendingTask.class); + private static final int RETRY_NUM = 5; + + protected final LoadJob job; + protected final Load load; + protected Database db; + + public LoadPendingTask(LoadJob job) { + this.job = job; + this.signature = job.getId(); + this.load = Catalog.getInstance().getLoadInstance(); + } + + @Override + protected void exec() { + // check job state + if (job.getState() != JobState.PENDING) { + return; + } + + // check timeout + if (LoadChecker.checkTimeout(job)) { + load.cancelLoadJob(job, CancelType.TIMEOUT, "pending timeout to cancel"); + return; + } + + // get db + long dbId = job.getDbId(); + db = Catalog.getInstance().getDb(dbId); + if (db == null) { + load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, "db does not exist. id: " + dbId); + return; + } + + // create etl request + try { + createEtlRequest(); + } catch (Exception e) { + LOG.info("create etl request failed.{}", e); + load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, "create job request fail. " + e.getMessage()); + return; + } + + // submit etl job and retry 5 times if error + EtlSubmitResult result = null; + int retry = 0; + while (retry < RETRY_NUM) { + result = submitEtlJob(retry); + if (result != null) { + if (result.getStatus().getStatus_code() == TStatusCode.OK) { + if (load.updateLoadJobState(job, JobState.ETL)) { + LOG.info("submit etl job success. job: {}", job); + return; + } + } + } + ++retry; + } + + String failMsg = "submit etl job fail"; + if (result != null) { + List failMsgs = result.getStatus().getError_msgs(); + failMsg = Joiner.on(";").join(failMsgs); + } + + load.cancelLoadJob(job, CancelType.ETL_SUBMIT_FAIL, failMsg); + LOG.warn("submit etl job fail. 
job: {}", job); + } + + protected abstract void createEtlRequest() throws Exception; + + protected abstract EtlSubmitResult submitEtlJob(int retry); +} diff --git a/fe/src/com/baidu/palo/task/MasterTask.java b/fe/src/com/baidu/palo/task/MasterTask.java index 38e33d6b6d..a831045d45 100644 --- a/fe/src/com/baidu/palo/task/MasterTask.java +++ b/fe/src/com/baidu/palo/task/MasterTask.java @@ -13,32 +13,32 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; - -public abstract class MasterTask implements Runnable { - private static final Logger LOG = LogManager.getLogger(MasterTask.class); - - protected long signature; - - @Override - public void run() { - try { - exec(); - } catch (Exception e) { - LOG.error("task exec error ", e); - } - } - - public long getSignature() { - return signature; - } - - /** - * implement in child - */ - protected abstract void exec(); - -} +package com.baidu.palo.task; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +public abstract class MasterTask implements Runnable { + private static final Logger LOG = LogManager.getLogger(MasterTask.class); + + protected long signature; + + @Override + public void run() { + try { + exec(); + } catch (Exception e) { + LOG.error("task exec error ", e); + } + } + + public long getSignature() { + return signature; + } + + /** + * implement in child + */ + protected abstract void exec(); + +} diff --git a/fe/src/com/baidu/palo/task/MasterTaskExecutor.java b/fe/src/com/baidu/palo/task/MasterTaskExecutor.java index 51155cce78..f439c7d6c0 100644 --- a/fe/src/com/baidu/palo/task/MasterTaskExecutor.java +++ b/fe/src/com/baidu/palo/task/MasterTaskExecutor.java @@ -13,82 +13,82 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.task; - -import com.google.common.collect.Maps; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Timer; -import java.util.TimerTask; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -public class MasterTaskExecutor { - private static final Logger LOG = LogManager.getLogger(MasterTaskExecutor.class); - - private ExecutorService executor; - private Map> runningTasks; - private Timer checkTimer; - - public MasterTaskExecutor(int threadNum) { - executor = Executors.newFixedThreadPool(threadNum); - runningTasks = Maps.newHashMap(); - checkTimer = new Timer("Master Task Check Timer", true); - checkTimer.scheduleAtFixedRate(new TaskChecker(), 0L, 1000L); - } - - /** - * submit task to task executor - * @param task - * @return true if submit success - * false if task exists - */ - public boolean submit(MasterTask task) { - long signature = task.getSignature(); - synchronized (runningTasks) { - if (runningTasks.containsKey(signature)) { - return false; - } - Future future = executor.submit(task); - runningTasks.put(signature, future); - return true; - } - } - - public void close() { - executor.shutdown(); - runningTasks.clear(); - } - - public int getTaskNum() { - synchronized (runningTasks) { - return runningTasks.size(); - } - } - - private class TaskChecker extends TimerTask { - @Override - public void run() { - try { - synchronized (runningTasks) { - Iterator>> iterator = runningTasks.entrySet().iterator(); - while (iterator.hasNext()) { - Entry> entry = iterator.next(); - Future future = entry.getValue(); - if (future.isDone()) { - iterator.remove(); - } - } - } - } catch (Exception e) { - LOG.error("check task error", e); - } - } - } -} +package com.baidu.palo.task; + +import com.google.common.collect.Maps; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +public class MasterTaskExecutor { + private static final Logger LOG = LogManager.getLogger(MasterTaskExecutor.class); + + private ExecutorService executor; + private Map> runningTasks; + private Timer checkTimer; + + public MasterTaskExecutor(int threadNum) { + executor = Executors.newFixedThreadPool(threadNum); + runningTasks = Maps.newHashMap(); + checkTimer = new Timer("Master Task Check Timer", true); + checkTimer.scheduleAtFixedRate(new TaskChecker(), 0L, 1000L); + } + + /** + * submit task to task executor + * @param task + * @return true if submit success + * false if task exists + */ + public boolean submit(MasterTask task) { + long signature = task.getSignature(); + synchronized (runningTasks) { + if (runningTasks.containsKey(signature)) { + return false; + } + Future future = executor.submit(task); + runningTasks.put(signature, future); + return true; + } + } + + public void close() { + executor.shutdown(); + runningTasks.clear(); + } + + public int getTaskNum() { + synchronized (runningTasks) { + return runningTasks.size(); + } + } + + private class TaskChecker extends TimerTask { + @Override + public void run() { + try { + synchronized (runningTasks) { + Iterator>> iterator = 
runningTasks.entrySet().iterator(); + while (iterator.hasNext()) { + Entry> entry = iterator.next(); + Future future = entry.getValue(); + if (future.isDone()) { + iterator.remove(); + } + } + } + } catch (Exception e) { + LOG.error("check task error", e); + } + } + } +} diff --git a/fe/src/com/baidu/palo/task/PushTask.java b/fe/src/com/baidu/palo/task/PushTask.java index 0dfad36468..0ca078c223 100644 --- a/fe/src/com/baidu/palo/task/PushTask.java +++ b/fe/src/com/baidu/palo/task/PushTask.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.analysis.BinaryPredicate; import com.baidu.palo.analysis.BinaryPredicate.Operator; import com.baidu.palo.analysis.IsNullPredicate; @@ -33,158 +33,158 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.util.ArrayList; -import java.util.List; - -public class PushTask extends AgentTask { - private static final Logger LOG = LogManager.getLogger(CreateReplicaTask.class); - - private long replicaId; - private int schemaHash; - private long version; - private long versionHash; - private String filePath; - private long fileSize; - private int timeoutSecond; - private long loadJobId; - private TPushType pushType; - private List conditions; - // for synchronous delete - private MarkedCountDownLatch latch; - - // lzop decompress or not - private boolean needDecompress; - - private TPriority priority; - private boolean isSyncDelete; - private long asyncDeleteJobId; - - public PushTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, long partitionId, - long indexId, long tabletId, long replicaId, int schemaHash, long version, long versionHash, - String filePath, long fileSize, int timeoutSecond, long loadJobId, TPushType pushType, - List conditions, boolean needDecompress, TPriority priority) { - super(resourceInfo, backendId, tabletId, TTaskType.PUSH, dbId, tableId, partitionId, indexId, tabletId); - this.replicaId = replicaId; - this.schemaHash = schemaHash; - this.version = version; - this.versionHash = versionHash; - this.filePath = filePath; - this.fileSize = fileSize; - this.timeoutSecond = timeoutSecond; - this.loadJobId = loadJobId; - this.pushType = pushType; - this.conditions = conditions; - this.latch = null; - this.needDecompress = needDecompress; - this.priority = priority; - this.isSyncDelete = true; - this.asyncDeleteJobId = -1; - } - - public TPushReq toThrift() { - TPushReq request = new TPushReq(tabletId, schemaHash, version, versionHash, timeoutSecond, pushType); - switch (pushType) { - case LOAD: - case LOAD_DELETE: - request.setHttp_file_path(filePath); - if (fileSize != -1) { - request.setHttp_file_size(fileSize); - } - request.setNeed_decompress(needDecompress); - break; - case DELETE: - List tConditions = new ArrayList(); - for (Predicate condition : conditions) { - TCondition tCondition = new TCondition(); - ArrayList conditionValues = new ArrayList(); - if (condition instanceof BinaryPredicate) { - BinaryPredicate binaryPredicate = (BinaryPredicate) condition; - String columnName = ((SlotRef) binaryPredicate.getChild(0)).getColumnName(); - String value = ((LiteralExpr) binaryPredicate.getChild(1)).getStringValue(); - Operator op = binaryPredicate.getOp(); - tCondition.setColumn_name(columnName); - tCondition.setCondition_op(op.toString()); - conditionValues.add(value); - } else if (condition instanceof IsNullPredicate) { - IsNullPredicate 
isNullPredicate = (IsNullPredicate) condition; - String columnName = ((SlotRef) isNullPredicate.getChild(0)).getColumnName(); - String op = "IS"; - String value = "NULL"; - if (isNullPredicate.isNotNull()) { - value = "NOT NULL"; - } - tCondition.setColumn_name(columnName); - tCondition.setCondition_op(op); - conditionValues.add(value); - } - - tCondition.setCondition_values(conditionValues); - - tConditions.add(tCondition); - } - request.setDelete_conditions(tConditions); - break; - default: - LOG.warn("unknown push type. type: " + pushType.name()); - break; - } - - return request; - } - - public void setCountDownLatch(MarkedCountDownLatch latch) { - this.latch = latch; - } - - public void countDownLatch(long backendId, long tabletId) { - if (this.latch != null) { - if (latch.markedCountDown(backendId, tabletId)) { - LOG.info("pushTask current latch count: {}. backend: {}, tablet:{}", - latch.getCount(), backendId, tabletId); - } - } - } - - public long getReplicaId() { - return replicaId; - } - - public int getSchemaHash() { - return schemaHash; - } - - public long getVersion() { - return version; - } - - public long getVersionHash() { - return versionHash; - } - - public long getLoadJobId() { - return loadJobId; - } - - public TPushType getPushType() { - return pushType; - } - - public TPriority getPriority() { - return priority; - } - - public void setIsSyncDelete(boolean isSyncDelete) { - this.isSyncDelete = isSyncDelete; - } - - public boolean isSyncDelete() { - return isSyncDelete; - } - - public void setAsyncDeleteJobId(long jobId) { - this.asyncDeleteJobId = jobId; - } - - public long getAsyncDeleteJobId() { - return asyncDeleteJobId; - } -} +import java.util.List; + +public class PushTask extends AgentTask { + private static final Logger LOG = LogManager.getLogger(CreateReplicaTask.class); + + private long replicaId; + private int schemaHash; + private long version; + private long versionHash; + private String filePath; + private long fileSize; + private int timeoutSecond; + private long loadJobId; + private TPushType pushType; + private List conditions; + // for synchronous delete + private MarkedCountDownLatch latch; + + // lzop decompress or not + private boolean needDecompress; + + private TPriority priority; + private boolean isSyncDelete; + private long asyncDeleteJobId; + + public PushTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, long partitionId, + long indexId, long tabletId, long replicaId, int schemaHash, long version, long versionHash, + String filePath, long fileSize, int timeoutSecond, long loadJobId, TPushType pushType, + List conditions, boolean needDecompress, TPriority priority) { + super(resourceInfo, backendId, tabletId, TTaskType.PUSH, dbId, tableId, partitionId, indexId, tabletId); + this.replicaId = replicaId; + this.schemaHash = schemaHash; + this.version = version; + this.versionHash = versionHash; + this.filePath = filePath; + this.fileSize = fileSize; + this.timeoutSecond = timeoutSecond; + this.loadJobId = loadJobId; + this.pushType = pushType; + this.conditions = conditions; + this.latch = null; + this.needDecompress = needDecompress; + this.priority = priority; + this.isSyncDelete = true; + this.asyncDeleteJobId = -1; + } + + public TPushReq toThrift() { + TPushReq request = new TPushReq(tabletId, schemaHash, version, versionHash, timeoutSecond, pushType); + switch (pushType) { + case LOAD: + case LOAD_DELETE: + request.setHttp_file_path(filePath); + if (fileSize != -1) { + request.setHttp_file_size(fileSize); + } + 
request.setNeed_decompress(needDecompress); + break; + case DELETE: + List tConditions = new ArrayList(); + for (Predicate condition : conditions) { + TCondition tCondition = new TCondition(); + ArrayList conditionValues = new ArrayList(); + if (condition instanceof BinaryPredicate) { + BinaryPredicate binaryPredicate = (BinaryPredicate) condition; + String columnName = ((SlotRef) binaryPredicate.getChild(0)).getColumnName(); + String value = ((LiteralExpr) binaryPredicate.getChild(1)).getStringValue(); + Operator op = binaryPredicate.getOp(); + tCondition.setColumn_name(columnName); + tCondition.setCondition_op(op.toString()); + conditionValues.add(value); + } else if (condition instanceof IsNullPredicate) { + IsNullPredicate isNullPredicate = (IsNullPredicate) condition; + String columnName = ((SlotRef) isNullPredicate.getChild(0)).getColumnName(); + String op = "IS"; + String value = "NULL"; + if (isNullPredicate.isNotNull()) { + value = "NOT NULL"; + } + tCondition.setColumn_name(columnName); + tCondition.setCondition_op(op); + conditionValues.add(value); + } + + tCondition.setCondition_values(conditionValues); + + tConditions.add(tCondition); + } + request.setDelete_conditions(tConditions); + break; + default: + LOG.warn("unknown push type. type: " + pushType.name()); + break; + } + + return request; + } + + public void setCountDownLatch(MarkedCountDownLatch latch) { + this.latch = latch; + } + + public void countDownLatch(long backendId, long tabletId) { + if (this.latch != null) { + if (latch.markedCountDown(backendId, tabletId)) { + LOG.info("pushTask current latch count: {}. backend: {}, tablet:{}", + latch.getCount(), backendId, tabletId); + } + } + } + + public long getReplicaId() { + return replicaId; + } + + public int getSchemaHash() { + return schemaHash; + } + + public long getVersion() { + return version; + } + + public long getVersionHash() { + return versionHash; + } + + public long getLoadJobId() { + return loadJobId; + } + + public TPushType getPushType() { + return pushType; + } + + public TPriority getPriority() { + return priority; + } + + public void setIsSyncDelete(boolean isSyncDelete) { + this.isSyncDelete = isSyncDelete; + } + + public boolean isSyncDelete() { + return isSyncDelete; + } + + public void setAsyncDeleteJobId(long jobId) { + this.asyncDeleteJobId = jobId; + } + + public long getAsyncDeleteJobId() { + return asyncDeleteJobId; + } +} diff --git a/fe/src/com/baidu/palo/task/SchemaChangeTask.java b/fe/src/com/baidu/palo/task/SchemaChangeTask.java index 23be4708fd..fb1bed3519 100644 --- a/fe/src/com/baidu/palo/task/SchemaChangeTask.java +++ b/fe/src/com/baidu/palo/task/SchemaChangeTask.java @@ -13,8 +13,8 @@ // specific language governing permissions and limitations // under the License. 
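
Editor's note on the PushTask hunk above: in the DELETE branch of toThrift, each delete predicate is flattened into a column/operator/values triple before being shipped to the backend. A BinaryPredicate contributes its column name, the operator string, and the literal value; an IsNullPredicate contributes the column name, the operator "IS", and the value "NULL" or "NOT NULL". The sketch below shows that flattening in isolation; the Condition class stands in for the generated Thrift TCondition and is an assumption made purely for the example.

import java.util.ArrayList;
import java.util.List;

// Stand-in for the generated Thrift TCondition; the name is illustrative only.
class Condition {
    String columnName;
    String op;
    List<String> values = new ArrayList<>();

    Condition(String columnName, String op, String value) {
        this.columnName = columnName;
        this.op = op;
        this.values.add(value);
    }

    @Override
    public String toString() {
        return columnName + " " + op + " " + values;
    }
}

public class DeleteConditionSketch {
    // Equality predicate, e.g. k1 = 'abc'  ->  (k1, =, ['abc'])
    static Condition fromBinary(String column, String op, String literal) {
        return new Condition(column, op, literal);
    }

    // Null-check predicate, e.g. k2 IS NOT NULL  ->  (k2, IS, ['NOT NULL'])
    static Condition fromIsNull(String column, boolean notNull) {
        return new Condition(column, "IS", notNull ? "NOT NULL" : "NULL");
    }

    public static void main(String[] args) {
        List<Condition> conditions = new ArrayList<>();
        conditions.add(fromBinary("k1", "=", "abc"));
        conditions.add(fromIsNull("k2", true));
        conditions.forEach(System.out::println);
    }
}
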
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.catalog.Column; import com.baidu.palo.thrift.TAlterTabletReq; import com.baidu.palo.thrift.TColumn; @@ -27,105 +27,105 @@ import com.baidu.palo.thrift.TTaskType; import java.util.ArrayList; import java.util.List; -import java.util.Set; - -public class SchemaChangeTask extends AgentTask { - - private long baseReplicaId; - private int baseSchemaHash; - private TStorageType storageType; - private TKeysType keysType; - - private int newSchemaHash; - private short newShortKeyColumnCount; - private List newColumns; - - // bloom filter columns - private Set bfColumns; - private double bfFpp; - - public SchemaChangeTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, - long partitionId, long indexId, long baseTabletId, long baseReplicaId, - List newColumns, int newSchemaHash, int baseSchemaHash, - short newShortKeyColumnCount, TStorageType storageType, - Set bfColumns, double bfFpp, TKeysType keysType) { +import java.util.Set; + +public class SchemaChangeTask extends AgentTask { + + private long baseReplicaId; + private int baseSchemaHash; + private TStorageType storageType; + private TKeysType keysType; + + private int newSchemaHash; + private short newShortKeyColumnCount; + private List newColumns; + + // bloom filter columns + private Set bfColumns; + private double bfFpp; + + public SchemaChangeTask(TResourceInfo resourceInfo, long backendId, long dbId, long tableId, + long partitionId, long indexId, long baseTabletId, long baseReplicaId, + List newColumns, int newSchemaHash, int baseSchemaHash, + short newShortKeyColumnCount, TStorageType storageType, + Set bfColumns, double bfFpp, TKeysType keysType) { super(resourceInfo, backendId, baseTabletId, TTaskType.SCHEMA_CHANGE, dbId, tableId, partitionId, indexId, - baseTabletId); - - this.baseReplicaId = baseReplicaId; - this.baseSchemaHash = baseSchemaHash; - this.storageType = storageType; - this.keysType = keysType; - - this.newSchemaHash = newSchemaHash; - this.newShortKeyColumnCount = newShortKeyColumnCount; - this.newColumns = newColumns; - - this.bfColumns = bfColumns; - this.bfFpp = bfFpp; - } - - public TAlterTabletReq toThrift() { - TAlterTabletReq tAlterTabletReq = new TAlterTabletReq(); - - tAlterTabletReq.setBase_tablet_id(tabletId); - tAlterTabletReq.setBase_schema_hash(baseSchemaHash); - - // make 1 TCreateTableReq - TCreateTabletReq createTabletReq = new TCreateTabletReq(); - createTabletReq.setTablet_id(tabletId); - - // no need to set version - // schema - TTabletSchema tSchema = new TTabletSchema(); - tSchema.setShort_key_column_count(newShortKeyColumnCount); - tSchema.setSchema_hash(newSchemaHash); - tSchema.setStorage_type(storageType); - tSchema.setKeys_type(keysType); - - List tColumns = new ArrayList(); - for (Column column : newColumns) { - TColumn tColumn = column.toThrift(); - // is bloom filter column - if (bfColumns != null && bfColumns.contains(column.getName())) { - tColumn.setIs_bloom_filter_column(true); - } - tColumns.add(tColumn); - } - tSchema.setColumns(tColumns); - - if (bfColumns != null) { - tSchema.setBloom_filter_fpp(bfFpp); - } - createTabletReq.setTablet_schema(tSchema); - - tAlterTabletReq.setNew_tablet_req(createTabletReq); - - return tAlterTabletReq; - } - - public long getReplicaId() { - return baseReplicaId; - } - - public int getSchemaHash() { - return newSchemaHash; - } - - public int getBaseSchemaHash() { - return baseSchemaHash; - } - - public short getNewShortKeyColumnCount() { - return 
newShortKeyColumnCount; - } - - public TStorageType getStorageType() { - return storageType; - } - - public List getColumns() { - return newColumns; - } - -} + baseTabletId); + + this.baseReplicaId = baseReplicaId; + this.baseSchemaHash = baseSchemaHash; + this.storageType = storageType; + this.keysType = keysType; + + this.newSchemaHash = newSchemaHash; + this.newShortKeyColumnCount = newShortKeyColumnCount; + this.newColumns = newColumns; + + this.bfColumns = bfColumns; + this.bfFpp = bfFpp; + } + + public TAlterTabletReq toThrift() { + TAlterTabletReq tAlterTabletReq = new TAlterTabletReq(); + + tAlterTabletReq.setBase_tablet_id(tabletId); + tAlterTabletReq.setBase_schema_hash(baseSchemaHash); + + // make 1 TCreateTableReq + TCreateTabletReq createTabletReq = new TCreateTabletReq(); + createTabletReq.setTablet_id(tabletId); + + // no need to set version + // schema + TTabletSchema tSchema = new TTabletSchema(); + tSchema.setShort_key_column_count(newShortKeyColumnCount); + tSchema.setSchema_hash(newSchemaHash); + tSchema.setStorage_type(storageType); + tSchema.setKeys_type(keysType); + + List tColumns = new ArrayList(); + for (Column column : newColumns) { + TColumn tColumn = column.toThrift(); + // is bloom filter column + if (bfColumns != null && bfColumns.contains(column.getName())) { + tColumn.setIs_bloom_filter_column(true); + } + tColumns.add(tColumn); + } + tSchema.setColumns(tColumns); + + if (bfColumns != null) { + tSchema.setBloom_filter_fpp(bfFpp); + } + createTabletReq.setTablet_schema(tSchema); + + tAlterTabletReq.setNew_tablet_req(createTabletReq); + + return tAlterTabletReq; + } + + public long getReplicaId() { + return baseReplicaId; + } + + public int getSchemaHash() { + return newSchemaHash; + } + + public int getBaseSchemaHash() { + return baseSchemaHash; + } + + public short getNewShortKeyColumnCount() { + return newShortKeyColumnCount; + } + + public TStorageType getStorageType() { + return storageType; + } + + public List getColumns() { + return newColumns; + } + +} diff --git a/fe/test/com/baidu/palo/analysis/BackendStmtTest.java b/fe/test/com/baidu/palo/analysis/BackendStmtTest.java index 07fb82eb70..b67fb63cb2 100644 --- a/fe/test/com/baidu/palo/analysis/BackendStmtTest.java +++ b/fe/test/com/baidu/palo/analysis/BackendStmtTest.java @@ -18,83 +18,83 @@ // specific language governing permissions and limitations // under the License. 
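
Editor's note on the BackendStmtTest hunk that follows: it exercises AddBackendClause and DropBackendClause with a missing host (":12346"), a non-IP host ("asdasd:12345"), an out-of-range port ("10.1.2.3:123467"), and two well-formed addresses. The actual checks live in BackendClause.analyze, which is not part of this hunk; the validator below is only a hypothetical sketch that reproduces what the test cases expect.

import java.util.regex.Pattern;

// Hypothetical host:port validator mirroring the expectations of BackendStmtTest:
// ":12346" (missing host), "asdasd:12345" (not an IPv4 address) and
// "10.1.2.3:123467" (port out of range) are rejected; "192.168.1.1:12345" passes.
public class HostPortValidator {
    private static final Pattern IPV4 =
            Pattern.compile("^(\\d{1,3})(\\.\\d{1,3}){3}$");

    public static boolean isValid(String hostPort) {
        int idx = hostPort.lastIndexOf(':');
        if (idx <= 0 || idx == hostPort.length() - 1) {
            return false;                        // missing host or missing port
        }
        String host = hostPort.substring(0, idx);
        String portStr = hostPort.substring(idx + 1);
        if (!IPV4.matcher(host).matches()) {
            return false;                        // "asdasd"-style hosts are rejected
        }
        int port;
        try {
            port = Integer.parseInt(portStr);
        } catch (NumberFormatException e) {
            return false;
        }
        return port > 0 && port <= 65535;        // 123467 is out of range
    }

    public static void main(String[] args) {
        System.out.println(isValid(":12346"));             // false
        System.out.println(isValid("asdasd:12345"));       // false
        System.out.println(isValid("10.1.2.3:123467"));    // false
        System.out.println(isValid("192.168.1.1:12345"));  // true
    }
}
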
-package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.common.AnalysisException; import com.google.common.collect.Lists; import org.junit.Assert; import org.junit.BeforeClass; -import org.junit.Test; - -public class BackendStmtTest { - - private static Analyzer analyzer; - - @BeforeClass - public static void setUp() throws Exception { - analyzer = AccessTestUtil.fetchAdminAnalyzer(false); - } - - public BackendClause createStmt(int type) { - BackendClause stmt = null; - switch (type) { - case 1: - // missing ip - stmt = new AddBackendClause(Lists.newArrayList(":12346")); - break; - case 2: - // invalid ip - stmt = new AddBackendClause(Lists.newArrayList("asdasd:12345")); - break; - case 3: - // invalid port - stmt = new AddBackendClause(Lists.newArrayList("10.1.2.3:123467")); - break; - case 4: - // normal add - stmt = new AddBackendClause(Lists.newArrayList("192.168.1.1:12345")); - break; - case 5: - // normal remove - stmt = new DropBackendClause(Lists.newArrayList("192.168.1.2:12345")); - break; - default: - break; - } - return stmt; - } - - @Test(expected = AnalysisException.class) - public void initBackendsTest1() throws Exception { - BackendClause stmt = createStmt(1); - stmt.analyze(analyzer); - } - - @Test(expected = AnalysisException.class) - public void initBackendsTest2() throws Exception { - BackendClause stmt = createStmt(2); - stmt.analyze(analyzer); - } - - @Test(expected = AnalysisException.class) - public void initBackendsTest3() throws Exception { - BackendClause stmt = createStmt(3); - stmt.analyze(analyzer); - } - - @Test - public void initBackendsTest4() throws Exception { - BackendClause stmt = createStmt(4); - stmt.analyze(analyzer); - Assert.assertEquals("ADD FREE BACKEND \"192.168.1.1:12345\"", stmt.toSql()); - } - - @Test - public void initBackendsTest5() throws Exception { - BackendClause stmt = createStmt(5); - stmt.analyze(analyzer); - Assert.assertEquals("DROP BACKEND \"192.168.1.2:12345\"", stmt.toSql()); - } -} +import org.junit.Test; + +public class BackendStmtTest { + + private static Analyzer analyzer; + + @BeforeClass + public static void setUp() throws Exception { + analyzer = AccessTestUtil.fetchAdminAnalyzer(false); + } + + public BackendClause createStmt(int type) { + BackendClause stmt = null; + switch (type) { + case 1: + // missing ip + stmt = new AddBackendClause(Lists.newArrayList(":12346")); + break; + case 2: + // invalid ip + stmt = new AddBackendClause(Lists.newArrayList("asdasd:12345")); + break; + case 3: + // invalid port + stmt = new AddBackendClause(Lists.newArrayList("10.1.2.3:123467")); + break; + case 4: + // normal add + stmt = new AddBackendClause(Lists.newArrayList("192.168.1.1:12345")); + break; + case 5: + // normal remove + stmt = new DropBackendClause(Lists.newArrayList("192.168.1.2:12345")); + break; + default: + break; + } + return stmt; + } + + @Test(expected = AnalysisException.class) + public void initBackendsTest1() throws Exception { + BackendClause stmt = createStmt(1); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void initBackendsTest2() throws Exception { + BackendClause stmt = createStmt(2); + stmt.analyze(analyzer); + } + + @Test(expected = AnalysisException.class) + public void initBackendsTest3() throws Exception { + BackendClause stmt = createStmt(3); + stmt.analyze(analyzer); + } + + @Test + public void initBackendsTest4() throws Exception { + BackendClause stmt = createStmt(4); + stmt.analyze(analyzer); + Assert.assertEquals("ADD 
FREE BACKEND \"192.168.1.1:12345\"", stmt.toSql()); + } + + @Test + public void initBackendsTest5() throws Exception { + BackendClause stmt = createStmt(5); + stmt.analyze(analyzer); + Assert.assertEquals("DROP BACKEND \"192.168.1.2:12345\"", stmt.toSql()); + } +} diff --git a/fe/test/com/baidu/palo/analysis/CancelAlterStmtTest.java b/fe/test/com/baidu/palo/analysis/CancelAlterStmtTest.java index 3ff288b545..7643e46dc5 100644 --- a/fe/test/com/baidu/palo/analysis/CancelAlterStmtTest.java +++ b/fe/test/com/baidu/palo/analysis/CancelAlterStmtTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.analysis.ShowAlterStmt.AlterType; import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.AnalysisException; @@ -35,60 +35,60 @@ import org.junit.runner.RunWith; import org.powermock.api.easymock.PowerMock; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") @PrepareForTest({ Catalog.class, ConnectContext.class }) -public class CancelAlterStmtTest { - - private Analyzer analyzer; +public class CancelAlterStmtTest { + + private Analyzer analyzer; private Catalog catalog; private ConnectContext ctx; private PaloAuth auth; - - @Before + + @Before public void setUp() { auth = new PaloAuth(); ctx = new ConnectContext(null); ctx.setQualifiedUser("root"); ctx.setRemoteIP("192.168.1.1"); - - catalog = AccessTestUtil.fetchAdminCatalog(); - - PowerMock.mockStatic(Catalog.class); + + catalog = AccessTestUtil.fetchAdminCatalog(); + + PowerMock.mockStatic(Catalog.class); EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); EasyMock.expect(Catalog.getCurrentCatalog()).andReturn(catalog).anyTimes(); PowerMock.replay(Catalog.class); PowerMock.mockStatic(ConnectContext.class); EasyMock.expect(ConnectContext.get()).andReturn(ctx).anyTimes(); - PowerMock.replay(ConnectContext.class); - - analyzer = EasyMock.createMock(Analyzer.class); - EasyMock.expect(analyzer.getDefaultDb()).andReturn("testDb").anyTimes(); - EasyMock.expect(analyzer.getQualifiedUser()).andReturn("testUser").anyTimes(); - EasyMock.expect(analyzer.getCatalog()).andReturn(catalog).anyTimes(); - EasyMock.replay(analyzer); - } - - @Test - public void testNormal() throws InternalException, AnalysisException { - // cancel alter column - CancelAlterTableStmt stmt = new CancelAlterTableStmt(AlterType.COLUMN, new TableName(null, "testTbl")); - stmt.analyze(analyzer); - Assert.assertEquals("CANCEL ALTER COLUMN FROM `testDb`.`testTbl`", stmt.toString()); - Assert.assertEquals("testDb", stmt.getDbName()); - Assert.assertEquals(AlterType.COLUMN, stmt.getAlterType()); - Assert.assertEquals("testTbl", stmt.getTableName()); - - stmt = new CancelAlterTableStmt(AlterType.ROLLUP, new TableName(null, "testTbl")); - stmt.analyze(analyzer); - Assert.assertEquals("CANCEL ALTER ROLLUP FROM `testDb`.`testTbl`", stmt.toString()); - Assert.assertEquals("testDb", stmt.getDbName()); - Assert.assertEquals(AlterType.ROLLUP, stmt.getAlterType()); - } -} + PowerMock.replay(ConnectContext.class); + + analyzer = EasyMock.createMock(Analyzer.class); + 
EasyMock.expect(analyzer.getDefaultDb()).andReturn("testDb").anyTimes(); + EasyMock.expect(analyzer.getQualifiedUser()).andReturn("testUser").anyTimes(); + EasyMock.expect(analyzer.getCatalog()).andReturn(catalog).anyTimes(); + EasyMock.replay(analyzer); + } + + @Test + public void testNormal() throws InternalException, AnalysisException { + // cancel alter column + CancelAlterTableStmt stmt = new CancelAlterTableStmt(AlterType.COLUMN, new TableName(null, "testTbl")); + stmt.analyze(analyzer); + Assert.assertEquals("CANCEL ALTER COLUMN FROM `testDb`.`testTbl`", stmt.toString()); + Assert.assertEquals("testDb", stmt.getDbName()); + Assert.assertEquals(AlterType.COLUMN, stmt.getAlterType()); + Assert.assertEquals("testTbl", stmt.getTableName()); + + stmt = new CancelAlterTableStmt(AlterType.ROLLUP, new TableName(null, "testTbl")); + stmt.analyze(analyzer); + Assert.assertEquals("CANCEL ALTER ROLLUP FROM `testDb`.`testTbl`", stmt.toString()); + Assert.assertEquals("testDb", stmt.getDbName()); + Assert.assertEquals(AlterType.ROLLUP, stmt.getAlterType()); + } +} diff --git a/fe/test/com/baidu/palo/analysis/DeleteStmtTest.java b/fe/test/com/baidu/palo/analysis/DeleteStmtTest.java index db911570c2..8643223c9e 100644 --- a/fe/test/com/baidu/palo/analysis/DeleteStmtTest.java +++ b/fe/test/com/baidu/palo/analysis/DeleteStmtTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.analysis.BinaryPredicate.Operator; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.common.InternalException; @@ -33,9 +33,9 @@ import org.junit.Test; import mockit.Mocked; import mockit.internal.startup.Startup; - -public class DeleteStmtTest { - + +public class DeleteStmtTest { + Analyzer analyzer; @Mocked @@ -45,127 +45,127 @@ public class DeleteStmtTest { static { Startup.initializeIfPossible(); - } - - @Before - public void setUp() { + } + + @Before + public void setUp() { analyzer = AccessTestUtil.fetchAdminAnalyzer(false); MockedAuth.mockedAuth(auth); - MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); - } - - @Test - public void getMethodTest() { - BinaryPredicate wherePredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), - new StringLiteral("abc")); - DeleteStmt deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", wherePredicate, null); - - Assert.assertEquals("testDb", deleteStmt.getDbName()); - Assert.assertEquals("testTbl", deleteStmt.getTableName()); - Assert.assertEquals("partition", deleteStmt.getPartitionName()); - Assert.assertEquals("DELETE FROM `testDb`.`testTbl` PARTITION partition WHERE `k1` = 'abc'", - deleteStmt.toSql()); - } - - @Test - public void testAnalyze() { - // case 1 - LikePredicate likePredicate = new LikePredicate(com.baidu.palo.analysis.LikePredicate.Operator.LIKE, - new SlotRef(null, "k1"), - new StringLiteral("abc")); - DeleteStmt deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", likePredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("should be compound or binary predicate")); - } - - // case 2 - BinaryPredicate binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), - new StringLiteral("abc")); - CompoundPredicate compoundPredicate = - new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.OR, 
binaryPredicate, - binaryPredicate); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); - - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("should be AND")); - } - - // case 3 - compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - likePredicate); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("should be compound or binary predicate")); - } - - // case 4 - binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), - new SlotRef(null, "k1")); - compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - binaryPredicate); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("Right expr should be value")); - } - - // case 5 - binaryPredicate = new BinaryPredicate(Operator.EQ, new StringLiteral("abc"), - new SlotRef(null, "k1")); - compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - binaryPredicate); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("Left expr should be column name")); - } - - // case 6 partition is null - binaryPredicate = new BinaryPredicate(Operator.EQ, new StringLiteral("abc"), - new SlotRef(null, "k1")); - compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - binaryPredicate); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), null, compoundPredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.assertTrue(e.getMessage().contains("Partition is not set")); - } - - // normal - binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), - new StringLiteral("abc")); - CompoundPredicate compoundPredicate2 = - new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - binaryPredicate); - compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, - binaryPredicate, - compoundPredicate2); - - deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); - try { - deleteStmt.analyze(analyzer); - } catch (AnalysisException | InternalException e) { - Assert.fail(); - } - } - -} + MockedAuth.mockedConnectContext(ctx, "root", "192.168.1.1"); + } + + @Test + public void getMethodTest() { + BinaryPredicate wherePredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), + new StringLiteral("abc")); + DeleteStmt deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", wherePredicate, null); + + Assert.assertEquals("testDb", deleteStmt.getDbName()); + Assert.assertEquals("testTbl", deleteStmt.getTableName()); + Assert.assertEquals("partition", 
deleteStmt.getPartitionName()); + Assert.assertEquals("DELETE FROM `testDb`.`testTbl` PARTITION partition WHERE `k1` = 'abc'", + deleteStmt.toSql()); + } + + @Test + public void testAnalyze() { + // case 1 + LikePredicate likePredicate = new LikePredicate(com.baidu.palo.analysis.LikePredicate.Operator.LIKE, + new SlotRef(null, "k1"), + new StringLiteral("abc")); + DeleteStmt deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", likePredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("should be compound or binary predicate")); + } + + // case 2 + BinaryPredicate binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), + new StringLiteral("abc")); + CompoundPredicate compoundPredicate = + new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.OR, binaryPredicate, + binaryPredicate); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); + + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("should be AND")); + } + + // case 3 + compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + binaryPredicate, + likePredicate); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("should be compound or binary predicate")); + } + + // case 4 + binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), + new SlotRef(null, "k1")); + compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + binaryPredicate, + binaryPredicate); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("Right expr should be value")); + } + + // case 5 + binaryPredicate = new BinaryPredicate(Operator.EQ, new StringLiteral("abc"), + new SlotRef(null, "k1")); + compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + binaryPredicate, + binaryPredicate); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("Left expr should be column name")); + } + + // case 6 partition is null + binaryPredicate = new BinaryPredicate(Operator.EQ, new StringLiteral("abc"), + new SlotRef(null, "k1")); + compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + binaryPredicate, + binaryPredicate); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), null, compoundPredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.assertTrue(e.getMessage().contains("Partition is not set")); + } + + // normal + binaryPredicate = new BinaryPredicate(Operator.EQ, new SlotRef(null, "k1"), + new StringLiteral("abc")); + CompoundPredicate compoundPredicate2 = + new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + 
binaryPredicate, + binaryPredicate); + compoundPredicate = new CompoundPredicate(com.baidu.palo.analysis.CompoundPredicate.Operator.AND, + binaryPredicate, + compoundPredicate2); + + deleteStmt = new DeleteStmt(new TableName("testDb", "testTbl"), "partition", compoundPredicate, null); + try { + deleteStmt.analyze(analyzer); + } catch (AnalysisException | InternalException e) { + Assert.fail(); + } + } + +} diff --git a/fe/test/com/baidu/palo/analysis/LiteralExprCompareTest.java b/fe/test/com/baidu/palo/analysis/LiteralExprCompareTest.java index 5b2b8b5e26..d9050a6c44 100644 --- a/fe/test/com/baidu/palo/analysis/LiteralExprCompareTest.java +++ b/fe/test/com/baidu/palo/analysis/LiteralExprCompareTest.java @@ -18,303 +18,303 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.analysis; - -import com.baidu.palo.catalog.PrimitiveType; -import com.baidu.palo.catalog.ScalarType; -import com.baidu.palo.common.AnalysisException; - -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.TimeZone; - -public class LiteralExprCompareTest { - - @BeforeClass - public static void setUp() { - TimeZone tz = TimeZone.getTimeZone("ETC/GMT-0"); - TimeZone.setDefault(tz); - } - - @Test - public void boolTest() { - LiteralExpr boolTrue1 = new BoolLiteral(true); - LiteralExpr boolFalse1 = new BoolLiteral(false); - LiteralExpr boolTrue2 = new BoolLiteral(true); - - // value equal - Assert.assertTrue(boolTrue1.equals(boolTrue2)); - // self equal - Assert.assertTrue(boolTrue1.equals(boolTrue1)); - - // value compare - Assert.assertTrue(!boolTrue1.equals(boolFalse1) && 1 == boolTrue1.compareLiteral(boolFalse1)); - Assert.assertTrue(-1 == boolFalse1.compareLiteral(boolTrue1)); - // value equal - Assert.assertTrue(0 == boolTrue1.compareLiteral(boolTrue2)); - // self equal - Assert.assertTrue(0 == boolTrue1.compareLiteral(boolTrue1)); - } - - @Test(expected = AnalysisException.class) - public void dateFormat1Test() throws AnalysisException { - LiteralExpr date = new DateLiteral("2015-02-15 12:12:12", ScalarType.DATE); - Assert.fail(); - } - - @Test(expected = AnalysisException.class) - public void dateFormat2Test() throws AnalysisException { - LiteralExpr datetime = new DateLiteral("2015-02-15", ScalarType.DATETIME); - Assert.fail(); - } - - @Test - public void dateTest() throws AnalysisException { - LiteralExpr date1 = new DateLiteral("2015-02-15", ScalarType.DATE); - LiteralExpr date1Same = new DateLiteral("2015-02-15", ScalarType.DATE); - LiteralExpr date1Large = new DateLiteral("2015-02-16", ScalarType.DATE); - LiteralExpr datetime1 = new DateLiteral("2015-02-15 13:14:00", ScalarType.DATETIME); - LiteralExpr datetime1Same = new DateLiteral("2015-02-15 13:14:00", ScalarType.DATETIME); - LiteralExpr datetime1Large = new DateLiteral("2015-02-15 13:14:15", ScalarType.DATETIME); - - // infinity - LiteralExpr maxDate1 = new DateLiteral(ScalarType.DATE, true); - LiteralExpr maxDate1Same = new DateLiteral(ScalarType.DATE, true); - LiteralExpr minDate1 = new DateLiteral(ScalarType.DATE, false); - LiteralExpr minDate1Same = new DateLiteral(ScalarType.DATE, false); - LiteralExpr maxDatetime1 = new DateLiteral(ScalarType.DATETIME, true); - LiteralExpr maxDatetime1Same = new DateLiteral(ScalarType.DATETIME, true); - LiteralExpr minDatetime1 = new DateLiteral(ScalarType.DATETIME, false); - LiteralExpr minDatetime1Same = new DateLiteral(ScalarType.DATETIME, false); - LiteralExpr date8 = new DateLiteral("9999-12-31", 
ScalarType.DATE); - LiteralExpr date9 = new DateLiteral("9999-12-31 23:59:59", ScalarType.DATETIME); - LiteralExpr date10 = new DateLiteral("1900-01-01", ScalarType.DATE); - LiteralExpr date11 = new DateLiteral("1900-01-01 00:00:00", ScalarType.DATETIME); - - Assert.assertTrue(date1.equals(date1Same) && date1.compareLiteral(date1Same) == 0); - Assert.assertTrue(date1.equals(date1Same) && date1.compareLiteral(date1Same) == 0); - Assert.assertTrue(datetime1.equals(datetime1Same) && datetime1.compareLiteral(datetime1Same) == 0); - Assert.assertTrue(datetime1.equals(datetime1) && datetime1.compareLiteral(datetime1) == 0); - - // value compare - Assert.assertTrue(!date1Large.equals(date1Same) && 1 == date1Large.compareLiteral(date1Same)); - Assert.assertTrue(!datetime1Large.equals(datetime1Same) && 1 == datetime1Large.compareLiteral(datetime1Same)); - Assert.assertTrue(!datetime1Same.equals(datetime1Large) && -1 == datetime1Same.compareLiteral(datetime1Large)); - - // infinity - Assert.assertTrue(maxDate1.equals(maxDate1) && maxDate1.compareLiteral(maxDate1) == 0); - Assert.assertTrue(maxDate1.equals(maxDate1Same) && maxDate1.compareLiteral(maxDate1Same) == 0); - Assert.assertTrue(minDate1.equals(minDate1) && minDate1.compareLiteral(minDate1) == 0); - Assert.assertTrue(minDate1.equals(minDate1Same) && minDate1.compareLiteral(minDate1Same) == 0); - Assert.assertTrue(maxDatetime1.equals(maxDatetime1) && maxDatetime1.compareLiteral(maxDatetime1) == 0); - Assert.assertTrue(maxDatetime1.equals(maxDatetime1Same) && maxDatetime1.compareLiteral(maxDatetime1Same) == 0); - Assert.assertTrue(minDatetime1.equals(minDatetime1) && minDatetime1.compareLiteral(minDatetime1) == 0); - Assert.assertTrue(minDatetime1.equals(minDatetime1Same) && minDatetime1.compareLiteral(minDatetime1Same) == 0); - - Assert.assertTrue(maxDate1.equals(date8) && maxDate1.compareLiteral(date8) == 0); - Assert.assertTrue(minDate1.equals(date10) && minDate1.compareLiteral(date10) == 0); - Assert.assertTrue(maxDatetime1.equals(date9) && maxDatetime1.compareLiteral(date9) == 0); - Assert.assertTrue(minDatetime1.equals(date11) && minDatetime1.compareLiteral(date11) == 0); - - Assert.assertTrue(!maxDate1.equals(date1) && 1 == maxDate1.compareLiteral(date1)); - Assert.assertTrue(!minDate1.equals(date1) && -1 == minDate1.compareLiteral(date1)); - Assert.assertTrue(!maxDatetime1.equals(datetime1) && 1 == maxDatetime1.compareLiteral(datetime1)); - Assert.assertTrue(!minDatetime1.equals(datetime1) && -1 == minDatetime1.compareLiteral(datetime1)); - } - - @Test - public void decimalTest() throws AnalysisException { - LiteralExpr decimal1 = new DecimalLiteral("1.23456"); - LiteralExpr decimal2 = new DecimalLiteral("1.23456"); - LiteralExpr decimal3 = new DecimalLiteral("1.23457"); - LiteralExpr decimal4 = new DecimalLiteral("2.23457"); - - // value equal - Assert.assertTrue(decimal1.equals(decimal2)); - // self equal - Assert.assertTrue(decimal1.equals(decimal1)); - - // value compare - Assert.assertTrue(!decimal3.equals(decimal2) && 1 == decimal3.compareLiteral(decimal2)); - Assert.assertTrue(!decimal4.equals(decimal3) && 1 == decimal4.compareLiteral(decimal3)); - Assert.assertTrue(!decimal1.equals(decimal4) && -1 == decimal1.compareLiteral(decimal4)); - // value equal - Assert.assertTrue(0 == decimal1.compareLiteral(decimal2)); - // self equal - Assert.assertTrue(0 == decimal1.compareLiteral(decimal1)); - } - - public void floatAndDoubleExpr() { - LiteralExpr float1 = new FloatLiteral(1.12345, ScalarType.FLOAT); - LiteralExpr float2 = new 
FloatLiteral(1.12345, ScalarType.FLOAT); - LiteralExpr float3 = new FloatLiteral(1.12346, ScalarType.FLOAT); - LiteralExpr float4 = new FloatLiteral(2.12345, ScalarType.FLOAT); - - LiteralExpr double1 = new FloatLiteral(1.12345, ScalarType.DOUBLE); - LiteralExpr double2 = new FloatLiteral(1.12345, ScalarType.DOUBLE); - LiteralExpr double3 = new FloatLiteral(1.12346, ScalarType.DOUBLE); - LiteralExpr double4 = new FloatLiteral(2.12345, ScalarType.DOUBLE); - - // float - // value equal - Assert.assertTrue(float1.equals(float2)); - // self equal - Assert.assertTrue(float1.equals(float1)); - - // value compare - Assert.assertTrue(!float3.equals(float2) && 1 == float3.compareLiteral(float2)); - Assert.assertTrue(!float4.equals(float1) && 1 == float4.compareLiteral(float1)); - Assert.assertTrue(!float1.equals(float4) && -1 == float1.compareLiteral(float4)); - // value equal - Assert.assertTrue(0 == float1.compareLiteral(float2)); - // self equal - Assert.assertTrue(0 == float1.compareLiteral(float1)); - - // double - // value equal - Assert.assertTrue(double1.equals(double2)); - // self equal - Assert.assertTrue(double1.equals(double1)); - - // value compare - Assert.assertTrue(!double3.equals(double2) && 1 == double3.compareLiteral(double2)); - Assert.assertTrue(!double4.equals(double1) && 1 == double4.compareLiteral(double1)); - Assert.assertTrue(!double1.equals(double4) && -1 == double1.compareLiteral(double4)); - // value equal - Assert.assertTrue(0 == double1.compareLiteral(double2)); - // self equal - Assert.assertTrue(0 == double1.compareLiteral(double1)); - } - - private void intTestInternal(ScalarType type) throws AnalysisException { - String maxValue = ""; - String minValue = ""; - String normalValue = "100"; - - switch (type.getPrimitiveType()) { - case TINYINT: - maxValue = "127"; - minValue = "-128"; - break; - case SMALLINT: - maxValue = "32767"; - minValue = "-32768"; - break; - case INT: - maxValue = "2147483647"; - minValue = "-2147483648"; - break; - case BIGINT: - maxValue = "9223372036854775807"; - minValue = "-9223372036854775808"; - break; - default: - Assert.fail(); - } - - LiteralExpr tinyint1 = new IntLiteral(maxValue, type); - LiteralExpr tinyint2 = new IntLiteral(maxValue, type); - LiteralExpr tinyint3 = new IntLiteral(minValue, type); - LiteralExpr tinyint4 = new IntLiteral(normalValue, type); - - // infinity - LiteralExpr infinity1 = MaxLiteral.MAX_VALUE; - LiteralExpr infinity2 = MaxLiteral.MAX_VALUE; - LiteralExpr infinity3 = LiteralExpr.createInfinity(type, false); - LiteralExpr infinity4 = LiteralExpr.createInfinity(type, false); - - // value equal - Assert.assertTrue(tinyint1.equals(tinyint1)); - // self equal - Assert.assertTrue(tinyint1.equals(tinyint2)); - - // value compare - Assert.assertTrue(!tinyint1.equals(tinyint3) && 1 == tinyint1.compareLiteral(tinyint3)); - Assert.assertTrue(!tinyint2.equals(tinyint4) && 1 == tinyint2.compareLiteral(tinyint4)); - Assert.assertTrue(!tinyint3.equals(tinyint4) && -1 == tinyint3.compareLiteral(tinyint4)); - // value equal - Assert.assertTrue(0 == tinyint1.compareLiteral(tinyint1)); - // self equal - Assert.assertTrue(0 == tinyint1.compareLiteral(tinyint2)); - - // infinity - Assert.assertTrue(infinity1.equals(infinity1)); - Assert.assertTrue(infinity1.equals(infinity2)); - Assert.assertTrue(infinity3.equals(infinity3)); - Assert.assertTrue(infinity3.equals(infinity4)); - Assert.assertFalse(tinyint1.equals(infinity1)); - Assert.assertTrue(tinyint3.equals(infinity3)); - - Assert.assertTrue(0 == 
infinity1.compareLiteral(infinity1)); - Assert.assertTrue(0 == infinity1.compareLiteral(infinity2)); - Assert.assertTrue(!infinity1.equals(infinity3) && 1 == infinity1.compareLiteral(infinity3)); - Assert.assertTrue(!infinity4.equals(infinity2) && -1 == infinity4.compareLiteral(infinity2)); - - Assert.assertTrue(!infinity4.equals(tinyint1) && -1 == infinity4.compareLiteral(tinyint1)); - Assert.assertTrue(!infinity3.equals(tinyint4) && -1 == infinity3.compareLiteral(tinyint4)); - - Assert.assertTrue(infinity1.compareLiteral(tinyint2) == 1); - Assert.assertTrue(0 == infinity4.compareLiteral(tinyint3)); - } - - @Test - public void intTest() throws AnalysisException { - intTestInternal(ScalarType.createType(PrimitiveType.TINYINT)); - intTestInternal(ScalarType.createType(PrimitiveType.SMALLINT)); - intTestInternal(ScalarType.createType(PrimitiveType.INT)); - intTestInternal(ScalarType.createType(PrimitiveType.BIGINT)); - } - - @Test - public void largeIntTest() throws AnalysisException { - LiteralExpr largeInt1 = new LargeIntLiteral("170141183460469231731687303715884105727"); - LiteralExpr largeInt3 = new LargeIntLiteral("-170141183460469231731687303715884105728"); - - LiteralExpr infinity1 = new LargeIntLiteral(true); - LiteralExpr infinity3 = new LargeIntLiteral(false); - - // value equal - Assert.assertTrue(largeInt1.equals(largeInt1)); - - // value compare - Assert.assertTrue(!largeInt1.equals(largeInt3) && 1 == largeInt1.compareLiteral(largeInt3)); - // value equal - Assert.assertTrue(0 == largeInt1.compareLiteral(largeInt1)); - - // infinity - Assert.assertTrue(infinity1.equals(infinity1)); - Assert.assertTrue(infinity3.equals(infinity3)); - Assert.assertTrue(infinity1.equals(largeInt1)); - Assert.assertTrue(infinity3.equals(largeInt3)); - - Assert.assertTrue(!infinity1.equals(largeInt3) && 1 == infinity1.compareLiteral(largeInt3)); - Assert.assertTrue(!infinity3.equals(infinity1) && -1 == infinity3.compareLiteral(infinity1)); - - Assert.assertTrue(0 == infinity1.compareLiteral(infinity1)); - Assert.assertTrue(0 == infinity3.compareLiteral(infinity3)); - Assert.assertTrue(0 == infinity1.compareLiteral(largeInt1)); - Assert.assertTrue(0 == infinity3.compareLiteral(largeInt3)); - } - - @Test - public void stringTest() throws AnalysisException { - LiteralExpr string1 = new StringLiteral("abc"); - LiteralExpr string2 = new StringLiteral("abc"); - LiteralExpr string3 = new StringLiteral("bcd"); - LiteralExpr string4 = new StringLiteral("a"); - LiteralExpr string5 = new StringLiteral("aa"); - LiteralExpr empty = new StringLiteral(""); - - Assert.assertTrue(string1.equals(string1) && string1.compareLiteral(string2) == 0); - Assert.assertTrue(string1.equals(string2) && string1.compareLiteral(string1) == 0); - - Assert.assertTrue(!string3.equals(string1) && 1 == string3.compareLiteral(string1)); - Assert.assertTrue(!string1.equals(string3) && -1 == string1.compareLiteral(string3)); - Assert.assertTrue(!string5.equals(string4) && 1 == string5.compareLiteral(string4)); - Assert.assertTrue(!string3.equals(string4) && 1 == string3.compareLiteral(string4)); - Assert.assertTrue(!string4.equals(empty) && 1 == string4.compareLiteral(empty)); - } - -} +package com.baidu.palo.analysis; + +import com.baidu.palo.catalog.PrimitiveType; +import com.baidu.palo.catalog.ScalarType; +import com.baidu.palo.common.AnalysisException; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.util.TimeZone; + +public class LiteralExprCompareTest { + + @BeforeClass + public static 
void setUp() { + TimeZone tz = TimeZone.getTimeZone("ETC/GMT-0"); + TimeZone.setDefault(tz); + } + + @Test + public void boolTest() { + LiteralExpr boolTrue1 = new BoolLiteral(true); + LiteralExpr boolFalse1 = new BoolLiteral(false); + LiteralExpr boolTrue2 = new BoolLiteral(true); + + // value equal + Assert.assertTrue(boolTrue1.equals(boolTrue2)); + // self equal + Assert.assertTrue(boolTrue1.equals(boolTrue1)); + + // value compare + Assert.assertTrue(!boolTrue1.equals(boolFalse1) && 1 == boolTrue1.compareLiteral(boolFalse1)); + Assert.assertTrue(-1 == boolFalse1.compareLiteral(boolTrue1)); + // value equal + Assert.assertTrue(0 == boolTrue1.compareLiteral(boolTrue2)); + // self equal + Assert.assertTrue(0 == boolTrue1.compareLiteral(boolTrue1)); + } + + @Test(expected = AnalysisException.class) + public void dateFormat1Test() throws AnalysisException { + LiteralExpr date = new DateLiteral("2015-02-15 12:12:12", ScalarType.DATE); + Assert.fail(); + } + + @Test(expected = AnalysisException.class) + public void dateFormat2Test() throws AnalysisException { + LiteralExpr datetime = new DateLiteral("2015-02-15", ScalarType.DATETIME); + Assert.fail(); + } + + @Test + public void dateTest() throws AnalysisException { + LiteralExpr date1 = new DateLiteral("2015-02-15", ScalarType.DATE); + LiteralExpr date1Same = new DateLiteral("2015-02-15", ScalarType.DATE); + LiteralExpr date1Large = new DateLiteral("2015-02-16", ScalarType.DATE); + LiteralExpr datetime1 = new DateLiteral("2015-02-15 13:14:00", ScalarType.DATETIME); + LiteralExpr datetime1Same = new DateLiteral("2015-02-15 13:14:00", ScalarType.DATETIME); + LiteralExpr datetime1Large = new DateLiteral("2015-02-15 13:14:15", ScalarType.DATETIME); + + // infinity + LiteralExpr maxDate1 = new DateLiteral(ScalarType.DATE, true); + LiteralExpr maxDate1Same = new DateLiteral(ScalarType.DATE, true); + LiteralExpr minDate1 = new DateLiteral(ScalarType.DATE, false); + LiteralExpr minDate1Same = new DateLiteral(ScalarType.DATE, false); + LiteralExpr maxDatetime1 = new DateLiteral(ScalarType.DATETIME, true); + LiteralExpr maxDatetime1Same = new DateLiteral(ScalarType.DATETIME, true); + LiteralExpr minDatetime1 = new DateLiteral(ScalarType.DATETIME, false); + LiteralExpr minDatetime1Same = new DateLiteral(ScalarType.DATETIME, false); + LiteralExpr date8 = new DateLiteral("9999-12-31", ScalarType.DATE); + LiteralExpr date9 = new DateLiteral("9999-12-31 23:59:59", ScalarType.DATETIME); + LiteralExpr date10 = new DateLiteral("1900-01-01", ScalarType.DATE); + LiteralExpr date11 = new DateLiteral("1900-01-01 00:00:00", ScalarType.DATETIME); + + Assert.assertTrue(date1.equals(date1Same) && date1.compareLiteral(date1Same) == 0); + Assert.assertTrue(date1.equals(date1Same) && date1.compareLiteral(date1Same) == 0); + Assert.assertTrue(datetime1.equals(datetime1Same) && datetime1.compareLiteral(datetime1Same) == 0); + Assert.assertTrue(datetime1.equals(datetime1) && datetime1.compareLiteral(datetime1) == 0); + + // value compare + Assert.assertTrue(!date1Large.equals(date1Same) && 1 == date1Large.compareLiteral(date1Same)); + Assert.assertTrue(!datetime1Large.equals(datetime1Same) && 1 == datetime1Large.compareLiteral(datetime1Same)); + Assert.assertTrue(!datetime1Same.equals(datetime1Large) && -1 == datetime1Same.compareLiteral(datetime1Large)); + + // infinity + Assert.assertTrue(maxDate1.equals(maxDate1) && maxDate1.compareLiteral(maxDate1) == 0); + Assert.assertTrue(maxDate1.equals(maxDate1Same) && maxDate1.compareLiteral(maxDate1Same) == 0); + 
Assert.assertTrue(minDate1.equals(minDate1) && minDate1.compareLiteral(minDate1) == 0); + Assert.assertTrue(minDate1.equals(minDate1Same) && minDate1.compareLiteral(minDate1Same) == 0); + Assert.assertTrue(maxDatetime1.equals(maxDatetime1) && maxDatetime1.compareLiteral(maxDatetime1) == 0); + Assert.assertTrue(maxDatetime1.equals(maxDatetime1Same) && maxDatetime1.compareLiteral(maxDatetime1Same) == 0); + Assert.assertTrue(minDatetime1.equals(minDatetime1) && minDatetime1.compareLiteral(minDatetime1) == 0); + Assert.assertTrue(minDatetime1.equals(minDatetime1Same) && minDatetime1.compareLiteral(minDatetime1Same) == 0); + + Assert.assertTrue(maxDate1.equals(date8) && maxDate1.compareLiteral(date8) == 0); + Assert.assertTrue(minDate1.equals(date10) && minDate1.compareLiteral(date10) == 0); + Assert.assertTrue(maxDatetime1.equals(date9) && maxDatetime1.compareLiteral(date9) == 0); + Assert.assertTrue(minDatetime1.equals(date11) && minDatetime1.compareLiteral(date11) == 0); + + Assert.assertTrue(!maxDate1.equals(date1) && 1 == maxDate1.compareLiteral(date1)); + Assert.assertTrue(!minDate1.equals(date1) && -1 == minDate1.compareLiteral(date1)); + Assert.assertTrue(!maxDatetime1.equals(datetime1) && 1 == maxDatetime1.compareLiteral(datetime1)); + Assert.assertTrue(!minDatetime1.equals(datetime1) && -1 == minDatetime1.compareLiteral(datetime1)); + } + + @Test + public void decimalTest() throws AnalysisException { + LiteralExpr decimal1 = new DecimalLiteral("1.23456"); + LiteralExpr decimal2 = new DecimalLiteral("1.23456"); + LiteralExpr decimal3 = new DecimalLiteral("1.23457"); + LiteralExpr decimal4 = new DecimalLiteral("2.23457"); + + // value equal + Assert.assertTrue(decimal1.equals(decimal2)); + // self equal + Assert.assertTrue(decimal1.equals(decimal1)); + + // value compare + Assert.assertTrue(!decimal3.equals(decimal2) && 1 == decimal3.compareLiteral(decimal2)); + Assert.assertTrue(!decimal4.equals(decimal3) && 1 == decimal4.compareLiteral(decimal3)); + Assert.assertTrue(!decimal1.equals(decimal4) && -1 == decimal1.compareLiteral(decimal4)); + // value equal + Assert.assertTrue(0 == decimal1.compareLiteral(decimal2)); + // self equal + Assert.assertTrue(0 == decimal1.compareLiteral(decimal1)); + } + + public void floatAndDoubleExpr() { + LiteralExpr float1 = new FloatLiteral(1.12345, ScalarType.FLOAT); + LiteralExpr float2 = new FloatLiteral(1.12345, ScalarType.FLOAT); + LiteralExpr float3 = new FloatLiteral(1.12346, ScalarType.FLOAT); + LiteralExpr float4 = new FloatLiteral(2.12345, ScalarType.FLOAT); + + LiteralExpr double1 = new FloatLiteral(1.12345, ScalarType.DOUBLE); + LiteralExpr double2 = new FloatLiteral(1.12345, ScalarType.DOUBLE); + LiteralExpr double3 = new FloatLiteral(1.12346, ScalarType.DOUBLE); + LiteralExpr double4 = new FloatLiteral(2.12345, ScalarType.DOUBLE); + + // float + // value equal + Assert.assertTrue(float1.equals(float2)); + // self equal + Assert.assertTrue(float1.equals(float1)); + + // value compare + Assert.assertTrue(!float3.equals(float2) && 1 == float3.compareLiteral(float2)); + Assert.assertTrue(!float4.equals(float1) && 1 == float4.compareLiteral(float1)); + Assert.assertTrue(!float1.equals(float4) && -1 == float1.compareLiteral(float4)); + // value equal + Assert.assertTrue(0 == float1.compareLiteral(float2)); + // self equal + Assert.assertTrue(0 == float1.compareLiteral(float1)); + + // double + // value equal + Assert.assertTrue(double1.equals(double2)); + // self equal + Assert.assertTrue(double1.equals(double1)); + + // value compare + 
Assert.assertTrue(!double3.equals(double2) && 1 == double3.compareLiteral(double2)); + Assert.assertTrue(!double4.equals(double1) && 1 == double4.compareLiteral(double1)); + Assert.assertTrue(!double1.equals(double4) && -1 == double1.compareLiteral(double4)); + // value equal + Assert.assertTrue(0 == double1.compareLiteral(double2)); + // self equal + Assert.assertTrue(0 == double1.compareLiteral(double1)); + } + + private void intTestInternal(ScalarType type) throws AnalysisException { + String maxValue = ""; + String minValue = ""; + String normalValue = "100"; + + switch (type.getPrimitiveType()) { + case TINYINT: + maxValue = "127"; + minValue = "-128"; + break; + case SMALLINT: + maxValue = "32767"; + minValue = "-32768"; + break; + case INT: + maxValue = "2147483647"; + minValue = "-2147483648"; + break; + case BIGINT: + maxValue = "9223372036854775807"; + minValue = "-9223372036854775808"; + break; + default: + Assert.fail(); + } + + LiteralExpr tinyint1 = new IntLiteral(maxValue, type); + LiteralExpr tinyint2 = new IntLiteral(maxValue, type); + LiteralExpr tinyint3 = new IntLiteral(minValue, type); + LiteralExpr tinyint4 = new IntLiteral(normalValue, type); + + // infinity + LiteralExpr infinity1 = MaxLiteral.MAX_VALUE; + LiteralExpr infinity2 = MaxLiteral.MAX_VALUE; + LiteralExpr infinity3 = LiteralExpr.createInfinity(type, false); + LiteralExpr infinity4 = LiteralExpr.createInfinity(type, false); + + // value equal + Assert.assertTrue(tinyint1.equals(tinyint1)); + // self equal + Assert.assertTrue(tinyint1.equals(tinyint2)); + + // value compare + Assert.assertTrue(!tinyint1.equals(tinyint3) && 1 == tinyint1.compareLiteral(tinyint3)); + Assert.assertTrue(!tinyint2.equals(tinyint4) && 1 == tinyint2.compareLiteral(tinyint4)); + Assert.assertTrue(!tinyint3.equals(tinyint4) && -1 == tinyint3.compareLiteral(tinyint4)); + // value equal + Assert.assertTrue(0 == tinyint1.compareLiteral(tinyint1)); + // self equal + Assert.assertTrue(0 == tinyint1.compareLiteral(tinyint2)); + + // infinity + Assert.assertTrue(infinity1.equals(infinity1)); + Assert.assertTrue(infinity1.equals(infinity2)); + Assert.assertTrue(infinity3.equals(infinity3)); + Assert.assertTrue(infinity3.equals(infinity4)); + Assert.assertFalse(tinyint1.equals(infinity1)); + Assert.assertTrue(tinyint3.equals(infinity3)); + + Assert.assertTrue(0 == infinity1.compareLiteral(infinity1)); + Assert.assertTrue(0 == infinity1.compareLiteral(infinity2)); + Assert.assertTrue(!infinity1.equals(infinity3) && 1 == infinity1.compareLiteral(infinity3)); + Assert.assertTrue(!infinity4.equals(infinity2) && -1 == infinity4.compareLiteral(infinity2)); + + Assert.assertTrue(!infinity4.equals(tinyint1) && -1 == infinity4.compareLiteral(tinyint1)); + Assert.assertTrue(!infinity3.equals(tinyint4) && -1 == infinity3.compareLiteral(tinyint4)); + + Assert.assertTrue(infinity1.compareLiteral(tinyint2) == 1); + Assert.assertTrue(0 == infinity4.compareLiteral(tinyint3)); + } + + @Test + public void intTest() throws AnalysisException { + intTestInternal(ScalarType.createType(PrimitiveType.TINYINT)); + intTestInternal(ScalarType.createType(PrimitiveType.SMALLINT)); + intTestInternal(ScalarType.createType(PrimitiveType.INT)); + intTestInternal(ScalarType.createType(PrimitiveType.BIGINT)); + } + + @Test + public void largeIntTest() throws AnalysisException { + LiteralExpr largeInt1 = new LargeIntLiteral("170141183460469231731687303715884105727"); + LiteralExpr largeInt3 = new LargeIntLiteral("-170141183460469231731687303715884105728"); + + LiteralExpr 
infinity1 = new LargeIntLiteral(true); + LiteralExpr infinity3 = new LargeIntLiteral(false); + + // value equal + Assert.assertTrue(largeInt1.equals(largeInt1)); + + // value compare + Assert.assertTrue(!largeInt1.equals(largeInt3) && 1 == largeInt1.compareLiteral(largeInt3)); + // value equal + Assert.assertTrue(0 == largeInt1.compareLiteral(largeInt1)); + + // infinity + Assert.assertTrue(infinity1.equals(infinity1)); + Assert.assertTrue(infinity3.equals(infinity3)); + Assert.assertTrue(infinity1.equals(largeInt1)); + Assert.assertTrue(infinity3.equals(largeInt3)); + + Assert.assertTrue(!infinity1.equals(largeInt3) && 1 == infinity1.compareLiteral(largeInt3)); + Assert.assertTrue(!infinity3.equals(infinity1) && -1 == infinity3.compareLiteral(infinity1)); + + Assert.assertTrue(0 == infinity1.compareLiteral(infinity1)); + Assert.assertTrue(0 == infinity3.compareLiteral(infinity3)); + Assert.assertTrue(0 == infinity1.compareLiteral(largeInt1)); + Assert.assertTrue(0 == infinity3.compareLiteral(largeInt3)); + } + + @Test + public void stringTest() throws AnalysisException { + LiteralExpr string1 = new StringLiteral("abc"); + LiteralExpr string2 = new StringLiteral("abc"); + LiteralExpr string3 = new StringLiteral("bcd"); + LiteralExpr string4 = new StringLiteral("a"); + LiteralExpr string5 = new StringLiteral("aa"); + LiteralExpr empty = new StringLiteral(""); + + Assert.assertTrue(string1.equals(string1) && string1.compareLiteral(string2) == 0); + Assert.assertTrue(string1.equals(string2) && string1.compareLiteral(string1) == 0); + + Assert.assertTrue(!string3.equals(string1) && 1 == string3.compareLiteral(string1)); + Assert.assertTrue(!string1.equals(string3) && -1 == string1.compareLiteral(string3)); + Assert.assertTrue(!string5.equals(string4) && 1 == string5.compareLiteral(string4)); + Assert.assertTrue(!string3.equals(string4) && 1 == string3.compareLiteral(string4)); + Assert.assertTrue(!string4.equals(empty) && 1 == string4.compareLiteral(empty)); + } + +} diff --git a/fe/test/com/baidu/palo/analysis/ShowDataStmtTest.java b/fe/test/com/baidu/palo/analysis/ShowDataStmtTest.java index 8158e72a36..1e25297d5d 100644 --- a/fe/test/com/baidu/palo/analysis/ShowDataStmtTest.java +++ b/fe/test/com/baidu/palo/analysis/ShowDataStmtTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.analysis; - +package com.baidu.palo.analysis; + import com.baidu.palo.backup.CatalogMocker; import com.baidu.palo.catalog.Catalog; import com.baidu.palo.catalog.Database; @@ -38,9 +38,9 @@ import org.junit.Test; import mockit.Mocked; import mockit.NonStrictExpectations; import mockit.internal.startup.Startup; - -public class ShowDataStmtTest { - + +public class ShowDataStmtTest { + @Mocked private PaloAuth auth; @Mocked @@ -56,8 +56,8 @@ public class ShowDataStmtTest { static { Startup.initializeIfPossible(); - } - + } + @Before public void setUp() throws AnalysisException { auth = new PaloAuth(); @@ -122,20 +122,20 @@ public class ShowDataStmtTest { }; AccessTestUtil.fetchAdminAccess(); - } - - @Test - public void testNormal() throws AnalysisException, InternalException { - ShowDataStmt stmt = new ShowDataStmt(null, null); - stmt.analyze(analyzer); - Assert.assertEquals("SHOW DATA FROM `testCluster:testDb`", stmt.toString()); - Assert.assertEquals(2, stmt.getMetaData().getColumnCount()); - Assert.assertEquals(false, stmt.hasTable()); - - stmt = new ShowDataStmt("testDb", "test_tbl"); - stmt.analyze(analyzer); - Assert.assertEquals("SHOW DATA FROM `default_cluster:testDb`.`test_tbl`", stmt.toString()); - Assert.assertEquals(3, stmt.getMetaData().getColumnCount()); - Assert.assertEquals(true, stmt.hasTable()); - } -} + } + + @Test + public void testNormal() throws AnalysisException, InternalException { + ShowDataStmt stmt = new ShowDataStmt(null, null); + stmt.analyze(analyzer); + Assert.assertEquals("SHOW DATA FROM `testCluster:testDb`", stmt.toString()); + Assert.assertEquals(2, stmt.getMetaData().getColumnCount()); + Assert.assertEquals(false, stmt.hasTable()); + + stmt = new ShowDataStmt("testDb", "test_tbl"); + stmt.analyze(analyzer); + Assert.assertEquals("SHOW DATA FROM `default_cluster:testDb`.`test_tbl`", stmt.toString()); + Assert.assertEquals(3, stmt.getMetaData().getColumnCount()); + Assert.assertEquals(true, stmt.hasTable()); + } +} diff --git a/fe/test/com/baidu/palo/catalog/BackendTest.java b/fe/test/com/baidu/palo/catalog/BackendTest.java index e98c9531b3..46a53a5503 100644 --- a/fe/test/com/baidu/palo/catalog/BackendTest.java +++ b/fe/test/com/baidu/palo/catalog/BackendTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.analysis.AccessTestUtil; import com.baidu.palo.common.FeConstants; import com.baidu.palo.metric.MetricRepo; @@ -44,154 +44,154 @@ import java.io.FileOutputStream; import java.util.HashMap; import java.util.LinkedList; import java.util.List; -import java.util.Map; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest({ Catalog.class, MetricRepo.class }) -public class BackendTest { - private Backend backend; - private long backendId = 9999; - private String host = "myhost"; - private int heartbeatPort = 21234; - private int bePort = 21235; +import java.util.Map; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest({ Catalog.class, MetricRepo.class }) +public class BackendTest { + private Backend backend; + private long backendId = 9999; + private String host = "myhost"; + private int heartbeatPort = 21234; + private int bePort = 21235; private int httpPort = 21237; - private int beRpcPort = 21238; - + private int beRpcPort = 21238; + private Catalog catalog; - - @Before - public void setUp() { - catalog = AccessTestUtil.fetchAdminCatalog(); - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - - backend = new Backend(backendId, host, heartbeatPort); + + @Before + public void setUp() { + catalog = AccessTestUtil.fetchAdminCatalog(); + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + + backend = new Backend(backendId, host, heartbeatPort); backend.updateOnce(bePort, httpPort, beRpcPort); PowerMock.mockStatic(MetricRepo.class); MetricRepo.generateCapacityMetrics(); EasyMock.expectLastCall().anyTimes(); - PowerMock.replay(MetricRepo.class); - } - - @Test - public void getMethodTest() { - Assert.assertEquals(backendId, backend.getId()); - Assert.assertEquals(host, backend.getHost()); - Assert.assertEquals(heartbeatPort, backend.getHeartbeatPort()); - Assert.assertEquals(bePort, backend.getBePort()); - - // set new port - int newBePort = 31235; - int newHttpPort = 31237; - backend.updateOnce(newBePort, newHttpPort, beRpcPort); - Assert.assertEquals(newBePort, backend.getBePort()); - - // check alive - Assert.assertTrue(backend.isAlive()); - } - - @Test - public void diskInfoTest() { - Map diskInfos = new HashMap(); - - TDisk disk1 = new TDisk("/data1/", 1000, 800, true); - TDisk disk2 = new TDisk("/data2/", 2000, 700, true); - TDisk disk3 = new TDisk("/data3/", 3000, 600, false); - - diskInfos.put(disk1.getRoot_path(), disk1); - diskInfos.put(disk2.getRoot_path(), disk2); - diskInfos.put(disk3.getRoot_path(), disk3); - - // first update - backend.updateDisks(diskInfos); - Assert.assertEquals(disk1.getDisk_total_capacity() + disk2.getDisk_total_capacity(), - backend.getTotalCapacityB()); - Assert.assertEquals(1, backend.getAvailableCapacityB()); - - // second update - diskInfos.remove(disk1.getRoot_path()); - backend.updateDisks(diskInfos); + PowerMock.replay(MetricRepo.class); + } + + @Test + public void getMethodTest() { + Assert.assertEquals(backendId, backend.getId()); + Assert.assertEquals(host, backend.getHost()); + 
Assert.assertEquals(heartbeatPort, backend.getHeartbeatPort()); + Assert.assertEquals(bePort, backend.getBePort()); + + // set new port + int newBePort = 31235; + int newHttpPort = 31237; + backend.updateOnce(newBePort, newHttpPort, beRpcPort); + Assert.assertEquals(newBePort, backend.getBePort()); + + // check alive + Assert.assertTrue(backend.isAlive()); + } + + @Test + public void diskInfoTest() { + Map diskInfos = new HashMap(); + + TDisk disk1 = new TDisk("/data1/", 1000, 800, true); + TDisk disk2 = new TDisk("/data2/", 2000, 700, true); + TDisk disk3 = new TDisk("/data3/", 3000, 600, false); + + diskInfos.put(disk1.getRoot_path(), disk1); + diskInfos.put(disk2.getRoot_path(), disk2); + diskInfos.put(disk3.getRoot_path(), disk3); + + // first update + backend.updateDisks(diskInfos); + Assert.assertEquals(disk1.getDisk_total_capacity() + disk2.getDisk_total_capacity(), + backend.getTotalCapacityB()); + Assert.assertEquals(1, backend.getAvailableCapacityB()); + + // second update + diskInfos.remove(disk1.getRoot_path()); + backend.updateDisks(diskInfos); Assert.assertEquals(disk2.getDisk_total_capacity(), backend.getTotalCapacityB()); - Assert.assertEquals(disk2.getDisk_available_capacity() + 1, backend.getAvailableCapacityB()); - } - - @Test - public void testSerialization() throws Exception { - // Write 100 objects to file - File file = new File("./backendTest"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - List list1 = new LinkedList(); - List list2 = new LinkedList(); - - for (int count = 0; count < 100; ++count) { - Backend backend = new Backend(count, "10.120.22.32" + count, 6000 + count); - backend.updateOnce(7000 + count, 9000 + count, beRpcPort); - list1.add(backend); - } - for (int count = 100; count < 200; count++) { - Backend backend = new Backend(count, "10.120.22.32" + count, 6000 + count); - backend.updateOnce(7000 + count, 9000 + count, beRpcPort); - list1.add(backend); - } - for (Backend backend : list1) { - backend.write(dos); - } - dos.flush(); - dos.close(); - - // 2. 
Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - for (int count = 0; count < 100; ++count) { - Backend backend = new Backend(); - backend.readFields(dis); - list2.add(backend); - Assert.assertEquals(count, backend.getId()); - Assert.assertEquals("10.120.22.32" + count, backend.getHost()); - } - - for (int count = 100; count < 200; ++count) { - Backend backend = Backend.read(dis); - list2.add(backend); - Assert.assertEquals(count, backend.getId()); - Assert.assertEquals("10.120.22.32" + count, backend.getHost()); - } - - for (int count = 0; count < 200; count++) { - Assert.assertTrue(list1.get(count).equals(list2.get(count))); - } - Assert.assertFalse(list1.get(1).equals(list1.get(2))); - Assert.assertFalse(list1.get(1).equals(this)); - Assert.assertTrue(list1.get(1).equals(list1.get(1))); - - Backend back1 = new Backend(1, "a", 1); - back1.updateOnce(1, 1, 1); - Backend back2 = new Backend(2, "a", 1); - back2.updateOnce(1, 1, 1); - Assert.assertFalse(back1.equals(back2)); - - back1 = new Backend(1, "a", 1); - back1.updateOnce(1, 1, 1); - back2 = new Backend(1, "b", 1); - back2.updateOnce(1, 1, 1); - Assert.assertFalse(back1.equals(back2)); - - back1 = new Backend(1, "a", 1); - back1.updateOnce(1, 1, 1); - back2 = new Backend(1, "a", 2); - back2.updateOnce(1, 1, 1); - Assert.assertFalse(back1.equals(back2)); - - Assert.assertEquals("Backend [id=1, host=a, heartbeatPort=1, alive=true]", back1.toString()); - - // 3. delete files - dis.close(); - file.delete(); - } - -} + Assert.assertEquals(disk2.getDisk_available_capacity() + 1, backend.getAvailableCapacityB()); + } + + @Test + public void testSerialization() throws Exception { + // Write 100 objects to file + File file = new File("./backendTest"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + List list1 = new LinkedList(); + List list2 = new LinkedList(); + + for (int count = 0; count < 100; ++count) { + Backend backend = new Backend(count, "10.120.22.32" + count, 6000 + count); + backend.updateOnce(7000 + count, 9000 + count, beRpcPort); + list1.add(backend); + } + for (int count = 100; count < 200; count++) { + Backend backend = new Backend(count, "10.120.22.32" + count, 6000 + count); + backend.updateOnce(7000 + count, 9000 + count, beRpcPort); + list1.add(backend); + } + for (Backend backend : list1) { + backend.write(dos); + } + dos.flush(); + dos.close(); + + // 2. 
Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + for (int count = 0; count < 100; ++count) { + Backend backend = new Backend(); + backend.readFields(dis); + list2.add(backend); + Assert.assertEquals(count, backend.getId()); + Assert.assertEquals("10.120.22.32" + count, backend.getHost()); + } + + for (int count = 100; count < 200; ++count) { + Backend backend = Backend.read(dis); + list2.add(backend); + Assert.assertEquals(count, backend.getId()); + Assert.assertEquals("10.120.22.32" + count, backend.getHost()); + } + + for (int count = 0; count < 200; count++) { + Assert.assertTrue(list1.get(count).equals(list2.get(count))); + } + Assert.assertFalse(list1.get(1).equals(list1.get(2))); + Assert.assertFalse(list1.get(1).equals(this)); + Assert.assertTrue(list1.get(1).equals(list1.get(1))); + + Backend back1 = new Backend(1, "a", 1); + back1.updateOnce(1, 1, 1); + Backend back2 = new Backend(2, "a", 1); + back2.updateOnce(1, 1, 1); + Assert.assertFalse(back1.equals(back2)); + + back1 = new Backend(1, "a", 1); + back1.updateOnce(1, 1, 1); + back2 = new Backend(1, "b", 1); + back2.updateOnce(1, 1, 1); + Assert.assertFalse(back1.equals(back2)); + + back1 = new Backend(1, "a", 1); + back1.updateOnce(1, 1, 1); + back2 = new Backend(1, "a", 2); + back2.updateOnce(1, 1, 1); + Assert.assertFalse(back1.equals(back2)); + + Assert.assertEquals("Backend [id=1, host=a, heartbeatPort=1, alive=true]", back1.toString()); + + // 3. delete files + dis.close(); + file.delete(); + } + +} diff --git a/fe/test/com/baidu/palo/catalog/CatalogTest.java b/fe/test/com/baidu/palo/catalog/CatalogTest.java index 19f0b8a7ff..a59c40f733 100644 --- a/fe/test/com/baidu/palo/catalog/CatalogTest.java +++ b/fe/test/com/baidu/palo/catalog/CatalogTest.java @@ -20,14 +20,22 @@ package com.baidu.palo.catalog; -import com.baidu.palo.common.FeConstants; -import com.baidu.palo.alter.AlterJob.JobType; import com.baidu.palo.alter.AlterJob; +import com.baidu.palo.alter.AlterJob.JobType; import com.baidu.palo.alter.SchemaChangeJob; import com.baidu.palo.catalog.MaterializedIndex.IndexState; +import com.baidu.palo.cluster.Cluster; +import com.baidu.palo.common.FeConstants; import com.baidu.palo.load.Load; import com.baidu.palo.load.LoadJob; -import com.baidu.palo.cluster.Cluster; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; import java.io.BufferedInputStream; import java.io.DataInputStream; @@ -43,14 +51,6 @@ import java.util.List; import java.util.Map; import java.util.Random; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - @RunWith(PowerMockRunner.class) @PowerMockIgnore("org.apache.log4j.*") @PrepareForTest(Catalog.class) @@ -170,7 +170,7 @@ public class CatalogTest { field.set(catalog, new Load()); LoadJob job1 = new LoadJob("label1", 20, 0); - catalog.getLoadInstance().unprotectAddLoadJob(job1); + catalog.getLoadInstance().unprotectAddLoadJob(job1, true); long checksum1 = catalog.saveLoadJob(dos, 0); catalog.clear(); catalog = null; diff --git 
a/fe/test/com/baidu/palo/catalog/ColumnStatsTest.java b/fe/test/com/baidu/palo/catalog/ColumnStatsTest.java index b3dc5a0dcd..a79a3158fd 100644 --- a/fe/test/com/baidu/palo/catalog/ColumnStatsTest.java +++ b/fe/test/com/baidu/palo/catalog/ColumnStatsTest.java @@ -18,73 +18,73 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; - -public class ColumnStatsTest { - - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./columnStats"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - ColumnStats stats1 = new ColumnStats(); - stats1.write(dos); - - ColumnStats stats2 = new ColumnStats(); - stats2.setAvgSerializedSize(1.1f); - stats2.setNumDistinctValues(100L); - stats2.setMaxSize(1000L); - stats2.setNumNulls(10000L); - stats2.write(dos); - - ColumnStats stats3 = new ColumnStats(); - stats3.setAvgSerializedSize(3.3f); - stats3.setNumDistinctValues(200L); - stats3.setMaxSize(2000L); - stats3.setNumNulls(20000L); - stats3.write(dos); - - ColumnStats stats4 = new ColumnStats(stats3); - stats4.write(dos); - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - ColumnStats rStats1 = new ColumnStats(); - rStats1.readFields(dis); - Assert.assertTrue(rStats1.equals(stats1)); - - ColumnStats rStats2 = new ColumnStats(); - rStats2.readFields(dis); - Assert.assertTrue(rStats2.equals(stats2)); - - ColumnStats rStats3 = ColumnStats.read(dis); - Assert.assertTrue(rStats3.equals(stats3)); - - ColumnStats rStats4 = ColumnStats.read(dis); - Assert.assertTrue(rStats4.equals(stats4)); - Assert.assertTrue(rStats4.equals(stats3)); - - Assert.assertTrue(rStats3.equals(rStats3)); - Assert.assertFalse(rStats3.equals(this)); - Assert.assertFalse(rStats2.equals(rStats3)); - - // 3. delete files - dis.close(); - file.delete(); - } - -} +package com.baidu.palo.catalog; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; + +public class ColumnStatsTest { + + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./columnStats"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + ColumnStats stats1 = new ColumnStats(); + stats1.write(dos); + + ColumnStats stats2 = new ColumnStats(); + stats2.setAvgSerializedSize(1.1f); + stats2.setNumDistinctValues(100L); + stats2.setMaxSize(1000L); + stats2.setNumNulls(10000L); + stats2.write(dos); + + ColumnStats stats3 = new ColumnStats(); + stats3.setAvgSerializedSize(3.3f); + stats3.setNumDistinctValues(200L); + stats3.setMaxSize(2000L); + stats3.setNumNulls(20000L); + stats3.write(dos); + + ColumnStats stats4 = new ColumnStats(stats3); + stats4.write(dos); + + dos.flush(); + dos.close(); + + // 2. 
Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + ColumnStats rStats1 = new ColumnStats(); + rStats1.readFields(dis); + Assert.assertTrue(rStats1.equals(stats1)); + + ColumnStats rStats2 = new ColumnStats(); + rStats2.readFields(dis); + Assert.assertTrue(rStats2.equals(stats2)); + + ColumnStats rStats3 = ColumnStats.read(dis); + Assert.assertTrue(rStats3.equals(stats3)); + + ColumnStats rStats4 = ColumnStats.read(dis); + Assert.assertTrue(rStats4.equals(stats4)); + Assert.assertTrue(rStats4.equals(stats3)); + + Assert.assertTrue(rStats3.equals(rStats3)); + Assert.assertFalse(rStats3.equals(this)); + Assert.assertFalse(rStats2.equals(rStats3)); + + // 3. delete files + dis.close(); + file.delete(); + } + +} diff --git a/fe/test/com/baidu/palo/catalog/ColumnTest.java b/fe/test/com/baidu/palo/catalog/ColumnTest.java index 00bf46a0c7..c0ccc7c658 100644 --- a/fe/test/com/baidu/palo/catalog/ColumnTest.java +++ b/fe/test/com/baidu/palo/catalog/ColumnTest.java @@ -18,114 +18,114 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.common.DdlException; -import com.baidu.palo.common.FeConstants; - -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class ColumnTest { - - private Catalog catalog; - - @Before - public void setUp() { - catalog = EasyMock.createMock(Catalog.class); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - } - - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./columnTest"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - Column column1 = new Column("user", - new ColumnType(PrimitiveType.CHAR, 20, 1, 0), false, AggregateType.SUM, "", ""); - column1.write(dos); - Column column2 = new Column("age", - new ColumnType(PrimitiveType.INT, 30, 2, 1), false, AggregateType.REPLACE, "20", ""); - column2.write(dos); - - Column column3 = new Column("name", PrimitiveType.BIGINT); - column3.setIsKey(true); - column3.write(dos); - - Column column4 = new Column("age", - new ColumnType(PrimitiveType.INT, 30, 2, 1), false, AggregateType.REPLACE, "20", - ""); - column4.write(dos); - - dos.flush(); - dos.close(); - - // 2. 
Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - Column rColumn1 = new Column(); - rColumn1.readFields(dis); - Assert.assertEquals("user", rColumn1.getName()); - Assert.assertEquals(PrimitiveType.CHAR, rColumn1.getDataType()); - Assert.assertEquals(AggregateType.SUM, rColumn1.getAggregationType()); - Assert.assertEquals("", rColumn1.getDefaultValue()); - Assert.assertEquals(0, rColumn1.getScale()); - Assert.assertEquals(1, rColumn1.getPrecision()); - Assert.assertEquals(20, rColumn1.getStrLen()); - Assert.assertFalse(rColumn1.isAllowNull()); - - // 3. Test read() - Column rColumn2 = Column.read(dis); - Assert.assertEquals("age", rColumn2.getName()); - Assert.assertEquals(PrimitiveType.INT, rColumn2.getDataType()); - Assert.assertEquals(AggregateType.REPLACE, rColumn2.getAggregationType()); - Assert.assertEquals("20", rColumn2.getDefaultValue()); - Assert.assertEquals(1, rColumn2.getScale()); - Assert.assertEquals(2, rColumn2.getPrecision()); - Assert.assertEquals(30, rColumn2.getStrLen()); - - Column rColumn3 = Column.read(dis); - Assert.assertTrue(rColumn3.equals(column3)); - - Column rColumn4 = Column.read(dis); - Assert.assertTrue(rColumn4.equals(column4)); - - Assert.assertEquals(rColumn2.toString(), column2.toString()); - Assert.assertTrue(column1.equals(column1)); - Assert.assertFalse(column1.equals(this)); - - // 4. delete files - dis.close(); - file.delete(); - } - - @Test(expected = DdlException.class) - public void testSchemaChangeAllowed() throws DdlException { - Column oldColumn = new Column("user", new ColumnType(PrimitiveType.INT), true, null, true, "0", ""); - Column newColumn = new Column("user", new ColumnType(PrimitiveType.INT), true, null, false, "0", ""); - oldColumn.checkSchemaChangeAllowed(newColumn); - Assert.fail("No exception throws."); - } - -} +package com.baidu.palo.catalog; + +import com.baidu.palo.common.DdlException; +import com.baidu.palo.common.FeConstants; + +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class ColumnTest { + + private Catalog catalog; + + @Before + public void setUp() { + catalog = EasyMock.createMock(Catalog.class); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + } + + @Test + public void testSerialization() throws Exception { + // 1. 
Write objects to file + File file = new File("./columnTest"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + Column column1 = new Column("user", + new ColumnType(PrimitiveType.CHAR, 20, 1, 0), false, AggregateType.SUM, "", ""); + column1.write(dos); + Column column2 = new Column("age", + new ColumnType(PrimitiveType.INT, 30, 2, 1), false, AggregateType.REPLACE, "20", ""); + column2.write(dos); + + Column column3 = new Column("name", PrimitiveType.BIGINT); + column3.setIsKey(true); + column3.write(dos); + + Column column4 = new Column("age", + new ColumnType(PrimitiveType.INT, 30, 2, 1), false, AggregateType.REPLACE, "20", + ""); + column4.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + Column rColumn1 = new Column(); + rColumn1.readFields(dis); + Assert.assertEquals("user", rColumn1.getName()); + Assert.assertEquals(PrimitiveType.CHAR, rColumn1.getDataType()); + Assert.assertEquals(AggregateType.SUM, rColumn1.getAggregationType()); + Assert.assertEquals("", rColumn1.getDefaultValue()); + Assert.assertEquals(0, rColumn1.getScale()); + Assert.assertEquals(1, rColumn1.getPrecision()); + Assert.assertEquals(20, rColumn1.getStrLen()); + Assert.assertFalse(rColumn1.isAllowNull()); + + // 3. Test read() + Column rColumn2 = Column.read(dis); + Assert.assertEquals("age", rColumn2.getName()); + Assert.assertEquals(PrimitiveType.INT, rColumn2.getDataType()); + Assert.assertEquals(AggregateType.REPLACE, rColumn2.getAggregationType()); + Assert.assertEquals("20", rColumn2.getDefaultValue()); + Assert.assertEquals(1, rColumn2.getScale()); + Assert.assertEquals(2, rColumn2.getPrecision()); + Assert.assertEquals(30, rColumn2.getStrLen()); + + Column rColumn3 = Column.read(dis); + Assert.assertTrue(rColumn3.equals(column3)); + + Column rColumn4 = Column.read(dis); + Assert.assertTrue(rColumn4.equals(column4)); + + Assert.assertEquals(rColumn2.toString(), column2.toString()); + Assert.assertTrue(column1.equals(column1)); + Assert.assertFalse(column1.equals(this)); + + // 4. delete files + dis.close(); + file.delete(); + } + + @Test(expected = DdlException.class) + public void testSchemaChangeAllowed() throws DdlException { + Column oldColumn = new Column("user", new ColumnType(PrimitiveType.INT), true, null, true, "0", ""); + Column newColumn = new Column("user", new ColumnType(PrimitiveType.INT), true, null, false, "0", ""); + oldColumn.checkSchemaChangeAllowed(newColumn); + Assert.fail("No exception throws."); + } + +} diff --git a/fe/test/com/baidu/palo/catalog/DatabaseTest.java b/fe/test/com/baidu/palo/catalog/DatabaseTest.java index f3193f779f..4c260f76de 100644 --- a/fe/test/com/baidu/palo/catalog/DatabaseTest.java +++ b/fe/test/com/baidu/palo/catalog/DatabaseTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - +package com.baidu.palo.catalog; + import com.baidu.palo.catalog.MaterializedIndex.IndexState; import com.baidu.palo.common.FeConstants; import com.baidu.palo.persist.CreateTableInfo; @@ -43,158 +43,158 @@ import java.io.FileOutputStream; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; -import java.util.concurrent.TimeUnit; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class DatabaseTest { - - private Database db; - private long dbId = 10000; - - private Catalog catalog; - private EditLog editLog; - - @Before - public void Setup() { - db = new Database(dbId, "dbTest"); - - editLog = EasyMock.createMock(EditLog.class); - editLog.logCreateTable(EasyMock.anyObject(CreateTableInfo.class)); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(editLog); - - catalog = EasyMock.createMock(Catalog.class); - EasyMock.expect(catalog.getEditLog()).andReturn(editLog); - EasyMock.replay(catalog); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - } - - @Test - public void lockTest() { - db.readLock(); - try { - Assert.assertFalse(db.tryWriteLock(0, TimeUnit.SECONDS)); - Assert.assertTrue(db.tryReadLock(0, TimeUnit.SECONDS)); - db.readUnlock(); - } finally { - db.readUnlock(); - } - - db.writeLock(); - try { - Assert.assertTrue(db.tryWriteLock(0, TimeUnit.SECONDS)); - Assert.assertTrue(db.tryReadLock(0, TimeUnit.SECONDS)); - } finally { - db.writeUnlock(); - } - } - - @Test - public void createAndDropPartitionTest() { - Assert.assertEquals("dbTest", db.getFullName()); - Assert.assertEquals(dbId, db.getId()); - - MaterializedIndex baseIndex = new MaterializedIndex(10001, IndexState.NORMAL); - Partition partition = new Partition(20000L, "baseTable", baseIndex, new RandomDistributionInfo(10)); - List baseSchema = new LinkedList(); - OlapTable table = new OlapTable(2000, "baseTable", baseSchema, KeysType.AGG_KEYS, - new SinglePartitionInfo(), new RandomDistributionInfo(10)); - table.addPartition(partition); - - // create - Assert.assertTrue(db.createTable(table)); - // duplicate - Assert.assertFalse(db.createTable(table)); - - Assert.assertEquals(table, db.getTable(table.getId())); - Assert.assertEquals(table, db.getTable(table.getName())); - - Assert.assertEquals(1, db.getTables().size()); - Assert.assertEquals(table, db.getTables().get(0)); - - Assert.assertEquals(1, db.getTableNamesWithLock().size()); - for (String tableFamilyGroupName : db.getTableNamesWithLock()) { - Assert.assertEquals(table.getName(), tableFamilyGroupName); - } - - // drop - // drop not exist tableFamily - db.dropTable("invalid"); - Assert.assertEquals(1, db.getTables().size()); - db.dropTableWithLock("invalid"); - Assert.assertEquals(1, db.getTables().size()); - - // drop normal - db.dropTableWithLock(table.getName()); - Assert.assertEquals(0, db.getTables().size()); - - db.createTable(table); - db.dropTable(table.getName()); - Assert.assertEquals(0, db.getTables().size()); - } - - @Test - public void testSerialization() throws Exception { - // 1. 
Write objects to file - File file = new File("./database"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - // db1 - Database db1 = new Database(); - db1.write(dos); - - // db2 - Database db2 = new Database(2, "db2"); - List columns = new ArrayList(); - columns.add(new Column("column2", - ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); - columns.add(new Column("column3", - ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); - columns.add(new Column("column4", - ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column5", - ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column6", - ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column7", - ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); - columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); - columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); - columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); - - MaterializedIndex index = new MaterializedIndex(1, IndexState.NORMAL); - Partition partition = new Partition(20000L, "table", index, new RandomDistributionInfo(10)); - OlapTable table = new OlapTable(1000, "table", columns, KeysType.AGG_KEYS, - new SinglePartitionInfo(), new RandomDistributionInfo(10)); - table.addPartition(partition); - db2.createTable(table); - db2.write(dos); - - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - - Database rDb1 = new Database(); - rDb1.readFields(dis); - Assert.assertTrue(rDb1.equals(db1)); - - Database rDb2 = new Database(); - rDb2.readFields(dis); - Assert.assertTrue(rDb2.equals(db2)); - - // 3. 
delete files - dis.close(); - file.delete(); - } -} +import java.util.concurrent.TimeUnit; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class DatabaseTest { + + private Database db; + private long dbId = 10000; + + private Catalog catalog; + private EditLog editLog; + + @Before + public void Setup() { + db = new Database(dbId, "dbTest"); + + editLog = EasyMock.createMock(EditLog.class); + editLog.logCreateTable(EasyMock.anyObject(CreateTableInfo.class)); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(editLog); + + catalog = EasyMock.createMock(Catalog.class); + EasyMock.expect(catalog.getEditLog()).andReturn(editLog); + EasyMock.replay(catalog); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + } + + @Test + public void lockTest() { + db.readLock(); + try { + Assert.assertFalse(db.tryWriteLock(0, TimeUnit.SECONDS)); + Assert.assertTrue(db.tryReadLock(0, TimeUnit.SECONDS)); + db.readUnlock(); + } finally { + db.readUnlock(); + } + + db.writeLock(); + try { + Assert.assertTrue(db.tryWriteLock(0, TimeUnit.SECONDS)); + Assert.assertTrue(db.tryReadLock(0, TimeUnit.SECONDS)); + } finally { + db.writeUnlock(); + } + } + + @Test + public void createAndDropPartitionTest() { + Assert.assertEquals("dbTest", db.getFullName()); + Assert.assertEquals(dbId, db.getId()); + + MaterializedIndex baseIndex = new MaterializedIndex(10001, IndexState.NORMAL); + Partition partition = new Partition(20000L, "baseTable", baseIndex, new RandomDistributionInfo(10)); + List baseSchema = new LinkedList(); + OlapTable table = new OlapTable(2000, "baseTable", baseSchema, KeysType.AGG_KEYS, + new SinglePartitionInfo(), new RandomDistributionInfo(10)); + table.addPartition(partition); + + // create + Assert.assertTrue(db.createTable(table)); + // duplicate + Assert.assertFalse(db.createTable(table)); + + Assert.assertEquals(table, db.getTable(table.getId())); + Assert.assertEquals(table, db.getTable(table.getName())); + + Assert.assertEquals(1, db.getTables().size()); + Assert.assertEquals(table, db.getTables().get(0)); + + Assert.assertEquals(1, db.getTableNamesWithLock().size()); + for (String tableFamilyGroupName : db.getTableNamesWithLock()) { + Assert.assertEquals(table.getName(), tableFamilyGroupName); + } + + // drop + // drop not exist tableFamily + db.dropTable("invalid"); + Assert.assertEquals(1, db.getTables().size()); + db.dropTableWithLock("invalid"); + Assert.assertEquals(1, db.getTables().size()); + + // drop normal + db.dropTableWithLock(table.getName()); + Assert.assertEquals(0, db.getTables().size()); + + db.createTable(table); + db.dropTable(table.getName()); + Assert.assertEquals(0, db.getTables().size()); + } + + @Test + public void testSerialization() throws Exception { + // 1. 
Write objects to file + File file = new File("./database"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + // db1 + Database db1 = new Database(); + db1.write(dos); + + // db2 + Database db2 = new Database(2, "db2"); + List columns = new ArrayList(); + columns.add(new Column("column2", + ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); + columns.add(new Column("column3", + ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); + columns.add(new Column("column4", + ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column5", + ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column6", + ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column7", + ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); + columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); + columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); + columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); + + MaterializedIndex index = new MaterializedIndex(1, IndexState.NORMAL); + Partition partition = new Partition(20000L, "table", index, new RandomDistributionInfo(10)); + OlapTable table = new OlapTable(1000, "table", columns, KeysType.AGG_KEYS, + new SinglePartitionInfo(), new RandomDistributionInfo(10)); + table.addPartition(partition); + db2.createTable(table); + db2.write(dos); + + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + + Database rDb1 = new Database(); + rDb1.readFields(dis); + Assert.assertTrue(rDb1.equals(db1)); + + Database rDb2 = new Database(); + rDb2.readFields(dis); + Assert.assertTrue(rDb2.equals(db2)); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/catalog/MaterializedIndexTest.java b/fe/test/com/baidu/palo/catalog/MaterializedIndexTest.java index c0a7212219..88ce7ed936 100644 --- a/fe/test/com/baidu/palo/catalog/MaterializedIndexTest.java +++ b/fe/test/com/baidu/palo/catalog/MaterializedIndexTest.java @@ -18,82 +18,82 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.catalog.MaterializedIndex.IndexState; -import com.baidu.palo.common.FeConstants; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.LinkedList; -import java.util.List; - -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class MaterializedIndexTest { - - private MaterializedIndex index; - private long indexId; - - private List columns; - private Catalog catalog; - - @Before - public void setUp() { - indexId = 10000; - - columns = new LinkedList(); - columns.add(new Column("k1", ColumnType.createType(PrimitiveType.TINYINT), true, null, "", "")); - columns.add(new Column("k2", ColumnType.createType(PrimitiveType.SMALLINT), true, null, "", "")); - columns.add(new Column("v1", ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); - index = new MaterializedIndex(indexId, IndexState.NORMAL); - - catalog = EasyMock.createMock(Catalog.class); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - } - - @Test - public void getMethodTest() { - Assert.assertEquals(indexId, index.getId()); - } - - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./index"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - index.write(dos); - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - MaterializedIndex rIndex = MaterializedIndex.read(dis); - Assert.assertTrue(index.equals(rIndex)); - - // 3. 
delete files - dis.close(); - file.delete(); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.catalog.MaterializedIndex.IndexState; +import com.baidu.palo.common.FeConstants; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.LinkedList; +import java.util.List; + +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class MaterializedIndexTest { + + private MaterializedIndex index; + private long indexId; + + private List columns; + private Catalog catalog; + + @Before + public void setUp() { + indexId = 10000; + + columns = new LinkedList(); + columns.add(new Column("k1", ColumnType.createType(PrimitiveType.TINYINT), true, null, "", "")); + columns.add(new Column("k2", ColumnType.createType(PrimitiveType.SMALLINT), true, null, "", "")); + columns.add(new Column("v1", ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); + index = new MaterializedIndex(indexId, IndexState.NORMAL); + + catalog = EasyMock.createMock(Catalog.class); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + } + + @Test + public void getMethodTest() { + Assert.assertEquals(indexId, index.getId()); + } + + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./index"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + index.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + MaterializedIndex rIndex = MaterializedIndex.read(dis); + Assert.assertTrue(index.equals(rIndex)); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/catalog/PartitionKeyTest.java b/fe/test/com/baidu/palo/catalog/PartitionKeyTest.java index 54ab360e0c..c77a99d4dd 100644 --- a/fe/test/com/baidu/palo/catalog/PartitionKeyTest.java +++ b/fe/test/com/baidu/palo/catalog/PartitionKeyTest.java @@ -18,176 +18,176 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.common.AnalysisException; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.TimeZone; - -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -public class PartitionKeyTest { - - private static List allColumns; - private static Column tinyInt; - private static Column smallInt; - private static Column int32; - private static Column bigInt; - private static Column largeInt; - private static Column date; - private static Column datetime; - - @BeforeClass - public static void setUp() { - TimeZone tz = TimeZone.getTimeZone("ETC/GMT-0"); - TimeZone.setDefault(tz); - - tinyInt = new Column("tinyint", PrimitiveType.TINYINT); - smallInt = new Column("smallint", PrimitiveType.SMALLINT); - int32 = new Column("int32", PrimitiveType.INT); - bigInt = new Column("bigint", PrimitiveType.BIGINT); - largeInt = new Column("largeint", PrimitiveType.LARGEINT); - date = new Column("date", PrimitiveType.DATE); - datetime = new Column("datetime", PrimitiveType.DATETIME); - - allColumns = Arrays.asList(tinyInt, smallInt, int32, bigInt, largeInt, date, datetime); - } - - @Test - public void compareTest() throws AnalysisException { - PartitionKey pk1; - PartitionKey pk2; - - // case1 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "32767"), - Arrays.asList(tinyInt, smallInt)); - pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case2 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), - Arrays.asList(tinyInt, smallInt)); - pk2 = PartitionKey.createPartitionKey(Arrays.asList("127", "-32768"), - Arrays.asList(tinyInt, smallInt)); - Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); - - // case3 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), - Arrays.asList(int32, bigInt)); - pk2 = PartitionKey.createPartitionKey(Arrays.asList("128", "-32768"), - Arrays.asList(int32, bigInt)); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case4 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "12345"), - Arrays.asList(largeInt, bigInt)); - pk2 = PartitionKey.createPartitionKey(Arrays.asList("127", "12346"), - Arrays.asList(largeInt, bigInt)); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case5 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("2014-12-12", "2014-12-12 10:00:00"), - Arrays.asList(date, datetime)); - pk2 = PartitionKey.createPartitionKey(Arrays.asList("2014-12-12", "2014-12-12 10:00:01"), - Arrays.asList(date, datetime)); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case6 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128"), - Arrays.asList(tinyInt, smallInt)); - pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), false); - Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); - - // case7 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), - Arrays.asList(tinyInt, smallInt)); - pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case7 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "32767"), - 
Arrays.asList(tinyInt, smallInt)); - pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case8 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "32767", "2147483647", "9223372036854775807", - "170141183460469231731687303715884105727", - "9999-12-31", "9999-12-31 23:59:59"), - allColumns); - pk2 = PartitionKey.createInfinityPartitionKey(allColumns, true); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); - - // case9 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128", "-32768", "-2147483648", "-9223372036854775808", - "-170141183460469231731687303715884105728", - "1900-01-01", "1900-01-01 00:00:00"), - allColumns); - pk2 = PartitionKey.createInfinityPartitionKey(allColumns, false); - Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); - - // case10 - pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128", "-32768", "0", "-9223372036854775808", - "0", "1970-01-01", "1970-01-01 00:00:00"), - allColumns); - pk2 = PartitionKey.createInfinityPartitionKey(allColumns, false); - Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == 1); - } - - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./keyRangePartition"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - PartitionKey keyEmpty = new PartitionKey(); - keyEmpty.write(dos); - - List keys = new ArrayList(); - List columns = new ArrayList(); - keys.add("100"); - columns.add(new Column("column2", ColumnType.createType(PrimitiveType.TINYINT), true, null, "", "")); - keys.add("101"); - columns.add(new Column("column3", ColumnType.createType(PrimitiveType.SMALLINT), true, null, "", "")); - keys.add("102"); - columns.add(new Column("column4", ColumnType.createType(PrimitiveType.INT), true, null, "", "")); - keys.add("103"); - columns.add(new Column("column5", ColumnType.createType(PrimitiveType.BIGINT), true, null, "", "")); - keys.add("2014-12-26"); - columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); - keys.add("2014-12-27 11:12:13"); - columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); - - PartitionKey key = PartitionKey.createPartitionKey(keys, columns); - key.write(dos); - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - PartitionKey rKeyEmpty = PartitionKey.read(dis); - Assert.assertTrue(keyEmpty.equals(rKeyEmpty)); - - PartitionKey rKey = new PartitionKey(); - rKey.readFields(dis); - Assert.assertTrue(key.equals(rKey)); - Assert.assertTrue(key.equals(key)); - Assert.assertFalse(key.equals(this)); - - // 3. 
delete files - dis.close(); - file.delete(); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.common.AnalysisException; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.TimeZone; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class PartitionKeyTest { + + private static List allColumns; + private static Column tinyInt; + private static Column smallInt; + private static Column int32; + private static Column bigInt; + private static Column largeInt; + private static Column date; + private static Column datetime; + + @BeforeClass + public static void setUp() { + TimeZone tz = TimeZone.getTimeZone("ETC/GMT-0"); + TimeZone.setDefault(tz); + + tinyInt = new Column("tinyint", PrimitiveType.TINYINT); + smallInt = new Column("smallint", PrimitiveType.SMALLINT); + int32 = new Column("int32", PrimitiveType.INT); + bigInt = new Column("bigint", PrimitiveType.BIGINT); + largeInt = new Column("largeint", PrimitiveType.LARGEINT); + date = new Column("date", PrimitiveType.DATE); + datetime = new Column("datetime", PrimitiveType.DATETIME); + + allColumns = Arrays.asList(tinyInt, smallInt, int32, bigInt, largeInt, date, datetime); + } + + @Test + public void compareTest() throws AnalysisException { + PartitionKey pk1; + PartitionKey pk2; + + // case1 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "32767"), + Arrays.asList(tinyInt, smallInt)); + pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case2 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), + Arrays.asList(tinyInt, smallInt)); + pk2 = PartitionKey.createPartitionKey(Arrays.asList("127", "-32768"), + Arrays.asList(tinyInt, smallInt)); + Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); + + // case3 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), + Arrays.asList(int32, bigInt)); + pk2 = PartitionKey.createPartitionKey(Arrays.asList("128", "-32768"), + Arrays.asList(int32, bigInt)); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case4 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "12345"), + Arrays.asList(largeInt, bigInt)); + pk2 = PartitionKey.createPartitionKey(Arrays.asList("127", "12346"), + Arrays.asList(largeInt, bigInt)); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case5 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("2014-12-12", "2014-12-12 10:00:00"), + Arrays.asList(date, datetime)); + pk2 = PartitionKey.createPartitionKey(Arrays.asList("2014-12-12", "2014-12-12 10:00:01"), + Arrays.asList(date, datetime)); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case6 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128"), + Arrays.asList(tinyInt, smallInt)); + pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), false); + Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); + + // case7 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127"), + Arrays.asList(tinyInt, smallInt)); + pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case7 + pk1 = 
PartitionKey.createPartitionKey(Arrays.asList("127", "32767"), + Arrays.asList(tinyInt, smallInt)); + pk2 = PartitionKey.createInfinityPartitionKey(Arrays.asList(tinyInt, smallInt), true); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case8 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("127", "32767", "2147483647", "9223372036854775807", + "170141183460469231731687303715884105727", + "9999-12-31", "9999-12-31 23:59:59"), + allColumns); + pk2 = PartitionKey.createInfinityPartitionKey(allColumns, true); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == -1); + + // case9 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128", "-32768", "-2147483648", "-9223372036854775808", + "-170141183460469231731687303715884105728", + "1900-01-01", "1900-01-01 00:00:00"), + allColumns); + pk2 = PartitionKey.createInfinityPartitionKey(allColumns, false); + Assert.assertTrue(pk1.equals(pk2) && pk1.compareTo(pk2) == 0); + + // case10 + pk1 = PartitionKey.createPartitionKey(Arrays.asList("-128", "-32768", "0", "-9223372036854775808", + "0", "1970-01-01", "1970-01-01 00:00:00"), + allColumns); + pk2 = PartitionKey.createInfinityPartitionKey(allColumns, false); + Assert.assertTrue(!pk1.equals(pk2) && pk1.compareTo(pk2) == 1); + } + + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./keyRangePartition"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + PartitionKey keyEmpty = new PartitionKey(); + keyEmpty.write(dos); + + List keys = new ArrayList(); + List columns = new ArrayList(); + keys.add("100"); + columns.add(new Column("column2", ColumnType.createType(PrimitiveType.TINYINT), true, null, "", "")); + keys.add("101"); + columns.add(new Column("column3", ColumnType.createType(PrimitiveType.SMALLINT), true, null, "", "")); + keys.add("102"); + columns.add(new Column("column4", ColumnType.createType(PrimitiveType.INT), true, null, "", "")); + keys.add("103"); + columns.add(new Column("column5", ColumnType.createType(PrimitiveType.BIGINT), true, null, "", "")); + keys.add("2014-12-26"); + columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); + keys.add("2014-12-27 11:12:13"); + columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); + + PartitionKey key = PartitionKey.createPartitionKey(keys, columns); + key.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + PartitionKey rKeyEmpty = PartitionKey.read(dis); + Assert.assertTrue(keyEmpty.equals(rKeyEmpty)); + + PartitionKey rKey = new PartitionKey(); + rKey.readFields(dis); + Assert.assertTrue(key.equals(rKey)); + Assert.assertTrue(key.equals(key)); + Assert.assertFalse(key.equals(this)); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/catalog/RangePartitionInfoTest.java b/fe/test/com/baidu/palo/catalog/RangePartitionInfoTest.java index c6f7518f4d..b438d4a791 100644 --- a/fe/test/com/baidu/palo/catalog/RangePartitionInfoTest.java +++ b/fe/test/com/baidu/palo/catalog/RangePartitionInfoTest.java @@ -18,126 +18,126 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.analysis.PartitionKeyDesc; -import com.baidu.palo.analysis.SingleRangePartitionDesc; -import com.baidu.palo.common.AnalysisException; -import com.baidu.palo.common.DdlException; - -import com.google.common.collect.Lists; - -import org.junit.Before; -import org.junit.Test; - -import java.util.LinkedList; -import java.util.List; - -public class RangePartitionInfoTest { - - private List partitionColumns; - private RangePartitionInfo partitionInfo; - - private List singleRangePartitionDescs; - - @Before - public void setUp() { - partitionColumns = new LinkedList(); - singleRangePartitionDescs = new LinkedList(); - } - - @Test(expected = DdlException.class) - public void testTinyInt() throws DdlException, AnalysisException { - Column k1 = new Column("k1", new ColumnType(PrimitiveType.TINYINT), true, null, "", ""); - partitionColumns.add(k1); - - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", - new PartitionKeyDesc(Lists .newArrayList("-128")), - null)); - - - partitionInfo = new RangePartitionInfo(partitionColumns); - for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { - singleRangePartitionDesc.analyze(1, null); - partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); - } - } - - @Test(expected = DdlException.class) - public void testSmallInt() throws DdlException, AnalysisException { - Column k1 = new Column("k1", new ColumnType(PrimitiveType.SMALLINT), true, null, "", ""); - partitionColumns.add(k1); - - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", - new PartitionKeyDesc(Lists.newArrayList("-32768")), - null)); - - partitionInfo = new RangePartitionInfo(partitionColumns); - for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { - singleRangePartitionDesc.analyze(1, null); - partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); - } - } - - @Test(expected = DdlException.class) - public void testInt() throws DdlException, AnalysisException { - Column k1 = new Column("k1", new ColumnType(PrimitiveType.INT), true, null, "", ""); - partitionColumns.add(k1); - - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", - new PartitionKeyDesc(Lists - .newArrayList("-2147483648")), - null)); - - partitionInfo = new RangePartitionInfo(partitionColumns); - for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { - singleRangePartitionDesc.analyze(1, null); - partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); - } - } - - @Test(expected = DdlException.class) - public void testBigInt() throws DdlException, AnalysisException { - Column k1 = new Column("k1", new ColumnType(PrimitiveType.BIGINT), true, null, "", ""); - partitionColumns.add(k1); - - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", new PartitionKeyDesc(Lists - .newArrayList("-9223372036854775808")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p2", new PartitionKeyDesc(Lists - .newArrayList("-9223372036854775806")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p3", new PartitionKeyDesc(Lists - .newArrayList("0")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p4", new PartitionKeyDesc(Lists - .newArrayList("9223372036854775806")), null)); - - partitionInfo = new RangePartitionInfo(partitionColumns); - - for (SingleRangePartitionDesc 
singleRangePartitionDesc : singleRangePartitionDescs) { - singleRangePartitionDesc.analyze(1, null); - partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); - } - } - - @Test - public void testBigIntNormal() throws DdlException, AnalysisException { - Column k1 = new Column("k1", new ColumnType(PrimitiveType.BIGINT), true, null, "", ""); - partitionColumns.add(k1); - - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", new PartitionKeyDesc(Lists - .newArrayList("-9223372036854775806")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p2", new PartitionKeyDesc(Lists - .newArrayList("-9223372036854775805")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p3", new PartitionKeyDesc(Lists - .newArrayList("0")), null)); - singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p4", new PartitionKeyDesc(Lists - .newArrayList("9223372036854775806")), null)); - - partitionInfo = new RangePartitionInfo(partitionColumns); - - for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { - singleRangePartitionDesc.analyze(1, null); - partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); - } - } - -} +package com.baidu.palo.catalog; + +import com.baidu.palo.analysis.PartitionKeyDesc; +import com.baidu.palo.analysis.SingleRangePartitionDesc; +import com.baidu.palo.common.AnalysisException; +import com.baidu.palo.common.DdlException; + +import com.google.common.collect.Lists; + +import org.junit.Before; +import org.junit.Test; + +import java.util.LinkedList; +import java.util.List; + +public class RangePartitionInfoTest { + + private List partitionColumns; + private RangePartitionInfo partitionInfo; + + private List singleRangePartitionDescs; + + @Before + public void setUp() { + partitionColumns = new LinkedList(); + singleRangePartitionDescs = new LinkedList(); + } + + @Test(expected = DdlException.class) + public void testTinyInt() throws DdlException, AnalysisException { + Column k1 = new Column("k1", new ColumnType(PrimitiveType.TINYINT), true, null, "", ""); + partitionColumns.add(k1); + + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", + new PartitionKeyDesc(Lists .newArrayList("-128")), + null)); + + + partitionInfo = new RangePartitionInfo(partitionColumns); + for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { + singleRangePartitionDesc.analyze(1, null); + partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); + } + } + + @Test(expected = DdlException.class) + public void testSmallInt() throws DdlException, AnalysisException { + Column k1 = new Column("k1", new ColumnType(PrimitiveType.SMALLINT), true, null, "", ""); + partitionColumns.add(k1); + + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", + new PartitionKeyDesc(Lists.newArrayList("-32768")), + null)); + + partitionInfo = new RangePartitionInfo(partitionColumns); + for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { + singleRangePartitionDesc.analyze(1, null); + partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); + } + } + + @Test(expected = DdlException.class) + public void testInt() throws DdlException, AnalysisException { + Column k1 = new Column("k1", new ColumnType(PrimitiveType.INT), true, null, "", ""); + partitionColumns.add(k1); + + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", + new 
PartitionKeyDesc(Lists + .newArrayList("-2147483648")), + null)); + + partitionInfo = new RangePartitionInfo(partitionColumns); + for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { + singleRangePartitionDesc.analyze(1, null); + partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); + } + } + + @Test(expected = DdlException.class) + public void testBigInt() throws DdlException, AnalysisException { + Column k1 = new Column("k1", new ColumnType(PrimitiveType.BIGINT), true, null, "", ""); + partitionColumns.add(k1); + + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", new PartitionKeyDesc(Lists + .newArrayList("-9223372036854775808")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p2", new PartitionKeyDesc(Lists + .newArrayList("-9223372036854775806")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p3", new PartitionKeyDesc(Lists + .newArrayList("0")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p4", new PartitionKeyDesc(Lists + .newArrayList("9223372036854775806")), null)); + + partitionInfo = new RangePartitionInfo(partitionColumns); + + for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { + singleRangePartitionDesc.analyze(1, null); + partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); + } + } + + @Test + public void testBigIntNormal() throws DdlException, AnalysisException { + Column k1 = new Column("k1", new ColumnType(PrimitiveType.BIGINT), true, null, "", ""); + partitionColumns.add(k1); + + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", new PartitionKeyDesc(Lists + .newArrayList("-9223372036854775806")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p2", new PartitionKeyDesc(Lists + .newArrayList("-9223372036854775805")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p3", new PartitionKeyDesc(Lists + .newArrayList("0")), null)); + singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p4", new PartitionKeyDesc(Lists + .newArrayList("9223372036854775806")), null)); + + partitionInfo = new RangePartitionInfo(partitionColumns); + + for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) { + singleRangePartitionDesc.analyze(1, null); + partitionInfo.handleNewSinglePartitionDesc(singleRangePartitionDesc, 20000L); + } + } + +} diff --git a/fe/test/com/baidu/palo/catalog/ReplicaTest.java b/fe/test/com/baidu/palo/catalog/ReplicaTest.java index 4fba84e357..3668d38fb4 100644 --- a/fe/test/com/baidu/palo/catalog/ReplicaTest.java +++ b/fe/test/com/baidu/palo/catalog/ReplicaTest.java @@ -18,138 +18,138 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.catalog.Replica.ReplicaState; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.ArrayList; -import java.util.List; - -public class ReplicaTest { - - private Replica replica; - private long replicaId; - private long backendId; - private long version; - private long versionHash; - private long dataSize; - private long rowCount; - - @Before - public void setUp() { - replicaId = 10000; - backendId = 20000; - version = 2; - versionHash = 98765; - dataSize = 9999; - rowCount = 1024; - replica = new Replica(replicaId, backendId, version, versionHash, dataSize, rowCount, ReplicaState.NORMAL); - } - - @Test - public void getMethodTest() { - Assert.assertEquals(replicaId, replica.getId()); - Assert.assertEquals(backendId, replica.getBackendId()); - Assert.assertEquals(version, replica.getVersion()); - Assert.assertEquals(versionHash, replica.getVersionHash()); - Assert.assertEquals(dataSize, replica.getDataSize()); - Assert.assertEquals(rowCount, replica.getRowCount()); - - // update new version - long newVersion = version + 1; - long newVersionHash = 87654; - long newDataSize = dataSize + 100; - long newRowCount = rowCount + 10; - replica.updateInfo(newVersion, newVersionHash, newDataSize, newRowCount); - Assert.assertEquals(newVersion, replica.getVersion()); - Assert.assertEquals(newVersionHash, replica.getVersionHash()); - Assert.assertEquals(newDataSize, replica.getDataSize()); - Assert.assertEquals(newRowCount, replica.getRowCount()); - - // check version catch up - Assert.assertFalse(replica.checkVersionCatchUp(5, 98765)); - Assert.assertFalse(replica.checkVersionCatchUp(newVersion, 76543)); - Assert.assertTrue(replica.checkVersionCatchUp(newVersion, newVersionHash)); - } - - @Test - public void toStringTest() { - StringBuffer strBuffer = new StringBuffer("replicaId="); - strBuffer.append(replicaId); - strBuffer.append(", BackendId="); - strBuffer.append(backendId); - strBuffer.append(", version="); - strBuffer.append(version); - strBuffer.append(", versionHash="); - strBuffer.append(versionHash); - strBuffer.append(", dataSize="); - strBuffer.append(dataSize); - strBuffer.append(", rowCount="); - strBuffer.append(rowCount); - - Assert.assertEquals(strBuffer.toString(), replica.toString()); - } - - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./olapReplicaTest"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - List list1 = new ArrayList(); - List list2 = new ArrayList(); - for (int count = 0; count < 10; ++count) { - Replica olapReplica = new Replica(100L * count, 100L * count, 100L * count, 100L * count, - 100L * count, 100 * count, ReplicaState.NORMAL); - list1.add(olapReplica); - olapReplica.write(dos); - } - - Replica replica = new Replica(10L, 20L, null); - list1.add(replica); - replica.write(dos); - dos.flush(); - dos.close(); - - // 2. 
Read a object from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - for (int count = 0; count < 10; ++count) { - Replica olapReplica = new Replica(); - olapReplica.readFields(dis); - Assert.assertEquals(100 * count, olapReplica.getId()); - Assert.assertEquals(100 * count, olapReplica.getBackendId()); - Assert.assertEquals(100 * count, olapReplica.getVersion()); - Assert.assertEquals(100 * count, olapReplica.getVersionHash()); - Assert.assertEquals(100 * count, olapReplica.getDataSize()); - Assert.assertEquals(100 * count, olapReplica.getRowCount()); - Assert.assertEquals(Replica.ReplicaState.NORMAL, olapReplica.getState()); - list2.add(olapReplica); - } - Replica olapReplica = new Replica(); - olapReplica.readFields(dis); - list2.add(olapReplica); - - // 3. Check equal - for (int i = 0; i < 11; i++) { - Assert.assertTrue(list1.get(i).equals(list2.get(i))); - Assert.assertTrue(list1.get(i).toString().equals(list2.get(i).toString())); - } - - Assert.assertTrue(list1.get(1).equals(list1.get(1))); - Assert.assertFalse(list1.get(1).equals(list1)); - - dis.close(); - file.delete(); - } -} - +package com.baidu.palo.catalog; + +import com.baidu.palo.catalog.Replica.ReplicaState; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.ArrayList; +import java.util.List; + +public class ReplicaTest { + + private Replica replica; + private long replicaId; + private long backendId; + private long version; + private long versionHash; + private long dataSize; + private long rowCount; + + @Before + public void setUp() { + replicaId = 10000; + backendId = 20000; + version = 2; + versionHash = 98765; + dataSize = 9999; + rowCount = 1024; + replica = new Replica(replicaId, backendId, version, versionHash, dataSize, rowCount, ReplicaState.NORMAL); + } + + @Test + public void getMethodTest() { + Assert.assertEquals(replicaId, replica.getId()); + Assert.assertEquals(backendId, replica.getBackendId()); + Assert.assertEquals(version, replica.getVersion()); + Assert.assertEquals(versionHash, replica.getVersionHash()); + Assert.assertEquals(dataSize, replica.getDataSize()); + Assert.assertEquals(rowCount, replica.getRowCount()); + + // update new version + long newVersion = version + 1; + long newVersionHash = 87654; + long newDataSize = dataSize + 100; + long newRowCount = rowCount + 10; + replica.updateInfo(newVersion, newVersionHash, newDataSize, newRowCount); + Assert.assertEquals(newVersion, replica.getVersion()); + Assert.assertEquals(newVersionHash, replica.getVersionHash()); + Assert.assertEquals(newDataSize, replica.getDataSize()); + Assert.assertEquals(newRowCount, replica.getRowCount()); + + // check version catch up + Assert.assertFalse(replica.checkVersionCatchUp(5, 98765)); + Assert.assertFalse(replica.checkVersionCatchUp(newVersion, 76543)); + Assert.assertTrue(replica.checkVersionCatchUp(newVersion, newVersionHash)); + } + + @Test + public void toStringTest() { + StringBuffer strBuffer = new StringBuffer("replicaId="); + strBuffer.append(replicaId); + strBuffer.append(", BackendId="); + strBuffer.append(backendId); + strBuffer.append(", version="); + strBuffer.append(version); + strBuffer.append(", versionHash="); + strBuffer.append(versionHash); + strBuffer.append(", dataSize="); + strBuffer.append(dataSize); + strBuffer.append(", rowCount="); + strBuffer.append(rowCount); + 
+ Assert.assertEquals(strBuffer.toString(), replica.toString()); + } + + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./olapReplicaTest"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + List list1 = new ArrayList(); + List list2 = new ArrayList(); + for (int count = 0; count < 10; ++count) { + Replica olapReplica = new Replica(100L * count, 100L * count, 100L * count, 100L * count, + 100L * count, 100 * count, ReplicaState.NORMAL); + list1.add(olapReplica); + olapReplica.write(dos); + } + + Replica replica = new Replica(10L, 20L, null); + list1.add(replica); + replica.write(dos); + dos.flush(); + dos.close(); + + // 2. Read a object from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + for (int count = 0; count < 10; ++count) { + Replica olapReplica = new Replica(); + olapReplica.readFields(dis); + Assert.assertEquals(100 * count, olapReplica.getId()); + Assert.assertEquals(100 * count, olapReplica.getBackendId()); + Assert.assertEquals(100 * count, olapReplica.getVersion()); + Assert.assertEquals(100 * count, olapReplica.getVersionHash()); + Assert.assertEquals(100 * count, olapReplica.getDataSize()); + Assert.assertEquals(100 * count, olapReplica.getRowCount()); + Assert.assertEquals(Replica.ReplicaState.NORMAL, olapReplica.getState()); + list2.add(olapReplica); + } + Replica olapReplica = new Replica(); + olapReplica.readFields(dis); + list2.add(olapReplica); + + // 3. Check equal + for (int i = 0; i < 11; i++) { + Assert.assertTrue(list1.get(i).equals(list2.get(i))); + Assert.assertTrue(list1.get(i).toString().equals(list2.get(i).toString())); + } + + Assert.assertTrue(list1.get(1).equals(list1.get(1))); + Assert.assertFalse(list1.get(1).equals(list1)); + + dis.close(); + file.delete(); + } +} + diff --git a/fe/test/com/baidu/palo/catalog/TableTest.java b/fe/test/com/baidu/palo/catalog/TableTest.java index ecfc37ec8b..560f2c8119 100644 --- a/fe/test/com/baidu/palo/catalog/TableTest.java +++ b/fe/test/com/baidu/palo/catalog/TableTest.java @@ -18,84 +18,84 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.catalog; - -import com.baidu.palo.common.FeConstants; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.ArrayList; -import java.util.List; - -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class TableTest { - - private Catalog catalog; - - @Before - public void setUp() { - catalog = EasyMock.createMock(Catalog.class); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - } - - @Test - public void testSerialization() throws Exception { - // 1. 
Write objects to file - File file = new File("./tableFamilyGroup"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - List columns = new ArrayList(); - columns.add(new Column("column2", - ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); - columns.add(new Column("column3", - ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); - columns.add(new Column("column4", - ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column5", - ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column6", - ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column7", - ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); - columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); - columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); - columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); - - Table table1 = new OlapTable(1000L, "group1", columns, KeysType.AGG_KEYS, - new SinglePartitionInfo(), new RandomDistributionInfo(10)); - table1.write(dos); - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - - Table rFamily1 = Table.read(dis); - Assert.assertTrue(table1.equals(rFamily1)); - - // 3. delete files - dis.close(); - file.delete(); - } -} +package com.baidu.palo.catalog; + +import com.baidu.palo.common.FeConstants; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.ArrayList; +import java.util.List; + +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class TableTest { + + private Catalog catalog; + + @Before + public void setUp() { + catalog = EasyMock.createMock(Catalog.class); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + } + + @Test + public void testSerialization() throws Exception { + // 1. 
Write objects to file + File file = new File("./tableFamilyGroup"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + List columns = new ArrayList(); + columns.add(new Column("column2", + ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); + columns.add(new Column("column3", + ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); + columns.add(new Column("column4", + ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column5", + ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column6", + ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column7", + ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); + columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); + columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); + columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); + + Table table1 = new OlapTable(1000L, "group1", columns, KeysType.AGG_KEYS, + new SinglePartitionInfo(), new RandomDistributionInfo(10)); + table1.write(dos); + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + + Table rFamily1 = Table.read(dis); + Assert.assertTrue(table1.equals(rFamily1)); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/catalog/TabletTest.java b/fe/test/com/baidu/palo/catalog/TabletTest.java index 16f72d9adc..7f45f1deb4 100644 --- a/fe/test/com/baidu/palo/catalog/TabletTest.java +++ b/fe/test/com/baidu/palo/catalog/TabletTest.java @@ -18,139 +18,139 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.catalog; - -import com.baidu.palo.catalog.Replica.ReplicaState; -import com.baidu.palo.common.FeConstants; - -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest({ Catalog.class }) -public class TabletTest { - - private Tablet tablet; - private Replica replica1; - private Replica replica2; - private Replica replica3; - - private TabletInvertedIndex invertedIndex; - - @Before - public void makeTablet() { - invertedIndex = new TabletInvertedIndex(); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - EasyMock.expect(Catalog.getCurrentInvertedIndex()).andReturn(invertedIndex).anyTimes(); - EasyMock.expect(Catalog.isCheckpointThread()).andReturn(false).anyTimes(); - PowerMock.replay(Catalog.class); - - tablet = new Tablet(1); - TabletMeta tabletMeta = new TabletMeta(10, 20, 30, 40, 1); - invertedIndex.addTablet(1, tabletMeta); - replica1 = new Replica(1L, 1L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - replica2 = new Replica(2L, 2L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - replica3 = new Replica(3L, 3L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - tablet.addReplica(replica1); - tablet.addReplica(replica2); - tablet.addReplica(replica3); - - - } - - @Test - public void getMethodTest() { - Assert.assertEquals(replica1, tablet.getReplicaById(replica1.getId())); - Assert.assertEquals(replica2, tablet.getReplicaById(replica2.getId())); - Assert.assertEquals(replica3, tablet.getReplicaById(replica3.getId())); - - Assert.assertEquals(3, tablet.getReplicas().size()); - Assert.assertEquals(replica1, tablet.getReplicaByBackendId(replica1.getBackendId())); - Assert.assertEquals(replica2, tablet.getReplicaByBackendId(replica2.getBackendId())); - Assert.assertEquals(replica3, tablet.getReplicaByBackendId(replica3.getBackendId())); - - - long newTabletId = 20000; - tablet.setTabletId(newTabletId); - Assert.assertEquals("tabletId=" + newTabletId, tablet.toString()); - } - - @Test - public void deleteReplicaTest() { - // delete replica1 - Assert.assertTrue(tablet.deleteReplicaByBackendId(replica1.getBackendId())); - Assert.assertNull(tablet.getReplicaById(replica1.getId())); - - // err: re-delete replica1 - Assert.assertFalse(tablet.deleteReplicaByBackendId(replica1.getBackendId())); - Assert.assertFalse(tablet.deleteReplica(replica1)); - Assert.assertNull(tablet.getReplicaById(replica1.getId())); - - // delete replica2 - Assert.assertTrue(tablet.deleteReplica(replica2)); - Assert.assertEquals(1, tablet.getReplicas().size()); - - // clear replicas - tablet.clearReplica(); - Assert.assertEquals(0, tablet.getReplicas().size()); - } - - @Test - public void testSerialization() throws Exception { - File file = new File("./olapTabletTest"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - tablet.write(dos); - dos.flush(); - dos.close(); - - // 2. 
Read a object from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - Tablet rTablet1 = Tablet.read(dis); - Assert.assertEquals(1, rTablet1.getId()); - Assert.assertEquals(3, rTablet1.getReplicas().size()); - Assert.assertEquals(rTablet1.getReplicas().get(0).getVersion(), rTablet1.getReplicas().get(1).getVersion()); - Assert.assertEquals(rTablet1.getReplicas().get(0).getVersionHash(), - rTablet1.getReplicas().get(1).getVersionHash()); - - Assert.assertTrue(rTablet1.equals(tablet)); - Assert.assertTrue(rTablet1.equals(rTablet1)); - Assert.assertFalse(rTablet1.equals(this)); - - Tablet tablet2 = new Tablet(1); - Replica replica1 = new Replica(1L, 1L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - Replica replica2 = new Replica(2L, 2L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - Replica replica3 = new Replica(3L, 3L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); - tablet2.addReplica(replica1); - tablet2.addReplica(replica2); - Assert.assertFalse(tablet2.equals(tablet)); - tablet2.addReplica(replica3); - Assert.assertTrue(tablet2.equals(tablet)); - - Tablet tablet3 = new Tablet(1); - tablet3.addReplica(replica1); - tablet3.addReplica(replica2); - tablet3.addReplica(new Replica(4L, 4L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL)); - Assert.assertFalse(tablet3.equals(tablet)); - - dis.close(); - file.delete(); - } - -} +package com.baidu.palo.catalog; + +import com.baidu.palo.catalog.Replica.ReplicaState; +import com.baidu.palo.common.FeConstants; + +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest({ Catalog.class }) +public class TabletTest { + + private Tablet tablet; + private Replica replica1; + private Replica replica2; + private Replica replica3; + + private TabletInvertedIndex invertedIndex; + + @Before + public void makeTablet() { + invertedIndex = new TabletInvertedIndex(); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + EasyMock.expect(Catalog.getCurrentInvertedIndex()).andReturn(invertedIndex).anyTimes(); + EasyMock.expect(Catalog.isCheckpointThread()).andReturn(false).anyTimes(); + PowerMock.replay(Catalog.class); + + tablet = new Tablet(1); + TabletMeta tabletMeta = new TabletMeta(10, 20, 30, 40, 1); + invertedIndex.addTablet(1, tabletMeta); + replica1 = new Replica(1L, 1L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + replica2 = new Replica(2L, 2L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + replica3 = new Replica(3L, 3L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + tablet.addReplica(replica1); + tablet.addReplica(replica2); + tablet.addReplica(replica3); + + + } + + @Test + public void getMethodTest() { + Assert.assertEquals(replica1, tablet.getReplicaById(replica1.getId())); + Assert.assertEquals(replica2, tablet.getReplicaById(replica2.getId())); + Assert.assertEquals(replica3, tablet.getReplicaById(replica3.getId())); + + Assert.assertEquals(3, 
tablet.getReplicas().size()); + Assert.assertEquals(replica1, tablet.getReplicaByBackendId(replica1.getBackendId())); + Assert.assertEquals(replica2, tablet.getReplicaByBackendId(replica2.getBackendId())); + Assert.assertEquals(replica3, tablet.getReplicaByBackendId(replica3.getBackendId())); + + + long newTabletId = 20000; + tablet.setTabletId(newTabletId); + Assert.assertEquals("tabletId=" + newTabletId, tablet.toString()); + } + + @Test + public void deleteReplicaTest() { + // delete replica1 + Assert.assertTrue(tablet.deleteReplicaByBackendId(replica1.getBackendId())); + Assert.assertNull(tablet.getReplicaById(replica1.getId())); + + // err: re-delete replica1 + Assert.assertFalse(tablet.deleteReplicaByBackendId(replica1.getBackendId())); + Assert.assertFalse(tablet.deleteReplica(replica1)); + Assert.assertNull(tablet.getReplicaById(replica1.getId())); + + // delete replica2 + Assert.assertTrue(tablet.deleteReplica(replica2)); + Assert.assertEquals(1, tablet.getReplicas().size()); + + // clear replicas + tablet.clearReplica(); + Assert.assertEquals(0, tablet.getReplicas().size()); + } + + @Test + public void testSerialization() throws Exception { + File file = new File("./olapTabletTest"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + tablet.write(dos); + dos.flush(); + dos.close(); + + // 2. Read a object from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + Tablet rTablet1 = Tablet.read(dis); + Assert.assertEquals(1, rTablet1.getId()); + Assert.assertEquals(3, rTablet1.getReplicas().size()); + Assert.assertEquals(rTablet1.getReplicas().get(0).getVersion(), rTablet1.getReplicas().get(1).getVersion()); + Assert.assertEquals(rTablet1.getReplicas().get(0).getVersionHash(), + rTablet1.getReplicas().get(1).getVersionHash()); + + Assert.assertTrue(rTablet1.equals(tablet)); + Assert.assertTrue(rTablet1.equals(rTablet1)); + Assert.assertFalse(rTablet1.equals(this)); + + Tablet tablet2 = new Tablet(1); + Replica replica1 = new Replica(1L, 1L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + Replica replica2 = new Replica(2L, 2L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + Replica replica3 = new Replica(3L, 3L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL); + tablet2.addReplica(replica1); + tablet2.addReplica(replica2); + Assert.assertFalse(tablet2.equals(tablet)); + tablet2.addReplica(replica3); + Assert.assertTrue(tablet2.equals(tablet)); + + Tablet tablet3 = new Tablet(1); + tablet3.addReplica(replica1); + tablet3.addReplica(replica2); + tablet3.addReplica(new Replica(4L, 4L, 100L, 0L, 200000L, 3000L, ReplicaState.NORMAL)); + Assert.assertFalse(tablet3.equals(tablet)); + + dis.close(); + file.delete(); + } + +} diff --git a/fe/test/com/baidu/palo/cluster/SystemInfoServiceTest.java b/fe/test/com/baidu/palo/cluster/SystemInfoServiceTest.java index 7f83fd03e2..4db11b4707 100644 --- a/fe/test/com/baidu/palo/cluster/SystemInfoServiceTest.java +++ b/fe/test/com/baidu/palo/cluster/SystemInfoServiceTest.java @@ -17,8 +17,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.cluster; - +package com.baidu.palo.cluster; + import com.baidu.palo.analysis.AccessTestUtil; import com.baidu.palo.analysis.AddBackendClause; import com.baidu.palo.analysis.Analyzer; @@ -50,223 +50,223 @@ import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.IOException; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class SystemInfoServiceTest { - - private EditLog editLog; - private Catalog catalog; - private SystemInfoService systemInfoService; - private Database db; - - private Analyzer analyzer; - - private String hostPort; - - private long backendId = 10000L; - - @Before - public void setUp() throws IOException { - editLog = EasyMock.createMock(EditLog.class); - editLog.logAddBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logDropBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(editLog); - - db = EasyMock.createMock(Database.class); - db.readLock(); - EasyMock.expectLastCall().anyTimes(); - db.readUnlock(); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(db); - - catalog = EasyMock.createMock(Catalog.class); - EasyMock.expect(catalog.getNextId()).andReturn(backendId).anyTimes(); - EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); +import java.io.IOException; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class SystemInfoServiceTest { + + private EditLog editLog; + private Catalog catalog; + private SystemInfoService systemInfoService; + private Database db; + + private Analyzer analyzer; + + private String hostPort; + + private long backendId = 10000L; + + @Before + public void setUp() throws IOException { + editLog = EasyMock.createMock(EditLog.class); + editLog.logAddBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logDropBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(editLog); + + db = EasyMock.createMock(Database.class); + db.readLock(); + EasyMock.expectLastCall().anyTimes(); + db.readUnlock(); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(db); + + catalog = EasyMock.createMock(Catalog.class); + EasyMock.expect(catalog.getNextId()).andReturn(backendId).anyTimes(); + EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); EasyMock.expect(catalog.getDb(EasyMock.anyLong())).andReturn(db).anyTimes(); EasyMock.expect(catalog.getCluster(EasyMock.anyString())).andReturn(new Cluster("cluster", 1)).anyTimes(); - - catalog.clear(); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(catalog); - - PowerMock.mockStatic(Catalog.class); - systemInfoService = new SystemInfoService(); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentSystemInfo()).andReturn(systemInfoService).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - - analyzer = AccessTestUtil.fetchAdminAnalyzer(false); - } - - public void mkdir(String dirString) { - 
File dir = new File(dirString); - if (!dir.exists()) { - dir.mkdir(); - } else { - File[] files = dir.listFiles(); - for (File file : files) { - if (file.isFile()) { - file.delete(); - } - } - } - } - - public void deleteDir(String metaDir) { - File dir = new File(metaDir); - if (dir.exists()) { - File[] files = dir.listFiles(); - for (File file : files) { - if (file.isFile()) { - file.delete(); - } - } - - dir.delete(); - } - } - - public void createHostAndPort(int type) { - switch (type) { - case 1: - // missing ip - hostPort = "12346"; - break; - case 2: - // invalid ip - hostPort = "asdasd:12345"; - break; - case 3: - // invalid port - hostPort = "10.1.2.3:123467"; - break; - case 4: - // normal - hostPort = "127.0.0.1:12345"; - break; - default: - break; - } - } - - public void clearAllBackend() { - Catalog.getCurrentSystemInfo().dropAllBackend(); - } - - @Test(expected = AnalysisException.class) - public void validHostAndPortTest1() throws Exception { - createHostAndPort(1); - systemInfoService.validateHostAndPort(hostPort); - } - - @Test(expected = AnalysisException.class) - public void validHostAndPortTest2() throws Exception { - createHostAndPort(2); - systemInfoService.validateHostAndPort(hostPort); - } - - @Test(expected = AnalysisException.class) - public void validHostAndPortTest3() throws Exception { - createHostAndPort(3); - systemInfoService.validateHostAndPort(hostPort); - } - - @Test - public void validHostAndPortTest4() throws Exception { - createHostAndPort(4); - systemInfoService.validateHostAndPort(hostPort); - } - - @Test - public void addBackendTest() throws AnalysisException { - clearAllBackend(); - AddBackendClause stmt = new AddBackendClause(Lists.newArrayList("192.168.0.1:1234")); - stmt.analyze(analyzer); - try { - Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), true); - } catch (DdlException e) { - Assert.fail(); - } - - try { - Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), true); - } catch (DdlException e) { - Assert.assertTrue(e.getMessage().contains("already exists")); - } - - Assert.assertNotNull(Catalog.getCurrentSystemInfo().getBackend(backendId)); - Assert.assertNotNull(Catalog.getCurrentSystemInfo().getBackendWithHeartbeatPort("192.168.0.1", 1234)); - - Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendIds(false).size() == 1); - Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendIds(false).get(0) == backendId); - - Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendReportVersion(backendId) == 0L); - - Catalog.getCurrentSystemInfo().updateBackendReportVersion(backendId, 2L, 20000L); - Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendReportVersion(backendId) == 2L); - } - - @Test - public void removeBackendTest() throws AnalysisException { - clearAllBackend(); - AddBackendClause stmt = new AddBackendClause(Lists.newArrayList("192.168.0.1:1234")); - stmt.analyze(analyzer); - try { - Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), true); - } catch (DdlException e) { - e.printStackTrace(); - } - - DropBackendClause dropStmt = new DropBackendClause(Lists.newArrayList("192.168.0.1:1234")); - dropStmt.analyze(analyzer); - try { - Catalog.getCurrentSystemInfo().dropBackends(dropStmt.getHostPortPairs()); - } catch (DdlException e) { - e.printStackTrace(); - Assert.fail(); - } - - try { - Catalog.getCurrentSystemInfo().dropBackends(dropStmt.getHostPortPairs()); - } catch (DdlException e) { - Assert.assertTrue(e.getMessage().contains("does not exist")); - } - } - - 
@Test - public void testSaveLoadBackend() throws Exception { - clearAllBackend(); - String dir = "testLoadBackend"; - mkdir(dir); - File file = new File(dir, "image"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - SystemInfoService systemInfoService = Catalog.getCurrentSystemInfo(); - Backend back1 = new Backend(1L, "localhost", 3); - back1.updateOnce(4, 6, 8); - systemInfoService.replayAddBackend(back1); - long checksum1 = systemInfoService.saveBackends(dos, 0); - catalog.clear(); - catalog = null; - dos.close(); - - DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file))); - long checksum2 = systemInfoService.loadBackends(dis, 0); - Assert.assertEquals(checksum1, checksum2); - Assert.assertEquals(1, systemInfoService.getIdToBackend().size()); - Backend back2 = systemInfoService.getBackend(1); - Assert.assertTrue(back1.equals(back2)); - dis.close(); - - deleteDir(dir); - } - -} + + catalog.clear(); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(catalog); + + PowerMock.mockStatic(Catalog.class); + systemInfoService = new SystemInfoService(); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentSystemInfo()).andReturn(systemInfoService).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + + analyzer = AccessTestUtil.fetchAdminAnalyzer(false); + } + + public void mkdir(String dirString) { + File dir = new File(dirString); + if (!dir.exists()) { + dir.mkdir(); + } else { + File[] files = dir.listFiles(); + for (File file : files) { + if (file.isFile()) { + file.delete(); + } + } + } + } + + public void deleteDir(String metaDir) { + File dir = new File(metaDir); + if (dir.exists()) { + File[] files = dir.listFiles(); + for (File file : files) { + if (file.isFile()) { + file.delete(); + } + } + + dir.delete(); + } + } + + public void createHostAndPort(int type) { + switch (type) { + case 1: + // missing ip + hostPort = "12346"; + break; + case 2: + // invalid ip + hostPort = "asdasd:12345"; + break; + case 3: + // invalid port + hostPort = "10.1.2.3:123467"; + break; + case 4: + // normal + hostPort = "127.0.0.1:12345"; + break; + default: + break; + } + } + + public void clearAllBackend() { + Catalog.getCurrentSystemInfo().dropAllBackend(); + } + + @Test(expected = AnalysisException.class) + public void validHostAndPortTest1() throws Exception { + createHostAndPort(1); + systemInfoService.validateHostAndPort(hostPort); + } + + @Test(expected = AnalysisException.class) + public void validHostAndPortTest2() throws Exception { + createHostAndPort(2); + systemInfoService.validateHostAndPort(hostPort); + } + + @Test(expected = AnalysisException.class) + public void validHostAndPortTest3() throws Exception { + createHostAndPort(3); + systemInfoService.validateHostAndPort(hostPort); + } + + @Test + public void validHostAndPortTest4() throws Exception { + createHostAndPort(4); + systemInfoService.validateHostAndPort(hostPort); + } + + @Test + public void addBackendTest() throws AnalysisException { + clearAllBackend(); + AddBackendClause stmt = new AddBackendClause(Lists.newArrayList("192.168.0.1:1234")); + stmt.analyze(analyzer); + try { + Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), true); + } catch (DdlException e) { + Assert.fail(); + } + + try { + Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), 
true); + } catch (DdlException e) { + Assert.assertTrue(e.getMessage().contains("already exists")); + } + + Assert.assertNotNull(Catalog.getCurrentSystemInfo().getBackend(backendId)); + Assert.assertNotNull(Catalog.getCurrentSystemInfo().getBackendWithHeartbeatPort("192.168.0.1", 1234)); + + Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendIds(false).size() == 1); + Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendIds(false).get(0) == backendId); + + Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendReportVersion(backendId) == 0L); + + Catalog.getCurrentSystemInfo().updateBackendReportVersion(backendId, 2L, 20000L); + Assert.assertTrue(Catalog.getCurrentSystemInfo().getBackendReportVersion(backendId) == 2L); + } + + @Test + public void removeBackendTest() throws AnalysisException { + clearAllBackend(); + AddBackendClause stmt = new AddBackendClause(Lists.newArrayList("192.168.0.1:1234")); + stmt.analyze(analyzer); + try { + Catalog.getCurrentSystemInfo().addBackends(stmt.getHostPortPairs(), true); + } catch (DdlException e) { + e.printStackTrace(); + } + + DropBackendClause dropStmt = new DropBackendClause(Lists.newArrayList("192.168.0.1:1234")); + dropStmt.analyze(analyzer); + try { + Catalog.getCurrentSystemInfo().dropBackends(dropStmt.getHostPortPairs()); + } catch (DdlException e) { + e.printStackTrace(); + Assert.fail(); + } + + try { + Catalog.getCurrentSystemInfo().dropBackends(dropStmt.getHostPortPairs()); + } catch (DdlException e) { + Assert.assertTrue(e.getMessage().contains("does not exist")); + } + } + + @Test + public void testSaveLoadBackend() throws Exception { + clearAllBackend(); + String dir = "testLoadBackend"; + mkdir(dir); + File file = new File(dir, "image"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + SystemInfoService systemInfoService = Catalog.getCurrentSystemInfo(); + Backend back1 = new Backend(1L, "localhost", 3); + back1.updateOnce(4, 6, 8); + systemInfoService.replayAddBackend(back1); + long checksum1 = systemInfoService.saveBackends(dos, 0); + catalog.clear(); + catalog = null; + dos.close(); + + DataInputStream dis = new DataInputStream(new BufferedInputStream(new FileInputStream(file))); + long checksum2 = systemInfoService.loadBackends(dis, 0); + Assert.assertEquals(checksum1, checksum2); + Assert.assertEquals(1, systemInfoService.getIdToBackend().size()); + Backend back2 = systemInfoService.getBackend(1); + Assert.assertTrue(back1.equals(back2)); + dis.close(); + + deleteDir(dir); + } + +} diff --git a/fe/test/com/baidu/palo/common/proc/BackendProcNodeTest.java b/fe/test/com/baidu/palo/common/proc/BackendProcNodeTest.java index 564d132c91..bfad868430 100644 --- a/fe/test/com/baidu/palo/common/proc/BackendProcNodeTest.java +++ b/fe/test/com/baidu/palo/common/proc/BackendProcNodeTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.persist.EditLog; @@ -36,60 +36,60 @@ import org.junit.runner.RunWith; import org.powermock.api.easymock.PowerMock; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class BackendProcNodeTest { - private Backend b1; - private Catalog catalog; - private EditLog editLog; - - @Before - public void setUp() { - editLog = EasyMock.createMock(EditLog.class); - editLog.logAddBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logDropBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(editLog); - - catalog = EasyMock.createMock(Catalog.class); - EasyMock.expect(catalog.getNextId()).andReturn(10000L).anyTimes(); - EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); - catalog.clear(); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(catalog); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - PowerMock.replay(Catalog.class); - - b1 = new Backend(1000, "host1", 10000); - b1.updateOnce(10001, 10003, 10005); - } - - @After - public void tearDown() { - } - - @Test - public void testResultNormal() throws AnalysisException { - BackendProcNode node = new BackendProcNode(b1); - ProcResult result; - - // fetch result - result = node.fetchResult(); - Assert.assertNotNull(result); - Assert.assertTrue(result instanceof BaseProcResult); - - Assert.assertTrue(result.getRows().size() >= 1); +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class BackendProcNodeTest { + private Backend b1; + private Catalog catalog; + private EditLog editLog; + + @Before + public void setUp() { + editLog = EasyMock.createMock(EditLog.class); + editLog.logAddBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logDropBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(editLog); + + catalog = EasyMock.createMock(Catalog.class); + EasyMock.expect(catalog.getNextId()).andReturn(10000L).anyTimes(); + EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); + catalog.clear(); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(catalog); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + PowerMock.replay(Catalog.class); + + b1 = new Backend(1000, "host1", 10000); + b1.updateOnce(10001, 10003, 10005); + } + + @After + public void tearDown() { + } + + @Test + public void testResultNormal() throws AnalysisException { + BackendProcNode node = new BackendProcNode(b1); + ProcResult result; + + // fetch result + result = node.fetchResult(); + Assert.assertNotNull(result); + Assert.assertTrue(result instanceof BaseProcResult); + + 
Assert.assertTrue(result.getRows().size() >= 1); Assert.assertEquals(Lists.newArrayList("RootPath", "TotalCapacity", "DataUsedCapacity", - "DiskAvailableCapacity", "State"), - result.getColumnNames()); - } - -} + "DiskAvailableCapacity", "State"), + result.getColumnNames()); + } + +} diff --git a/fe/test/com/baidu/palo/common/proc/BackendsProcDirTest.java b/fe/test/com/baidu/palo/common/proc/BackendsProcDirTest.java index 581c3d32a3..46b0349495 100644 --- a/fe/test/com/baidu/palo/common/proc/BackendsProcDirTest.java +++ b/fe/test/com/baidu/palo/common/proc/BackendsProcDirTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.proc; - +package com.baidu.palo.common.proc; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.AnalysisException; import com.baidu.palo.persist.EditLog; @@ -38,132 +38,132 @@ import org.junit.runner.RunWith; import org.powermock.api.easymock.PowerMock; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class BackendsProcDirTest { - private static Backend b1; - private static Backend b2; - - private SystemInfoService systemInfoService; - private static Catalog catalog; - private static EditLog editLog; - - // construct test case - @BeforeClass - public static void setUpClass() { - editLog = EasyMock.createMock(EditLog.class); - editLog.logAddBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logDropBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(editLog); - - catalog = EasyMock.createMock(Catalog.class); - EasyMock.expect(catalog.getNextId()).andReturn(10000L).anyTimes(); - EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); - catalog.clear(); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(catalog); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - PowerMock.replay(Catalog.class); - - b1 = new Backend(1000, "host1", 10000); - b1.updateOnce(10001, 10003, 10005); - b2 = new Backend(1001, "host2", 20000); - b2.updateOnce(20001, 20003, 20005); - } - - @Before - public void setUp() { - systemInfoService = EasyMock.createNiceMock(SystemInfoService.class); - } - - @After - public void tearDown() { - systemInfoService = null; - } - - @Test - public void testRegister() { - BackendsProcDir dir; - - dir = new BackendsProcDir(systemInfoService); - Assert.assertFalse(dir.register("100000", new BaseProcDir())); - } - - @Test(expected = AnalysisException.class) - public void testLookupNormal() throws AnalysisException { - EasyMock.expect(systemInfoService.getBackend(1000)).andReturn(b1); - EasyMock.expect(systemInfoService.getBackend(1001)).andReturn(b2); - EasyMock.expect(systemInfoService.getBackend(1002)).andReturn(null); - EasyMock.replay(systemInfoService); - - BackendsProcDir dir; - ProcNodeInterface node; - - dir = new BackendsProcDir(systemInfoService); - try { - node = dir.lookup("1000"); - Assert.assertNotNull(node); - Assert.assertTrue(node instanceof BackendProcNode); - } catch (AnalysisException e) { - Assert.fail(); - } - - dir = 
new BackendsProcDir(systemInfoService); - try { - node = dir.lookup("1001"); - Assert.assertNotNull(node); - Assert.assertTrue(node instanceof BackendProcNode); - } catch (AnalysisException e) { - Assert.fail(); - } - - dir = new BackendsProcDir(systemInfoService); - node = dir.lookup("1002"); - Assert.fail(); - } - - @Test - public void testLookupInvalid() { - BackendsProcDir dir; - ProcNodeInterface node; - - dir = new BackendsProcDir(systemInfoService); - try { - node = dir.lookup(null); - } catch (AnalysisException e) { - e.printStackTrace(); - } - - try { - node = dir.lookup(""); - } catch (AnalysisException e) { - e.printStackTrace(); - } - } - - @Test - public void testFetchResultNormal() throws AnalysisException { - EasyMock.expect(systemInfoService.getBackendIds(false)).andReturn(Lists.newArrayList(1000L, 1001L)); - EasyMock.expect(systemInfoService.getBackend(1000)).andReturn(b1); - EasyMock.expect(systemInfoService.getBackend(1001)).andReturn(b2); - EasyMock.replay(systemInfoService); - - BackendsProcDir dir; - ProcResult result; - - dir = new BackendsProcDir(systemInfoService); - result = dir.fetchResult(); - Assert.assertNotNull(result); - Assert.assertTrue(result instanceof BaseProcResult); - } - -} +import org.powermock.modules.junit4.PowerMockRunner; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class BackendsProcDirTest { + private static Backend b1; + private static Backend b2; + + private SystemInfoService systemInfoService; + private static Catalog catalog; + private static EditLog editLog; + + // construct test case + @BeforeClass + public static void setUpClass() { + editLog = EasyMock.createMock(EditLog.class); + editLog.logAddBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logDropBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(editLog); + + catalog = EasyMock.createMock(Catalog.class); + EasyMock.expect(catalog.getNextId()).andReturn(10000L).anyTimes(); + EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); + catalog.clear(); + EasyMock.expectLastCall().anyTimes(); + EasyMock.replay(catalog); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + PowerMock.replay(Catalog.class); + + b1 = new Backend(1000, "host1", 10000); + b1.updateOnce(10001, 10003, 10005); + b2 = new Backend(1001, "host2", 20000); + b2.updateOnce(20001, 20003, 20005); + } + + @Before + public void setUp() { + systemInfoService = EasyMock.createNiceMock(SystemInfoService.class); + } + + @After + public void tearDown() { + systemInfoService = null; + } + + @Test + public void testRegister() { + BackendsProcDir dir; + + dir = new BackendsProcDir(systemInfoService); + Assert.assertFalse(dir.register("100000", new BaseProcDir())); + } + + @Test(expected = AnalysisException.class) + public void testLookupNormal() throws AnalysisException { + EasyMock.expect(systemInfoService.getBackend(1000)).andReturn(b1); + EasyMock.expect(systemInfoService.getBackend(1001)).andReturn(b2); + EasyMock.expect(systemInfoService.getBackend(1002)).andReturn(null); + EasyMock.replay(systemInfoService); + + BackendsProcDir dir; + ProcNodeInterface node; + + dir = new BackendsProcDir(systemInfoService); + try { + node = dir.lookup("1000"); + Assert.assertNotNull(node); + 
Assert.assertTrue(node instanceof BackendProcNode); + } catch (AnalysisException e) { + Assert.fail(); + } + + dir = new BackendsProcDir(systemInfoService); + try { + node = dir.lookup("1001"); + Assert.assertNotNull(node); + Assert.assertTrue(node instanceof BackendProcNode); + } catch (AnalysisException e) { + Assert.fail(); + } + + dir = new BackendsProcDir(systemInfoService); + node = dir.lookup("1002"); + Assert.fail(); + } + + @Test + public void testLookupInvalid() { + BackendsProcDir dir; + ProcNodeInterface node; + + dir = new BackendsProcDir(systemInfoService); + try { + node = dir.lookup(null); + } catch (AnalysisException e) { + e.printStackTrace(); + } + + try { + node = dir.lookup(""); + } catch (AnalysisException e) { + e.printStackTrace(); + } + } + + @Test + public void testFetchResultNormal() throws AnalysisException { + EasyMock.expect(systemInfoService.getBackendIds(false)).andReturn(Lists.newArrayList(1000L, 1001L)); + EasyMock.expect(systemInfoService.getBackend(1000)).andReturn(b1); + EasyMock.expect(systemInfoService.getBackend(1001)).andReturn(b2); + EasyMock.replay(systemInfoService); + + BackendsProcDir dir; + ProcResult result; + + dir = new BackendsProcDir(systemInfoService); + result = dir.fetchResult(); + Assert.assertNotNull(result); + Assert.assertTrue(result instanceof BaseProcResult); + } + +} diff --git a/fe/test/com/baidu/palo/common/util/DebugUtilTest.java b/fe/test/com/baidu/palo/common/util/DebugUtilTest.java index 6883495c38..e909a7c7fb 100644 --- a/fe/test/com/baidu/palo/common/util/DebugUtilTest.java +++ b/fe/test/com/baidu/palo/common/util/DebugUtilTest.java @@ -18,71 +18,71 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - -import org.junit.Assert; - -import org.junit.Test; - -import com.baidu.palo.common.Pair; - -public class DebugUtilTest { - @Test - public void testGetUint() { - Pair result; - result = DebugUtil.getUint(2000000000L); - Assert.assertEquals(Double.valueOf(2.0), result.first); - Assert.assertEquals(result.second, "B"); - - result = DebugUtil.getUint(1234567L); - Assert.assertEquals(result.first, Double.valueOf(1.234567)); - Assert.assertEquals(result.second, "M"); - - result = DebugUtil.getUint(1234L); - Assert.assertEquals(result.first, Double.valueOf(1.234)); - Assert.assertEquals(result.second, "K"); - - result = DebugUtil.getUint(123L); - Assert.assertEquals(result.first, Double.valueOf(123.0)); - Assert.assertEquals(result.second, ""); - } - - @Test - public void testGetPrettyStringMs() { - // 6hour1min - Assert.assertEquals(DebugUtil.getPrettyStringMs(21660222), "6h1m"); - - // 1min222ms - Assert.assertEquals(DebugUtil.getPrettyStringMs(60222), "1m"); - - // 2s222ms - Assert.assertEquals(DebugUtil.getPrettyStringMs(2222), "2s222ms"); - - // 22ms - Assert.assertEquals(DebugUtil.getPrettyStringMs(22), "22ms"); - } - - @Test - public void testGetByteUint() { - Pair result; - result = DebugUtil.getByteUint(0); - Assert.assertEquals(result.first, Double.valueOf(0.0)); - Assert.assertEquals(result.second, ""); - - result = DebugUtil.getByteUint(123); // B - Assert.assertEquals(result.first, Double.valueOf(123.0)); - Assert.assertEquals(result.second, "B"); - - result = DebugUtil.getByteUint(123456); // K - Assert.assertEquals(result.first, Double.valueOf(120.5625)); - Assert.assertEquals(result.second, "KB"); - - result = DebugUtil.getByteUint(1234567); // M - Assert.assertEquals(result.first, Double.valueOf(1.1773748397827148)); - 
Assert.assertEquals(result.second, "MB"); - - result = DebugUtil.getByteUint(1234567890L); // G - Assert.assertEquals(result.first, Double.valueOf(1.1497809458523989)); - Assert.assertEquals(result.second, "GB"); - } -} +package com.baidu.palo.common.util; + +import org.junit.Assert; + +import org.junit.Test; + +import com.baidu.palo.common.Pair; + +public class DebugUtilTest { + @Test + public void testGetUint() { + Pair result; + result = DebugUtil.getUint(2000000000L); + Assert.assertEquals(Double.valueOf(2.0), result.first); + Assert.assertEquals(result.second, "B"); + + result = DebugUtil.getUint(1234567L); + Assert.assertEquals(result.first, Double.valueOf(1.234567)); + Assert.assertEquals(result.second, "M"); + + result = DebugUtil.getUint(1234L); + Assert.assertEquals(result.first, Double.valueOf(1.234)); + Assert.assertEquals(result.second, "K"); + + result = DebugUtil.getUint(123L); + Assert.assertEquals(result.first, Double.valueOf(123.0)); + Assert.assertEquals(result.second, ""); + } + + @Test + public void testGetPrettyStringMs() { + // 6hour1min + Assert.assertEquals(DebugUtil.getPrettyStringMs(21660222), "6h1m"); + + // 1min222ms + Assert.assertEquals(DebugUtil.getPrettyStringMs(60222), "1m"); + + // 2s222ms + Assert.assertEquals(DebugUtil.getPrettyStringMs(2222), "2s222ms"); + + // 22ms + Assert.assertEquals(DebugUtil.getPrettyStringMs(22), "22ms"); + } + + @Test + public void testGetByteUint() { + Pair result; + result = DebugUtil.getByteUint(0); + Assert.assertEquals(result.first, Double.valueOf(0.0)); + Assert.assertEquals(result.second, ""); + + result = DebugUtil.getByteUint(123); // B + Assert.assertEquals(result.first, Double.valueOf(123.0)); + Assert.assertEquals(result.second, "B"); + + result = DebugUtil.getByteUint(123456); // K + Assert.assertEquals(result.first, Double.valueOf(120.5625)); + Assert.assertEquals(result.second, "KB"); + + result = DebugUtil.getByteUint(1234567); // M + Assert.assertEquals(result.first, Double.valueOf(1.1773748397827148)); + Assert.assertEquals(result.second, "MB"); + + result = DebugUtil.getByteUint(1234567890L); // G + Assert.assertEquals(result.first, Double.valueOf(1.1497809458523989)); + Assert.assertEquals(result.second, "GB"); + } +} diff --git a/fe/test/com/baidu/palo/common/util/ListComparatorTest.java b/fe/test/com/baidu/palo/common/util/ListComparatorTest.java index e6eaf9f032..58f73f059b 100644 --- a/fe/test/com/baidu/palo/common/util/ListComparatorTest.java +++ b/fe/test/com/baidu/palo/common/util/ListComparatorTest.java @@ -18,172 +18,172 @@ // specific language governing permissions and limitations // under the License. 
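ListComparatorTest below exercises ListComparator, which compares two rows (lists of Comparable values) column by column according to its OrderByPair(columnIndex, isDesc) arguments; the test also shows that a shorter row which ties on every compared column sorts first. The usage sketch below is an assumption-laden illustration: the generic type parameters did not survive in this copy of the patch, so the List<Comparable> element type and the ListComparator/OrderByPair package are assumed, and the wrapping class is illustrative.

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

// assumed to live in com.baidu.palo.common.util, the same package as the test file
import com.baidu.palo.common.util.ListComparator;
import com.baidu.palo.common.util.OrderByPair;

public class ListComparatorSketch {
    public static void main(String[] args) {
        List<List<Comparable>> rows = new LinkedList<List<Comparable>>();

        List<Comparable> rowA = new LinkedList<Comparable>();
        rowA.add(Long.valueOf(1));
        rowA.add(Long.valueOf(200));
        rowA.add("bcd");
        rows.add(rowA);

        List<Comparable> rowB = new LinkedList<Comparable>();
        rowB.add(Long.valueOf(1));
        rowB.add(Long.valueOf(200));
        rowB.add("abc");
        rows.add(rowB);

        // order by column 1 ascending, then column 2 ascending;
        // column 2 breaks the tie, so rowB ("abc") ends up first
        ListComparator<List<Comparable>> comparator =
                new ListComparator<List<Comparable>>(new OrderByPair(1, false), new OrderByPair(2, false));
        Collections.sort(rows, comparator);

        System.out.println(rows.get(0));   // [1, 200, abc]
    }
}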
-package com.baidu.palo.common.util; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; - -public class ListComparatorTest { - - List> listCollection; - - @Before - public void setUp() { - listCollection = new LinkedList>(); - } - - private void printCollection() { - System.out.println("LIST:"); - for (List list : listCollection) { - for (Comparable comparable : list) { - System.out.print(comparable + " | "); - } - System.out.println(); - } - System.out.println("END LIST\n"); - } - - @Test - public void test_1() { - // 1, 200, "bcd", 2000 - // 1, 200, "abc" - List list1 = new LinkedList(); - list1.add(new Long(1)); - list1.add(new Long(200)); - list1.add("bcd"); - list1.add(new Long(1000)); - listCollection.add(list1); - - List list2 = new LinkedList(); - list2.add(new Long(1)); - list2.add(new Long(200)); - list2.add("abc"); - listCollection.add(list2); - - printCollection(); - - ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), - new OrderByPair(2, false)); - Collections.sort(listCollection, comparator); - printCollection(); - - Assert.assertEquals(list2, listCollection.get(0)); - } - - @Test - public void test_2() { - // 1, 200, "abc", 1000 - // 1, 200, "abc" - List list1 = new LinkedList(); - list1.add(new Long(1)); - list1.add(new Long(200)); - list1.add("abc"); - list1.add(new Long(1000)); - listCollection.add(list1); - - List list2 = new LinkedList(); - list2.add(new Long(1)); - list2.add(new Long(200)); - list2.add("abc"); - listCollection.add(list2); - - printCollection(); - - ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), - new OrderByPair(2, false)); - Collections.sort(listCollection, comparator); - printCollection(); - Assert.assertEquals(list2, listCollection.get(0)); - } - - @Test(expected = ClassCastException.class) - public void test_3() { - // 1, 200, "abc", 2000 - // 1, 200, "abc", "bcd" - List list1 = new LinkedList(); - list1.add(new Long(1)); - list1.add(new Long(200)); - list1.add("abc"); - list1.add(new Long(2000)); - listCollection.add(list1); - - List list2 = new LinkedList(); - list2.add(new Long(1)); - list2.add(new Long(200)); - list2.add("abc"); - list2.add("bcd"); - listCollection.add(list2); - - printCollection(); - - ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), - new OrderByPair(3, false)); - Collections.sort(listCollection, comparator); - Assert.fail(); - } - - @Test - public void test_4() { - // 1, 200, "bb", 2000 - // 1, 300, "aa" - List list1 = new LinkedList(); - list1.add(new Long(1)); - list1.add(new Long(200)); - list1.add("bb"); - list1.add(new Long(1000)); - listCollection.add(list1); - - List list2 = new LinkedList(); - list2.add(new Long(1)); - list2.add(new Long(300)); - list2.add("aa"); - listCollection.add(list2); - - printCollection(); - - ListComparator> comparator = new ListComparator<>(new OrderByPair(2, false), - new OrderByPair(1, false)); - Collections.sort(listCollection, comparator); - printCollection(); - Assert.assertEquals(list2, listCollection.get(0)); - } - - @Test - public void test_5() { - // 1, 200, "bb", 2000 - // 1, 100, "aa" - // 1, 300, "aa" - List list1 = new LinkedList(); - list1.add(new Long(1)); - list1.add(new Long(200)); - list1.add("bb"); - list1.add(new Long(1000)); - listCollection.add(list1); - - List list2 = new LinkedList(); - list2.add(new Long(1)); - list2.add(new Long(100)); - list2.add("aa"); - 
listCollection.add(list2); - - List list3 = new LinkedList(); - list3.add(new Long(1)); - list3.add(new Long(300)); - list3.add("aa"); - listCollection.add(list3); - - printCollection(); - - ListComparator> comparator = new ListComparator<>(new OrderByPair(2, false), - new OrderByPair(1, true)); - Collections.sort(listCollection, comparator); - printCollection(); - Assert.assertEquals(list3, listCollection.get(0)); - } - -} +package com.baidu.palo.common.util; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; + +public class ListComparatorTest { + + List> listCollection; + + @Before + public void setUp() { + listCollection = new LinkedList>(); + } + + private void printCollection() { + System.out.println("LIST:"); + for (List list : listCollection) { + for (Comparable comparable : list) { + System.out.print(comparable + " | "); + } + System.out.println(); + } + System.out.println("END LIST\n"); + } + + @Test + public void test_1() { + // 1, 200, "bcd", 2000 + // 1, 200, "abc" + List list1 = new LinkedList(); + list1.add(new Long(1)); + list1.add(new Long(200)); + list1.add("bcd"); + list1.add(new Long(1000)); + listCollection.add(list1); + + List list2 = new LinkedList(); + list2.add(new Long(1)); + list2.add(new Long(200)); + list2.add("abc"); + listCollection.add(list2); + + printCollection(); + + ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), + new OrderByPair(2, false)); + Collections.sort(listCollection, comparator); + printCollection(); + + Assert.assertEquals(list2, listCollection.get(0)); + } + + @Test + public void test_2() { + // 1, 200, "abc", 1000 + // 1, 200, "abc" + List list1 = new LinkedList(); + list1.add(new Long(1)); + list1.add(new Long(200)); + list1.add("abc"); + list1.add(new Long(1000)); + listCollection.add(list1); + + List list2 = new LinkedList(); + list2.add(new Long(1)); + list2.add(new Long(200)); + list2.add("abc"); + listCollection.add(list2); + + printCollection(); + + ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), + new OrderByPair(2, false)); + Collections.sort(listCollection, comparator); + printCollection(); + Assert.assertEquals(list2, listCollection.get(0)); + } + + @Test(expected = ClassCastException.class) + public void test_3() { + // 1, 200, "abc", 2000 + // 1, 200, "abc", "bcd" + List list1 = new LinkedList(); + list1.add(new Long(1)); + list1.add(new Long(200)); + list1.add("abc"); + list1.add(new Long(2000)); + listCollection.add(list1); + + List list2 = new LinkedList(); + list2.add(new Long(1)); + list2.add(new Long(200)); + list2.add("abc"); + list2.add("bcd"); + listCollection.add(list2); + + printCollection(); + + ListComparator> comparator = new ListComparator<>(new OrderByPair(1, false), + new OrderByPair(3, false)); + Collections.sort(listCollection, comparator); + Assert.fail(); + } + + @Test + public void test_4() { + // 1, 200, "bb", 2000 + // 1, 300, "aa" + List list1 = new LinkedList(); + list1.add(new Long(1)); + list1.add(new Long(200)); + list1.add("bb"); + list1.add(new Long(1000)); + listCollection.add(list1); + + List list2 = new LinkedList(); + list2.add(new Long(1)); + list2.add(new Long(300)); + list2.add("aa"); + listCollection.add(list2); + + printCollection(); + + ListComparator> comparator = new ListComparator<>(new OrderByPair(2, false), + new OrderByPair(1, false)); + Collections.sort(listCollection, comparator); + printCollection(); + 
Assert.assertEquals(list2, listCollection.get(0)); + } + + @Test + public void test_5() { + // 1, 200, "bb", 2000 + // 1, 100, "aa" + // 1, 300, "aa" + List list1 = new LinkedList(); + list1.add(new Long(1)); + list1.add(new Long(200)); + list1.add("bb"); + list1.add(new Long(1000)); + listCollection.add(list1); + + List list2 = new LinkedList(); + list2.add(new Long(1)); + list2.add(new Long(100)); + list2.add("aa"); + listCollection.add(list2); + + List list3 = new LinkedList(); + list3.add(new Long(1)); + list3.add(new Long(300)); + list3.add("aa"); + listCollection.add(list3); + + printCollection(); + + ListComparator> comparator = new ListComparator<>(new OrderByPair(2, false), + new OrderByPair(1, true)); + Collections.sort(listCollection, comparator); + printCollection(); + Assert.assertEquals(list3, listCollection.get(0)); + } + +} diff --git a/fe/test/com/baidu/palo/common/util/RuntimeProfileTest.java b/fe/test/com/baidu/palo/common/util/RuntimeProfileTest.java index 4b54064c16..89fae7b13b 100644 --- a/fe/test/com/baidu/palo/common/util/RuntimeProfileTest.java +++ b/fe/test/com/baidu/palo/common/util/RuntimeProfileTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - +package com.baidu.palo.common.util; + import com.baidu.palo.thrift.TCounter; import com.baidu.palo.thrift.TRuntimeProfileNode; import com.baidu.palo.thrift.TRuntimeProfileTree; @@ -35,146 +35,146 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; -import java.util.Set; - -public class RuntimeProfileTest { - - @Test - public void testSortChildren() { - RuntimeProfile profile = new RuntimeProfile(); - // init profile - RuntimeProfile profile1 = new RuntimeProfile(); - RuntimeProfile profile2 = new RuntimeProfile(); - RuntimeProfile profile3 = new RuntimeProfile(); - profile1.getCounterTotalTime().setValue(1); - profile2.getCounterTotalTime().setValue(3); - profile3.getCounterTotalTime().setValue(2); - profile.addChild(profile1); - profile.addChild(profile2); - profile.addChild(profile3); - // compare - profile.sortChildren(); - // check result - long time0 = profile.getChildList().get(0).first.getCounterTotalTime().getValue(); - long time1 = profile.getChildList().get(1).first.getCounterTotalTime().getValue(); - long time2 = profile.getChildList().get(2).first.getCounterTotalTime().getValue(); - - Assert.assertEquals(3, time0); - Assert.assertEquals(2, time1); - Assert.assertEquals(1, time2); - } - - @Test - public void testInfoStrings() { - RuntimeProfile profile = new RuntimeProfile("profileName"); - - // not exists key - Assert.assertNull(profile.getInfoString("key")); - // normal add and get - profile.addInfoString("key", "value"); - String value = profile.getInfoString("key"); - Assert.assertNotNull(value); - Assert.assertEquals(value, "value"); - // from thrift to profile and first update - TRuntimeProfileTree tprofileTree = new TRuntimeProfileTree(); - TRuntimeProfileNode tnode = new TRuntimeProfileNode(); - tprofileTree.addToNodes(tnode); - tnode.info_strings = new HashMap(); - tnode.info_strings.put("key", "value2"); - tnode.info_strings.put("key3", "value3"); - tnode.info_strings_display_order = new ArrayList(); - tnode.info_strings_display_order.add("key"); - tnode.info_strings_display_order.add("key3"); - - profile.update(tprofileTree); - Assert.assertEquals(profile.getInfoString("key"), "value2"); - Assert.assertEquals(profile.getInfoString("key3"), 
"value3"); - // second update - tnode.info_strings.put("key", "value4"); - - profile.update(tprofileTree); - Assert.assertEquals(profile.getInfoString("key"), "value4"); - - StringBuilder builder = new StringBuilder(); - profile.prettyPrint(builder, ""); - Assert.assertEquals(builder.toString(), - "profileName:\n key: value4\n key3: value3\n"); - } - - @Test - public void testCounter() { - RuntimeProfile profile = new RuntimeProfile(); - profile.addCounter("key", TUnit.UNIT, ""); - Assert.assertNotNull(profile.getCounterMap().get("key")); - Assert.assertNull(profile.getCounterMap().get("key2")); - profile.getCounterMap().get("key").setValue(1); - Assert.assertEquals(profile.getCounterMap().get("key").getValue(), 1); - } - - @Test - public void testUpdate() throws IOException { - RuntimeProfile profile = new RuntimeProfile("REAL_ROOT"); - /* the profile tree - * ROOT(time=5s info[key=value]) - * A(time=2s) B(time=1s info[BInfo1=BValu1;BInfo2=BValue2]) - * A_SON(time=10ms counter[counterA1=1; counterA2=2; counterA1Son=3]) - */ - TRuntimeProfileTree tprofileTree = new TRuntimeProfileTree(); - TRuntimeProfileNode tnodeRoot = new TRuntimeProfileNode(); - TRuntimeProfileNode tnodeA = new TRuntimeProfileNode(); - TRuntimeProfileNode tnodeB = new TRuntimeProfileNode(); - TRuntimeProfileNode tnodeASon = new TRuntimeProfileNode(); - tnodeRoot.num_children = 2; - tnodeA.num_children = 1; - tnodeASon.num_children = 0; - tnodeB.num_children = 0; - tprofileTree.addToNodes(tnodeRoot); - tprofileTree.addToNodes(tnodeA); - tprofileTree.addToNodes(tnodeASon); - tprofileTree.addToNodes(tnodeB); - tnodeRoot.info_strings = new HashMap(); - tnodeRoot.info_strings.put("key", "value"); - tnodeRoot.info_strings_display_order = new ArrayList(); - tnodeRoot.info_strings_display_order.add("key"); - tnodeRoot.counters = Lists.newArrayList(); - tnodeA.counters = Lists.newArrayList(); - tnodeB.counters = Lists.newArrayList(); - tnodeASon.counters = Lists.newArrayList(); - +import java.util.Set; + +public class RuntimeProfileTest { + + @Test + public void testSortChildren() { + RuntimeProfile profile = new RuntimeProfile(); + // init profile + RuntimeProfile profile1 = new RuntimeProfile(); + RuntimeProfile profile2 = new RuntimeProfile(); + RuntimeProfile profile3 = new RuntimeProfile(); + profile1.getCounterTotalTime().setValue(1); + profile2.getCounterTotalTime().setValue(3); + profile3.getCounterTotalTime().setValue(2); + profile.addChild(profile1); + profile.addChild(profile2); + profile.addChild(profile3); + // compare + profile.sortChildren(); + // check result + long time0 = profile.getChildList().get(0).first.getCounterTotalTime().getValue(); + long time1 = profile.getChildList().get(1).first.getCounterTotalTime().getValue(); + long time2 = profile.getChildList().get(2).first.getCounterTotalTime().getValue(); + + Assert.assertEquals(3, time0); + Assert.assertEquals(2, time1); + Assert.assertEquals(1, time2); + } + + @Test + public void testInfoStrings() { + RuntimeProfile profile = new RuntimeProfile("profileName"); + + // not exists key + Assert.assertNull(profile.getInfoString("key")); + // normal add and get + profile.addInfoString("key", "value"); + String value = profile.getInfoString("key"); + Assert.assertNotNull(value); + Assert.assertEquals(value, "value"); + // from thrift to profile and first update + TRuntimeProfileTree tprofileTree = new TRuntimeProfileTree(); + TRuntimeProfileNode tnode = new TRuntimeProfileNode(); + tprofileTree.addToNodes(tnode); + tnode.info_strings = new HashMap(); + 
tnode.info_strings.put("key", "value2"); + tnode.info_strings.put("key3", "value3"); + tnode.info_strings_display_order = new ArrayList(); + tnode.info_strings_display_order.add("key"); + tnode.info_strings_display_order.add("key3"); + + profile.update(tprofileTree); + Assert.assertEquals(profile.getInfoString("key"), "value2"); + Assert.assertEquals(profile.getInfoString("key3"), "value3"); + // second update + tnode.info_strings.put("key", "value4"); + + profile.update(tprofileTree); + Assert.assertEquals(profile.getInfoString("key"), "value4"); + + StringBuilder builder = new StringBuilder(); + profile.prettyPrint(builder, ""); + Assert.assertEquals(builder.toString(), + "profileName:\n key: value4\n key3: value3\n"); + } + + @Test + public void testCounter() { + RuntimeProfile profile = new RuntimeProfile(); + profile.addCounter("key", TUnit.UNIT, ""); + Assert.assertNotNull(profile.getCounterMap().get("key")); + Assert.assertNull(profile.getCounterMap().get("key2")); + profile.getCounterMap().get("key").setValue(1); + Assert.assertEquals(profile.getCounterMap().get("key").getValue(), 1); + } + + @Test + public void testUpdate() throws IOException { + RuntimeProfile profile = new RuntimeProfile("REAL_ROOT"); + /* the profile tree + * ROOT(time=5s info[key=value]) + * A(time=2s) B(time=1s info[BInfo1=BValu1;BInfo2=BValue2]) + * A_SON(time=10ms counter[counterA1=1; counterA2=2; counterA1Son=3]) + */ + TRuntimeProfileTree tprofileTree = new TRuntimeProfileTree(); + TRuntimeProfileNode tnodeRoot = new TRuntimeProfileNode(); + TRuntimeProfileNode tnodeA = new TRuntimeProfileNode(); + TRuntimeProfileNode tnodeB = new TRuntimeProfileNode(); + TRuntimeProfileNode tnodeASon = new TRuntimeProfileNode(); + tnodeRoot.num_children = 2; + tnodeA.num_children = 1; + tnodeASon.num_children = 0; + tnodeB.num_children = 0; + tprofileTree.addToNodes(tnodeRoot); + tprofileTree.addToNodes(tnodeA); + tprofileTree.addToNodes(tnodeASon); + tprofileTree.addToNodes(tnodeB); + tnodeRoot.info_strings = new HashMap(); + tnodeRoot.info_strings.put("key", "value"); + tnodeRoot.info_strings_display_order = new ArrayList(); + tnodeRoot.info_strings_display_order.add("key"); + tnodeRoot.counters = Lists.newArrayList(); + tnodeA.counters = Lists.newArrayList(); + tnodeB.counters = Lists.newArrayList(); + tnodeASon.counters = Lists.newArrayList(); + tnodeRoot.counters.add(new TCounter("TotalTime", TUnit.TIME_NS, 3000000000L)); tnodeA.counters.add(new TCounter("TotalTime", TUnit.TIME_NS, 1000000000L)); tnodeB.counters.add(new TCounter("TotalTime", TUnit.TIME_NS, 1000000000L)); tnodeASon.counters.add(new TCounter("TotalTime", TUnit.TIME_NS, 10000000)); tnodeASon.counters.add(new TCounter("counterA1", TUnit.UNIT, 1)); tnodeASon.counters.add(new TCounter("counterA2", TUnit.BYTES, 1234567L)); - tnodeASon.counters.add(new TCounter("counterA1Son", TUnit.UNIT, 3)); - tnodeASon.child_counters_map = Maps.newHashMap(); - - Set set1 = Sets.newHashSet(); - set1.add("counterA1"); - set1.add("counterA2"); - tnodeASon.child_counters_map.put("", set1); - Set set2 = Sets.newHashSet(); - set2.add("counterA1Son"); - tnodeASon.child_counters_map.put("counterA1", set2); - tnodeB.info_strings = Maps.newHashMap(); - tnodeB.info_strings_display_order = Lists.newArrayList(); - tnodeB.info_strings.put("BInfo1", "BValue1"); - tnodeB.info_strings.put("BInfo2", "BValue2"); - tnodeB.info_strings_display_order.add("BInfo2"); - tnodeB.info_strings_display_order.add("BInfo1"); - tnodeRoot.indent = true; - tnodeA.indent = true; - tnodeB.indent = true; - 
tnodeASon.indent = true; - tnodeRoot.name = "ROOT"; - tnodeA.name = "A"; - tnodeB.name = "B"; - tnodeASon.name = "ASON"; - - profile.update(tprofileTree); - StringBuilder builder = new StringBuilder(); - profile.computeTimeInProfile(); - profile.prettyPrint(builder, ""); - } -} + tnodeASon.counters.add(new TCounter("counterA1Son", TUnit.UNIT, 3)); + tnodeASon.child_counters_map = Maps.newHashMap(); + + Set set1 = Sets.newHashSet(); + set1.add("counterA1"); + set1.add("counterA2"); + tnodeASon.child_counters_map.put("", set1); + Set set2 = Sets.newHashSet(); + set2.add("counterA1Son"); + tnodeASon.child_counters_map.put("counterA1", set2); + tnodeB.info_strings = Maps.newHashMap(); + tnodeB.info_strings_display_order = Lists.newArrayList(); + tnodeB.info_strings.put("BInfo1", "BValue1"); + tnodeB.info_strings.put("BInfo2", "BValue2"); + tnodeB.info_strings_display_order.add("BInfo2"); + tnodeB.info_strings_display_order.add("BInfo1"); + tnodeRoot.indent = true; + tnodeA.indent = true; + tnodeB.indent = true; + tnodeASon.indent = true; + tnodeRoot.name = "ROOT"; + tnodeA.name = "A"; + tnodeB.name = "B"; + tnodeASon.name = "ASON"; + + profile.update(tprofileTree); + StringBuilder builder = new StringBuilder(); + profile.computeTimeInProfile(); + profile.prettyPrint(builder, ""); + } +} diff --git a/fe/test/com/baidu/palo/common/util/TimeUtilsTest.java b/fe/test/com/baidu/palo/common/util/TimeUtilsTest.java index e3c1838871..fc49a09155 100644 --- a/fe/test/com/baidu/palo/common/util/TimeUtilsTest.java +++ b/fe/test/com/baidu/palo/common/util/TimeUtilsTest.java @@ -18,126 +18,126 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.common.util; - -import com.baidu.palo.analysis.DateLiteral; -import com.baidu.palo.catalog.PrimitiveType; -import com.baidu.palo.catalog.ScalarType; -import com.baidu.palo.common.AnalysisException; - -import org.junit.Assert; -import org.junit.Test; - -import java.util.LinkedList; -import java.util.List; - -public class TimeUtilsTest { - - @Test - public void testNormal() { - Assert.assertNotNull(TimeUtils.getCurrentFormatTime()); - Assert.assertNotNull(TimeUtils.getStartTime()); - Assert.assertTrue(TimeUtils.getEstimatedTime(0L) > 0); - - Assert.assertEquals(-2209017600000L, TimeUtils.MIN_DATE.getTime()); - Assert.assertEquals(253402185600000L, TimeUtils.MAX_DATE.getTime()); - Assert.assertEquals(-2209017600000L, TimeUtils.MIN_DATETIME.getTime()); - Assert.assertEquals(253402271999000L, TimeUtils.MAX_DATETIME.getTime()); - } - - @Test - public void testDateParse() { - // date - List validDateList = new LinkedList<>(); - validDateList.add("2013-12-02"); - validDateList.add("2013-12-02"); - validDateList.add("2013-12-2"); - validDateList.add("2013-12-2"); - validDateList.add("9999-12-31"); - validDateList.add("1900-01-01"); - validDateList.add("2013-2-28"); - for (String validDate : validDateList) { - try { - TimeUtils.parseDate(validDate, PrimitiveType.DATE); - } catch (AnalysisException e) { - e.printStackTrace(); - System.out.println(validDate); - Assert.fail(); - } - } - - List invalidDateList = new LinkedList<>(); - invalidDateList.add("2013-12-02 "); - invalidDateList.add(" 2013-12-02"); - invalidDateList.add("20131-2-28"); - invalidDateList.add("a2013-2-28"); - invalidDateList.add("2013-22-28"); - invalidDateList.add("2013-2-29"); - invalidDateList.add("2013-2-28 2:3:4"); - for (String invalidDate : invalidDateList) { - try { - TimeUtils.parseDate(invalidDate, PrimitiveType.DATE); - Assert.fail(); - } catch 
(AnalysisException e) { - Assert.assertTrue(e.getMessage().contains("Invalid")); - } - } - - // datetime - List validDateTimeList = new LinkedList<>(); - validDateTimeList.add("2013-12-02 13:59:59"); - validDateTimeList.add("2013-12-2 13:59:59"); - validDateTimeList.add("2013-12-2 1:59:59"); - validDateTimeList.add("2013-12-2 3:1:1"); - validDateTimeList.add("9999-12-31 23:59:59"); - validDateTimeList.add("1900-01-01 00:00:00"); - validDateTimeList.add("2013-2-28 23:59:59"); - validDateTimeList.add("2013-2-28 2:3:4"); - validDateTimeList.add("2014-05-07 19:8:50"); - for (String validDateTime : validDateTimeList) { - try { - TimeUtils.parseDate(validDateTime, PrimitiveType.DATETIME); - } catch (AnalysisException e) { - e.printStackTrace(); - System.out.println(validDateTime); - Assert.fail(); - } - } - - List invalidDateTimeList = new LinkedList<>(); - invalidDateTimeList.add("2013-12-02 12:12:10"); - invalidDateTimeList.add(" 2013-12-02 12:12:10 "); - invalidDateTimeList.add("20131-2-28 12:12:10"); - invalidDateTimeList.add("a2013-2-28 12:12:10"); - invalidDateTimeList.add("2013-22-28 12:12:10"); - invalidDateTimeList.add("2013-2-29 12:12:10"); - invalidDateTimeList.add("2013-2-28"); - invalidDateTimeList.add("2013-13-01 12:12:12"); - for (String invalidDateTime : invalidDateTimeList) { - try { - TimeUtils.parseDate(invalidDateTime, PrimitiveType.DATETIME); - Assert.fail(); - } catch (AnalysisException e) { - Assert.assertTrue(e.getMessage().contains("Invalid")); - } - } - } - - @Test - public void testDateTrans() throws AnalysisException { - Assert.assertEquals("N/A", TimeUtils.longToTimeString(-2)); - - long timestamp = 1426125600000L; - Assert.assertEquals("2015-03-12 10:00:00", TimeUtils.longToTimeString(timestamp)); - - DateLiteral date = new DateLiteral("2015-03-01", ScalarType.DATE); - Assert.assertEquals(1031777L, date.getRealValue()); - - DateLiteral datetime = new DateLiteral("2015-03-01 12:00:00", ScalarType.DATETIME); - Assert.assertEquals(20150301120000L, datetime.getRealValue()); - - Assert.assertEquals("2015-03-01", TimeUtils.format(date.getValue(), date.getType())); - Assert.assertEquals("2015-03-01 12:00:00", TimeUtils.format(datetime.getValue(), datetime.getType())); - } - -} +package com.baidu.palo.common.util; + +import com.baidu.palo.analysis.DateLiteral; +import com.baidu.palo.catalog.PrimitiveType; +import com.baidu.palo.catalog.ScalarType; +import com.baidu.palo.common.AnalysisException; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.LinkedList; +import java.util.List; + +public class TimeUtilsTest { + + @Test + public void testNormal() { + Assert.assertNotNull(TimeUtils.getCurrentFormatTime()); + Assert.assertNotNull(TimeUtils.getStartTime()); + Assert.assertTrue(TimeUtils.getEstimatedTime(0L) > 0); + + Assert.assertEquals(-2209017600000L, TimeUtils.MIN_DATE.getTime()); + Assert.assertEquals(253402185600000L, TimeUtils.MAX_DATE.getTime()); + Assert.assertEquals(-2209017600000L, TimeUtils.MIN_DATETIME.getTime()); + Assert.assertEquals(253402271999000L, TimeUtils.MAX_DATETIME.getTime()); + } + + @Test + public void testDateParse() { + // date + List validDateList = new LinkedList<>(); + validDateList.add("2013-12-02"); + validDateList.add("2013-12-02"); + validDateList.add("2013-12-2"); + validDateList.add("2013-12-2"); + validDateList.add("9999-12-31"); + validDateList.add("1900-01-01"); + validDateList.add("2013-2-28"); + for (String validDate : validDateList) { + try { + TimeUtils.parseDate(validDate, PrimitiveType.DATE); + } catch 
(AnalysisException e) { + e.printStackTrace(); + System.out.println(validDate); + Assert.fail(); + } + } + + List invalidDateList = new LinkedList<>(); + invalidDateList.add("2013-12-02 "); + invalidDateList.add(" 2013-12-02"); + invalidDateList.add("20131-2-28"); + invalidDateList.add("a2013-2-28"); + invalidDateList.add("2013-22-28"); + invalidDateList.add("2013-2-29"); + invalidDateList.add("2013-2-28 2:3:4"); + for (String invalidDate : invalidDateList) { + try { + TimeUtils.parseDate(invalidDate, PrimitiveType.DATE); + Assert.fail(); + } catch (AnalysisException e) { + Assert.assertTrue(e.getMessage().contains("Invalid")); + } + } + + // datetime + List validDateTimeList = new LinkedList<>(); + validDateTimeList.add("2013-12-02 13:59:59"); + validDateTimeList.add("2013-12-2 13:59:59"); + validDateTimeList.add("2013-12-2 1:59:59"); + validDateTimeList.add("2013-12-2 3:1:1"); + validDateTimeList.add("9999-12-31 23:59:59"); + validDateTimeList.add("1900-01-01 00:00:00"); + validDateTimeList.add("2013-2-28 23:59:59"); + validDateTimeList.add("2013-2-28 2:3:4"); + validDateTimeList.add("2014-05-07 19:8:50"); + for (String validDateTime : validDateTimeList) { + try { + TimeUtils.parseDate(validDateTime, PrimitiveType.DATETIME); + } catch (AnalysisException e) { + e.printStackTrace(); + System.out.println(validDateTime); + Assert.fail(); + } + } + + List invalidDateTimeList = new LinkedList<>(); + invalidDateTimeList.add("2013-12-02 12:12:10"); + invalidDateTimeList.add(" 2013-12-02 12:12:10 "); + invalidDateTimeList.add("20131-2-28 12:12:10"); + invalidDateTimeList.add("a2013-2-28 12:12:10"); + invalidDateTimeList.add("2013-22-28 12:12:10"); + invalidDateTimeList.add("2013-2-29 12:12:10"); + invalidDateTimeList.add("2013-2-28"); + invalidDateTimeList.add("2013-13-01 12:12:12"); + for (String invalidDateTime : invalidDateTimeList) { + try { + TimeUtils.parseDate(invalidDateTime, PrimitiveType.DATETIME); + Assert.fail(); + } catch (AnalysisException e) { + Assert.assertTrue(e.getMessage().contains("Invalid")); + } + } + } + + @Test + public void testDateTrans() throws AnalysisException { + Assert.assertEquals("N/A", TimeUtils.longToTimeString(-2)); + + long timestamp = 1426125600000L; + Assert.assertEquals("2015-03-12 10:00:00", TimeUtils.longToTimeString(timestamp)); + + DateLiteral date = new DateLiteral("2015-03-01", ScalarType.DATE); + Assert.assertEquals(1031777L, date.getRealValue()); + + DateLiteral datetime = new DateLiteral("2015-03-01 12:00:00", ScalarType.DATETIME); + Assert.assertEquals(20150301120000L, datetime.getRealValue()); + + Assert.assertEquals("2015-03-01", TimeUtils.format(date.getValue(), date.getType())); + Assert.assertEquals("2015-03-01 12:00:00", TimeUtils.format(datetime.getValue(), datetime.getType())); + } + +} diff --git a/fe/test/com/baidu/palo/persist/CreateTableInfoTest.java b/fe/test/com/baidu/palo/persist/CreateTableInfoTest.java index f5a3b1702a..0276a1f784 100644 --- a/fe/test/com/baidu/palo/persist/CreateTableInfoTest.java +++ b/fe/test/com/baidu/palo/persist/CreateTableInfoTest.java @@ -17,103 +17,103 @@ // specific language governing permissions and limitations // under the License. 
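The persist tests that follow (CreateTableInfoTest here, then DropInfoTest and ReplicaPersistInfoTest) all use the same round-trip idiom: write the object to a DataOutputStream over a scratch file, read it back, and compare with equals. Below is a minimal sketch of that idiom using DropInfo, whose constructor, write, and read calls appear in DropInfoTest further down; the file name and wrapping class are illustrative, not part of the patch.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;

import org.junit.Assert;

import com.baidu.palo.persist.DropInfo;

public class PersistRoundTripSketch {
    public static void main(String[] args) throws Exception {
        File file = new File("./dropInfoSketch");      // scratch file, removed at the end
        file.createNewFile();

        // 1. write the object
        DataOutputStream dos = new DataOutputStream(new FileOutputStream(file));
        DropInfo info = new DropInfo(1, 2, -1);        // dbId = 1, tableId = 2, as in DropInfoTest
        info.write(dos);
        dos.flush();
        dos.close();

        // 2. read it back and compare
        DataInputStream dis = new DataInputStream(new FileInputStream(file));
        DropInfo readBack = DropInfo.read(dis);
        Assert.assertTrue(info.equals(readBack));
        dis.close();

        // 3. clean up
        file.delete();
    }
}

Classes whose readFields depends on the metadata version need an extra stub: CreateTableInfoTest, for example, pins Catalog.getCurrentCatalogJournalVersion() to FeConstants.meta_version before reading.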
-package com.baidu.palo.persist; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.util.ArrayList; -import java.util.List; - -import org.easymock.EasyMock; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.powermock.api.easymock.PowerMock; -import org.powermock.core.classloader.annotations.PowerMockIgnore; -import org.powermock.core.classloader.annotations.PrepareForTest; -import org.powermock.modules.junit4.PowerMockRunner; - -import com.baidu.palo.catalog.AggregateType; -import com.baidu.palo.catalog.Catalog; -import com.baidu.palo.catalog.Column; -import com.baidu.palo.catalog.ColumnType; -import com.baidu.palo.catalog.KeysType; -import com.baidu.palo.catalog.MaterializedIndex; -import com.baidu.palo.catalog.OlapTable; -import com.baidu.palo.catalog.Partition; -import com.baidu.palo.catalog.PrimitiveType; -import com.baidu.palo.catalog.RandomDistributionInfo; -import com.baidu.palo.catalog.SinglePartitionInfo; -import com.baidu.palo.catalog.MaterializedIndex.IndexState; -import com.baidu.palo.common.FeConstants; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class CreateTableInfoTest { - private Catalog catalog; - - @Before - public void setUp() { - catalog = EasyMock.createMock(Catalog.class); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); - PowerMock.replay(Catalog.class); - } - - @Test - public void testSerialization() throws Exception { - // 1. 
Write objects to file - File file = new File("./createTableInfo"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - List columns = new ArrayList(); - columns.add(new Column("column2", - ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); - columns.add(new Column("column3", - ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); - columns.add(new Column("column4", - ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column5", - ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column6", - ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column7", - ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); - columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); - columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); - columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); - columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); - - MaterializedIndex index = new MaterializedIndex(1, IndexState.NORMAL); - RandomDistributionInfo distributionInfo = new RandomDistributionInfo(10); - Partition partition = new Partition(20000L, "table", index, distributionInfo); - OlapTable table = new OlapTable(1000L, "table", columns, KeysType.AGG_KEYS, - new SinglePartitionInfo(), distributionInfo); - table.addPartition(partition); - CreateTableInfo info = new CreateTableInfo("db1", table); - info.write(dos); - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - - CreateTableInfo rInfo1 = CreateTableInfo.read(dis); - Assert.assertTrue(rInfo1.getTable().equals(table)); - Assert.assertTrue(rInfo1.equals(info)); - Assert.assertEquals(rInfo1.getDbName(), "db1"); - - // 3. 
delete files - dis.close(); - file.delete(); - } -} +package com.baidu.palo.persist; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.util.ArrayList; +import java.util.List; + +import org.easymock.EasyMock; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.powermock.api.easymock.PowerMock; +import org.powermock.core.classloader.annotations.PowerMockIgnore; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.baidu.palo.catalog.AggregateType; +import com.baidu.palo.catalog.Catalog; +import com.baidu.palo.catalog.Column; +import com.baidu.palo.catalog.ColumnType; +import com.baidu.palo.catalog.KeysType; +import com.baidu.palo.catalog.MaterializedIndex; +import com.baidu.palo.catalog.OlapTable; +import com.baidu.palo.catalog.Partition; +import com.baidu.palo.catalog.PrimitiveType; +import com.baidu.palo.catalog.RandomDistributionInfo; +import com.baidu.palo.catalog.SinglePartitionInfo; +import com.baidu.palo.catalog.MaterializedIndex.IndexState; +import com.baidu.palo.common.FeConstants; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class CreateTableInfoTest { + private Catalog catalog; + + @Before + public void setUp() { + catalog = EasyMock.createMock(Catalog.class); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + EasyMock.expect(Catalog.getCurrentCatalogJournalVersion()).andReturn(FeConstants.meta_version).anyTimes(); + PowerMock.replay(Catalog.class); + } + + @Test + public void testSerialization() throws Exception { + // 1. 
Write objects to file + File file = new File("./createTableInfo"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + List columns = new ArrayList(); + columns.add(new Column("column2", + ColumnType.createType(PrimitiveType.TINYINT), false, AggregateType.MIN, "", "")); + columns.add(new Column("column3", + ColumnType.createType(PrimitiveType.SMALLINT), false, AggregateType.SUM, "", "")); + columns.add(new Column("column4", + ColumnType.createType(PrimitiveType.INT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column5", + ColumnType.createType(PrimitiveType.BIGINT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column6", + ColumnType.createType(PrimitiveType.FLOAT), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column7", + ColumnType.createType(PrimitiveType.DOUBLE), false, AggregateType.REPLACE, "", "")); + columns.add(new Column("column8", ColumnType.createChar(10), true, null, "", "")); + columns.add(new Column("column9", ColumnType.createVarchar(10), true, null, "", "")); + columns.add(new Column("column10", ColumnType.createType(PrimitiveType.DATE), true, null, "", "")); + columns.add(new Column("column11", ColumnType.createType(PrimitiveType.DATETIME), true, null, "", "")); + + MaterializedIndex index = new MaterializedIndex(1, IndexState.NORMAL); + RandomDistributionInfo distributionInfo = new RandomDistributionInfo(10); + Partition partition = new Partition(20000L, "table", index, distributionInfo); + OlapTable table = new OlapTable(1000L, "table", columns, KeysType.AGG_KEYS, + new SinglePartitionInfo(), distributionInfo); + table.addPartition(partition); + CreateTableInfo info = new CreateTableInfo("db1", table); + info.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + + CreateTableInfo rInfo1 = CreateTableInfo.read(dis); + Assert.assertTrue(rInfo1.getTable().equals(table)); + Assert.assertTrue(rInfo1.equals(info)); + Assert.assertEquals(rInfo1.getDbName(), "db1"); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/persist/DropInfoTest.java b/fe/test/com/baidu/palo/persist/DropInfoTest.java index d694d257a5..a35c3b3928 100644 --- a/fe/test/com/baidu/palo/persist/DropInfoTest.java +++ b/fe/test/com/baidu/palo/persist/DropInfoTest.java @@ -17,54 +17,54 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; - -public class DropInfoTest { - @Test - public void testSerialization() throws Exception { - // 1. Write objects to file - File file = new File("./dropInfo"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - DropInfo info1 = new DropInfo(); - info1.write(dos); - - DropInfo info2 = new DropInfo(1, 2, -1); - info2.write(dos); - - dos.flush(); - dos.close(); - - // 2. 
Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - - DropInfo rInfo1 = DropInfo.read(dis); - Assert.assertTrue(rInfo1.equals(info1)); - - DropInfo rInfo2 = DropInfo.read(dis); - Assert.assertTrue(rInfo2.equals(info2)); - - Assert.assertEquals(1, rInfo2.getDbId()); - Assert.assertEquals(2, rInfo2.getTableId()); - - Assert.assertTrue(rInfo2.equals(rInfo2)); - Assert.assertFalse(rInfo2.equals(this)); - Assert.assertFalse(info2.equals(new DropInfo(0, 2, -1L))); - Assert.assertFalse(info2.equals(new DropInfo(1, 0, -1L))); - Assert.assertTrue(info2.equals(new DropInfo(1, 2, -1L))); - - // 3. delete files - dis.close(); - file.delete(); - } -} +package com.baidu.palo.persist; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; + +public class DropInfoTest { + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./dropInfo"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + DropInfo info1 = new DropInfo(); + info1.write(dos); + + DropInfo info2 = new DropInfo(1, 2, -1); + info2.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + + DropInfo rInfo1 = DropInfo.read(dis); + Assert.assertTrue(rInfo1.equals(info1)); + + DropInfo rInfo2 = DropInfo.read(dis); + Assert.assertTrue(rInfo2.equals(info2)); + + Assert.assertEquals(1, rInfo2.getDbId()); + Assert.assertEquals(2, rInfo2.getTableId()); + + Assert.assertTrue(rInfo2.equals(rInfo2)); + Assert.assertFalse(rInfo2.equals(this)); + Assert.assertFalse(info2.equals(new DropInfo(0, 2, -1L))); + Assert.assertFalse(info2.equals(new DropInfo(1, 0, -1L))); + Assert.assertTrue(info2.equals(new DropInfo(1, 2, -1L))); + + // 3. delete files + dis.close(); + file.delete(); + } +} diff --git a/fe/test/com/baidu/palo/persist/EditLogTest.java b/fe/test/com/baidu/palo/persist/EditLogTest.java index 8d12421fca..bde404f9df 100644 --- a/fe/test/com/baidu/palo/persist/EditLogTest.java +++ b/fe/test/com/baidu/palo/persist/EditLogTest.java @@ -17,91 +17,91 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import org.junit.Test; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; - -public class EditLogTest { - private String meta = "editLogTestDir/"; - - public void mkdir() { - File dir = new File(meta); - if (!dir.exists()) { - dir.mkdir(); - } else { - File[] files = dir.listFiles(); - for (File file : files) { - if (file.isFile()) { - file.delete(); - } - } - } - } - - public void addFiles(int image, int edit) { - File imageFile = new File(meta + "image." + image); - try { - imageFile.createNewFile(); - } catch (IOException e) { - e.printStackTrace(); - } - - for (int i = 1; i <= edit; i++) { - File editFile = new File(meta + "edits." 
+ i); - try { - editFile.createNewFile(); - } catch (IOException e) { - e.printStackTrace(); - } - } - - File current = new File(meta + "edits"); - try { - current.createNewFile(); - } catch (IOException e) { - e.printStackTrace(); - } - - File version = new File(meta + "VERSION"); - try { - version.createNewFile(); - String line1 = "#Mon Feb 02 13:59:54 CST 2015\n"; - String line2 = "clusterId=966271669"; - FileWriter fw = new FileWriter(version); - fw.write(line1); - fw.write(line2); - fw.flush(); - fw.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - - public void deleteDir() { - File dir = new File(meta); - if (dir.exists()) { - File[] files = dir.listFiles(); - for (File file : files) { - if (file.isFile()) { - file.delete(); - } - } - - dir.delete(); - } - } - - @Test - public void testWriteLog() throws IOException { - - } - - @Test - public void test() { - - } -} +package com.baidu.palo.persist; + +import org.junit.Test; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; + +public class EditLogTest { + private String meta = "editLogTestDir/"; + + public void mkdir() { + File dir = new File(meta); + if (!dir.exists()) { + dir.mkdir(); + } else { + File[] files = dir.listFiles(); + for (File file : files) { + if (file.isFile()) { + file.delete(); + } + } + } + } + + public void addFiles(int image, int edit) { + File imageFile = new File(meta + "image." + image); + try { + imageFile.createNewFile(); + } catch (IOException e) { + e.printStackTrace(); + } + + for (int i = 1; i <= edit; i++) { + File editFile = new File(meta + "edits." + i); + try { + editFile.createNewFile(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + File current = new File(meta + "edits"); + try { + current.createNewFile(); + } catch (IOException e) { + e.printStackTrace(); + } + + File version = new File(meta + "VERSION"); + try { + version.createNewFile(); + String line1 = "#Mon Feb 02 13:59:54 CST 2015\n"; + String line2 = "clusterId=966271669"; + FileWriter fw = new FileWriter(version); + fw.write(line1); + fw.write(line2); + fw.flush(); + fw.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + public void deleteDir() { + File dir = new File(meta); + if (dir.exists()) { + File[] files = dir.listFiles(); + for (File file : files) { + if (file.isFile()) { + file.delete(); + } + } + + dir.delete(); + } + } + + @Test + public void testWriteLog() throws IOException { + + } + + @Test + public void test() { + + } +} diff --git a/fe/test/com/baidu/palo/persist/ReplicaPersistInfoTest.java b/fe/test/com/baidu/palo/persist/ReplicaPersistInfoTest.java index 04437dd224..c7e18a1852 100644 --- a/fe/test/com/baidu/palo/persist/ReplicaPersistInfoTest.java +++ b/fe/test/com/baidu/palo/persist/ReplicaPersistInfoTest.java @@ -17,64 +17,64 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import org.junit.Assert; -import org.junit.Test; - -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; - -public class ReplicaPersistInfoTest { - @Test - public void testSerialization() throws Exception { - // 1. 
Write objects to file - File file = new File("./replicaInfo"); - file.createNewFile(); - DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); - - ReplicaPersistInfo info1 = new ReplicaPersistInfo(); - info1.write(dos); - - ReplicaPersistInfo info2 = ReplicaPersistInfo.createForLoad(1, 2, 3, 4, 5, 6, 7, 8, 9); - info2.write(dos); - - dos.flush(); - dos.close(); - - // 2. Read objects from file - DataInputStream dis = new DataInputStream(new FileInputStream(file)); - - ReplicaPersistInfo rInfo1 = new ReplicaPersistInfo(); - rInfo1.readFields(dis); - Assert.assertTrue(info1.equals(rInfo1)); - Assert.assertTrue(info1.equals(info1)); - Assert.assertFalse(info1.equals(this)); - - ReplicaPersistInfo rInfo2 = new ReplicaPersistInfo(); - rInfo2.readFields(dis); - Assert.assertTrue(info2.equals(rInfo2)); - Assert.assertFalse(info1.equals(info2)); - - // 3. delete files - dis.close(); - file.delete(); - } - - @Test - public void testGet() throws Exception { - ReplicaPersistInfo info = ReplicaPersistInfo.createForLoad(0, 1, 2, 3, 4, 5, 6, 7, 8); - Assert.assertEquals(0, info.getTableId()); - Assert.assertEquals(1, info.getPartitionId()); - Assert.assertEquals(2, info.getIndexId()); - Assert.assertEquals(3, info.getTabletId()); - Assert.assertEquals(4, info.getReplicaId()); - Assert.assertEquals(5, info.getVersion()); - Assert.assertEquals(6, info.getVersionHash()); - Assert.assertEquals(7, info.getDataSize()); - Assert.assertEquals(8, info.getRowCount()); - } -} +package com.baidu.palo.persist; + +import org.junit.Assert; +import org.junit.Test; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; + +public class ReplicaPersistInfoTest { + @Test + public void testSerialization() throws Exception { + // 1. Write objects to file + File file = new File("./replicaInfo"); + file.createNewFile(); + DataOutputStream dos = new DataOutputStream(new FileOutputStream(file)); + + ReplicaPersistInfo info1 = new ReplicaPersistInfo(); + info1.write(dos); + + ReplicaPersistInfo info2 = ReplicaPersistInfo.createForLoad(1, 2, 3, 4, 5, 6, 7, 8, 9); + info2.write(dos); + + dos.flush(); + dos.close(); + + // 2. Read objects from file + DataInputStream dis = new DataInputStream(new FileInputStream(file)); + + ReplicaPersistInfo rInfo1 = new ReplicaPersistInfo(); + rInfo1.readFields(dis); + Assert.assertTrue(info1.equals(rInfo1)); + Assert.assertTrue(info1.equals(info1)); + Assert.assertFalse(info1.equals(this)); + + ReplicaPersistInfo rInfo2 = new ReplicaPersistInfo(); + rInfo2.readFields(dis); + Assert.assertTrue(info2.equals(rInfo2)); + Assert.assertFalse(info1.equals(info2)); + + // 3. 
delete files + dis.close(); + file.delete(); + } + + @Test + public void testGet() throws Exception { + ReplicaPersistInfo info = ReplicaPersistInfo.createForLoad(0, 1, 2, 3, 4, 5, 6, 7, 8); + Assert.assertEquals(0, info.getTableId()); + Assert.assertEquals(1, info.getPartitionId()); + Assert.assertEquals(2, info.getIndexId()); + Assert.assertEquals(3, info.getTabletId()); + Assert.assertEquals(4, info.getReplicaId()); + Assert.assertEquals(5, info.getVersion()); + Assert.assertEquals(6, info.getVersionHash()); + Assert.assertEquals(7, info.getDataSize()); + Assert.assertEquals(8, info.getRowCount()); + } +} diff --git a/fe/test/com/baidu/palo/persist/StorageInfoTest.java b/fe/test/com/baidu/palo/persist/StorageInfoTest.java index 0720e293b9..c1181ae925 100644 --- a/fe/test/com/baidu/palo/persist/StorageInfoTest.java +++ b/fe/test/com/baidu/palo/persist/StorageInfoTest.java @@ -17,30 +17,30 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.persist; - -import org.junit.Assert; -import org.junit.Test; - -public class StorageInfoTest { - @Test - public void test() { - StorageInfo info = new StorageInfo(); - Assert.assertEquals(-1, info.getClusterID()); - Assert.assertEquals(0, info.getImageSeq()); - Assert.assertEquals(0, info.getEditsSeq()); - - info = new StorageInfo(10, 20, 30); - Assert.assertEquals(10, info.getClusterID()); - Assert.assertEquals(20, info.getImageSeq()); - Assert.assertEquals(30, info.getEditsSeq()); - - info.setClusterID(100); - info.setImageSeq(200); - info.setEditsSeq(300); - - Assert.assertEquals(100, info.getClusterID()); - Assert.assertEquals(200, info.getImageSeq()); - Assert.assertEquals(300, info.getEditsSeq()); - } -} +package com.baidu.palo.persist; + +import org.junit.Assert; +import org.junit.Test; + +public class StorageInfoTest { + @Test + public void test() { + StorageInfo info = new StorageInfo(); + Assert.assertEquals(-1, info.getClusterID()); + Assert.assertEquals(0, info.getImageSeq()); + Assert.assertEquals(0, info.getEditsSeq()); + + info = new StorageInfo(10, 20, 30); + Assert.assertEquals(10, info.getClusterID()); + Assert.assertEquals(20, info.getImageSeq()); + Assert.assertEquals(30, info.getEditsSeq()); + + info.setClusterID(100); + info.setImageSeq(200); + info.setEditsSeq(300); + + Assert.assertEquals(100, info.getClusterID()); + Assert.assertEquals(200, info.getImageSeq()); + Assert.assertEquals(300, info.getEditsSeq()); + } +} diff --git a/fe/test/com/baidu/palo/planner/OlapScanNodeTest.java b/fe/test/com/baidu/palo/planner/OlapScanNodeTest.java index 2e457c4f22..e566079bec 100644 --- a/fe/test/com/baidu/palo/planner/OlapScanNodeTest.java +++ b/fe/test/com/baidu/palo/planner/OlapScanNodeTest.java @@ -18,148 +18,148 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.planner; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import org.junit.Assert; -import org.junit.Test; - -import com.baidu.palo.analysis.Expr; -import com.baidu.palo.analysis.InPredicate; -import com.baidu.palo.analysis.IntLiteral; -import com.baidu.palo.analysis.SlotRef; -import com.baidu.palo.analysis.TableName; -import com.baidu.palo.catalog.Column; -import com.baidu.palo.catalog.PartitionKey; -import com.baidu.palo.catalog.PrimitiveType; -import com.baidu.palo.common.AnalysisException; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; - -public class OlapScanNodeTest { - // columnA in (1) hashmode=3 - @Test - public void testHashDistributionOneUser() throws AnalysisException { - - List partitions = new ArrayList<>(); - partitions.add(new Long(0)); - partitions.add(new Long(1)); - partitions.add(new Long(2)); - - - List columns = Lists.newArrayList(); - columns.add(new Column("columnA", PrimitiveType.BIGINT)); - - List inList = Lists.newArrayList(); - inList.add(new IntLiteral(1)); - - Expr compareExpr = new SlotRef(new TableName("db", "tableName"), "columnA"); - InPredicate inPredicate = new InPredicate(compareExpr, inList, false); - - PartitionColumnFilter columnFilter = new PartitionColumnFilter(); - columnFilter.setInPredicate(inPredicate); - Map filterMap = Maps.newHashMap(); - filterMap.put("columnA", columnFilter); - - DistributionPruner partitionPruner = new HashDistributionPruner( - partitions, - columns, - filterMap, - 3); - - Collection ids = partitionPruner.prune(); - Assert.assertEquals(ids.size(), 1); - - for (Long id : ids) { - Assert.assertEquals((1 & 0xffffffff) % 3, id.intValue()); - } - } - - // columnA in (1, 2 ,3, 4, 5, 6) hashmode=3 - @Test - public void testHashPartitionManyUser() throws AnalysisException { - - List partitions = new ArrayList<>(); - partitions.add(new Long(0)); - partitions.add(new Long(1)); - partitions.add(new Long(2)); - - List columns = Lists.newArrayList(); - columns.add(new Column("columnA", PrimitiveType.BIGINT)); - - List inList = Lists.newArrayList(); - inList.add(new IntLiteral(1)); - inList.add(new IntLiteral(2)); - inList.add(new IntLiteral(3)); - inList.add(new IntLiteral(4)); - inList.add(new IntLiteral(5)); - inList.add(new IntLiteral(6)); - - Expr compareExpr = new SlotRef(new TableName("db", "tableName"), "columnA"); - InPredicate inPredicate = new InPredicate(compareExpr, inList, false); - - PartitionColumnFilter columnFilter = new PartitionColumnFilter(); - columnFilter.setInPredicate(inPredicate); - Map filterMap = Maps.newHashMap(); - filterMap.put("columnA", columnFilter); - - DistributionPruner partitionPruner = new HashDistributionPruner( - partitions, - columns, - filterMap, - 3); - - Collection ids = partitionPruner.prune(); - Assert.assertEquals(ids.size(), 3); - } - - @Test - public void testHashForIntLiteral() { - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(1), PrimitiveType.BIGINT); - long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 1); - } - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(2), PrimitiveType.BIGINT); - long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 0); - } - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(3), PrimitiveType.BIGINT); - 
long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 0); - } - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(4), PrimitiveType.BIGINT); - long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 1); - } - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(5), PrimitiveType.BIGINT); - long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 2); - } - { - PartitionKey hashKey = new PartitionKey(); - hashKey.pushColumn(new IntLiteral(6), PrimitiveType.BIGINT); - long hashValue = hashKey.getHashValue(); - long mod = (int) ((hashValue & 0xffffffff) % 3); - Assert.assertEquals(mod, 2); - } - } -} +package com.baidu.palo.planner; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.junit.Assert; +import org.junit.Test; + +import com.baidu.palo.analysis.Expr; +import com.baidu.palo.analysis.InPredicate; +import com.baidu.palo.analysis.IntLiteral; +import com.baidu.palo.analysis.SlotRef; +import com.baidu.palo.analysis.TableName; +import com.baidu.palo.catalog.Column; +import com.baidu.palo.catalog.PartitionKey; +import com.baidu.palo.catalog.PrimitiveType; +import com.baidu.palo.common.AnalysisException; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +public class OlapScanNodeTest { + // columnA in (1) hashmode=3 + @Test + public void testHashDistributionOneUser() throws AnalysisException { + + List partitions = new ArrayList<>(); + partitions.add(new Long(0)); + partitions.add(new Long(1)); + partitions.add(new Long(2)); + + + List columns = Lists.newArrayList(); + columns.add(new Column("columnA", PrimitiveType.BIGINT)); + + List inList = Lists.newArrayList(); + inList.add(new IntLiteral(1)); + + Expr compareExpr = new SlotRef(new TableName("db", "tableName"), "columnA"); + InPredicate inPredicate = new InPredicate(compareExpr, inList, false); + + PartitionColumnFilter columnFilter = new PartitionColumnFilter(); + columnFilter.setInPredicate(inPredicate); + Map filterMap = Maps.newHashMap(); + filterMap.put("columnA", columnFilter); + + DistributionPruner partitionPruner = new HashDistributionPruner( + partitions, + columns, + filterMap, + 3); + + Collection ids = partitionPruner.prune(); + Assert.assertEquals(ids.size(), 1); + + for (Long id : ids) { + Assert.assertEquals((1 & 0xffffffff) % 3, id.intValue()); + } + } + + // columnA in (1, 2 ,3, 4, 5, 6) hashmode=3 + @Test + public void testHashPartitionManyUser() throws AnalysisException { + + List partitions = new ArrayList<>(); + partitions.add(new Long(0)); + partitions.add(new Long(1)); + partitions.add(new Long(2)); + + List columns = Lists.newArrayList(); + columns.add(new Column("columnA", PrimitiveType.BIGINT)); + + List inList = Lists.newArrayList(); + inList.add(new IntLiteral(1)); + inList.add(new IntLiteral(2)); + inList.add(new IntLiteral(3)); + inList.add(new IntLiteral(4)); + inList.add(new IntLiteral(5)); + inList.add(new IntLiteral(6)); + + Expr compareExpr = new SlotRef(new TableName("db", "tableName"), "columnA"); + InPredicate inPredicate = new InPredicate(compareExpr, inList, false); + + PartitionColumnFilter columnFilter = new PartitionColumnFilter(); + columnFilter.setInPredicate(inPredicate); + Map filterMap = Maps.newHashMap(); + filterMap.put("columnA", 
columnFilter); + + DistributionPruner partitionPruner = new HashDistributionPruner( + partitions, + columns, + filterMap, + 3); + + Collection ids = partitionPruner.prune(); + Assert.assertEquals(ids.size(), 3); + } + + @Test + public void testHashForIntLiteral() { + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(1), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 1); + } + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(2), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 0); + } + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(3), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 0); + } + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(4), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 1); + } + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(5), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 2); + } + { + PartitionKey hashKey = new PartitionKey(); + hashKey.pushColumn(new IntLiteral(6), PrimitiveType.BIGINT); + long hashValue = hashKey.getHashValue(); + long mod = (int) ((hashValue & 0xffffffff) % 3); + Assert.assertEquals(mod, 2); + } + } +} diff --git a/fe/test/com/baidu/palo/qe/JournalObservableTest.java b/fe/test/com/baidu/palo/qe/JournalObservableTest.java index dfcea2722d..39b6dd2630 100644 --- a/fe/test/com/baidu/palo/qe/JournalObservableTest.java +++ b/fe/test/com/baidu/palo/qe/JournalObservableTest.java @@ -18,135 +18,135 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.qe; - -import com.google.common.collect.Multiset; -import com.google.common.collect.TreeMultiset; - -import java.util.concurrent.CountDownLatch; - -import org.junit.Assert; -import org.junit.Test; - -public class JournalObservableTest { - @Test - public void testUpperBound() { - Multiset elements = TreeMultiset.create(); - JournalObserver ovserver2 = new JournalObserver(2L); - JournalObserver ovserver4 = new JournalObserver(4L); - JournalObserver ovserver41 = new JournalObserver(4L); - JournalObserver ovserver42 = new JournalObserver(4L); - JournalObserver ovserver6 = new JournalObserver(6L); - - // empty - { - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), 0, 1L)); - } - - // one element - { - elements.add(ovserver2); - int size = elements.size(); - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L)); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L)); - } - - // same element - { - elements.clear(); - elements.add(ovserver2); - elements.add(ovserver6); - elements.add(ovserver4); - elements.add(ovserver41); - elements.add(ovserver42); - - for (JournalObserver journalObserver : elements) { - System.out.println(journalObserver); - } - - int size = elements.size(); - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L)); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L)); - Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L)); - elements.remove(ovserver41); - Assert.assertEquals(3, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - elements.remove(ovserver4); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - elements.remove(ovserver42); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - } - - // same element 2 - { - elements.clear(); - elements.add(ovserver4); - elements.add(ovserver41); - - int size = elements.size(); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 4L)); - elements.remove(ovserver41); - Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - elements.remove(ovserver4); - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); - } - - // odd elements - { - elements.clear(); - elements.add(ovserver2); - elements.add(ovserver2); - elements.add(ovserver4); - elements.add(ovserver4); - elements.add(ovserver6); - elements.add(ovserver6); - int size = elements.size(); -// System.out.println("size=" + size); -// for(int i = 0; i < size; i ++) { -// System.out.println("array " + i + " = " + ((MasterOpExecutor)elements.get(i)).getTargetJournalId()); -// } - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L)); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L)); - Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L)); - Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 5L)); - Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 6L)); - Assert.assertEquals(6, 
JournalObservable.upperBound(elements.toArray(), size, 7L)); - } - // even elements - { - elements.clear(); - elements.add(ovserver2); - elements.add(ovserver2); - elements.add(ovserver4); - elements.add(ovserver4); - elements.add(ovserver4); - elements.add(ovserver6); - elements.add(ovserver6); - int size = elements.size(); - Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L)); - Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L)); - Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 4L)); - Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 5L)); - Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 6L)); - Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 7L)); - } - { - CountDownLatch latch = new CountDownLatch(1); - System.out.println(latch.getCount()); - - latch.countDown(); - System.out.println(latch.getCount()); - - latch.countDown(); - System.out.println(latch.getCount()); - - latch.countDown(); - System.out.println(latch.getCount()); - } - System.out.println("success"); - } -} - +package com.baidu.palo.qe; + +import com.google.common.collect.Multiset; +import com.google.common.collect.TreeMultiset; + +import java.util.concurrent.CountDownLatch; + +import org.junit.Assert; +import org.junit.Test; + +public class JournalObservableTest { + @Test + public void testUpperBound() { + Multiset elements = TreeMultiset.create(); + JournalObserver ovserver2 = new JournalObserver(2L); + JournalObserver ovserver4 = new JournalObserver(4L); + JournalObserver ovserver41 = new JournalObserver(4L); + JournalObserver ovserver42 = new JournalObserver(4L); + JournalObserver ovserver6 = new JournalObserver(6L); + + // empty + { + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), 0, 1L)); + } + + // one element + { + elements.add(ovserver2); + int size = elements.size(); + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L)); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L)); + } + + // same element + { + elements.clear(); + elements.add(ovserver2); + elements.add(ovserver6); + elements.add(ovserver4); + elements.add(ovserver41); + elements.add(ovserver42); + + for (JournalObserver journalObserver : elements) { + System.out.println(journalObserver); + } + + int size = elements.size(); + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 2L)); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), size, 3L)); + Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L)); + elements.remove(ovserver41); + Assert.assertEquals(3, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); + elements.remove(ovserver4); + Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); + elements.remove(ovserver42); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); + } + + // same element 2 + { + elements.clear(); + elements.add(ovserver4); + elements.add(ovserver41); + + int size = elements.size(); + Assert.assertEquals(2, 
JournalObservable.upperBound(elements.toArray(), size, 4L)); + elements.remove(ovserver41); + Assert.assertEquals(1, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); + elements.remove(ovserver4); + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), elements.size(), 4L)); + } + + // odd elements + { + elements.clear(); + elements.add(ovserver2); + elements.add(ovserver2); + elements.add(ovserver4); + elements.add(ovserver4); + elements.add(ovserver6); + elements.add(ovserver6); + int size = elements.size(); +// System.out.println("size=" + size); +// for(int i = 0; i < size; i ++) { +// System.out.println("array " + i + " = " + ((MasterOpExecutor)elements.get(i)).getTargetJournalId()); +// } + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); + Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L)); + Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L)); + Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 4L)); + Assert.assertEquals(4, JournalObservable.upperBound(elements.toArray(), size, 5L)); + Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 6L)); + Assert.assertEquals(6, JournalObservable.upperBound(elements.toArray(), size, 7L)); + } + // even elements + { + elements.clear(); + elements.add(ovserver2); + elements.add(ovserver2); + elements.add(ovserver4); + elements.add(ovserver4); + elements.add(ovserver4); + elements.add(ovserver6); + elements.add(ovserver6); + int size = elements.size(); + Assert.assertEquals(0, JournalObservable.upperBound(elements.toArray(), size, 1L)); + Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 2L)); + Assert.assertEquals(2, JournalObservable.upperBound(elements.toArray(), size, 3L)); + Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 4L)); + Assert.assertEquals(5, JournalObservable.upperBound(elements.toArray(), size, 5L)); + Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 6L)); + Assert.assertEquals(7, JournalObservable.upperBound(elements.toArray(), size, 7L)); + } + { + CountDownLatch latch = new CountDownLatch(1); + System.out.println(latch.getCount()); + + latch.countDown(); + System.out.println(latch.getCount()); + + latch.countDown(); + System.out.println(latch.getCount()); + + latch.countDown(); + System.out.println(latch.getCount()); + } + System.out.println("success"); + } +} + diff --git a/fe/test/com/baidu/palo/qe/ShowExecutorTest.java b/fe/test/com/baidu/palo/qe/ShowExecutorTest.java index 1367606f3a..18144d8973 100644 --- a/fe/test/com/baidu/palo/qe/ShowExecutorTest.java +++ b/fe/test/com/baidu/palo/qe/ShowExecutorTest.java @@ -151,11 +151,11 @@ public class ShowExecutorTest { EasyMock.expect(Catalog.getCurrentCatalog()).andReturn(catalog).anyTimes(); Catalog.getDdlStmt(EasyMock.isA(Table.class), EasyMock.isA(List.class), EasyMock.isA(List.class), EasyMock.isA(List.class), EasyMock.anyBoolean(), - EasyMock.anyShort()); + EasyMock.anyShort(), EasyMock.anyBoolean()); EasyMock.expectLastCall().anyTimes(); Catalog.getDdlStmt(EasyMock.isA(Table.class), EasyMock.isA(List.class), EasyMock.isNull(List.class), EasyMock.isNull(List.class), EasyMock.anyBoolean(), - EasyMock.anyShort()); + EasyMock.anyShort(), EasyMock.anyBoolean()); EasyMock.expectLastCall().anyTimes(); PowerMock.replay(Catalog.class); diff --git a/fe/test/com/baidu/palo/qe/SimpleSchedulerTest.java 
b/fe/test/com/baidu/palo/qe/SimpleSchedulerTest.java index aeb445c22c..64d1513f58 100644 --- a/fe/test/com/baidu/palo/qe/SimpleSchedulerTest.java +++ b/fe/test/com/baidu/palo/qe/SimpleSchedulerTest.java @@ -18,8 +18,8 @@ // specific language governing permissions and limitations // under the License. -package com.baidu.palo.qe; - +package com.baidu.palo.qe; + import com.baidu.palo.catalog.Catalog; import com.baidu.palo.common.FeConstants; import com.baidu.palo.common.Reference; @@ -42,164 +42,164 @@ import org.powermock.modules.junit4.PowerMockRunner; import java.util.ArrayList; import java.util.List; -import java.util.Map; - -@RunWith(PowerMockRunner.class) -@PowerMockIgnore("org.apache.log4j.*") -@PrepareForTest(Catalog.class) -public class SimpleSchedulerTest { - static Reference ref = new Reference(); - - private Catalog catalog; - private EditLog editLog; - - @Before - public void setUp() { - editLog = EasyMock.createMock(EditLog.class); - editLog.logAddBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logDropBackend(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); - EasyMock.expectLastCall().anyTimes(); - EasyMock.replay(editLog); - - catalog = EasyMock.createMock(Catalog.class); - EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); - EasyMock.replay(catalog); - - PowerMock.mockStatic(Catalog.class); - EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); - PowerMock.replay(Catalog.class); - } - - // TODO(lingbin): PALO-2051. - // Comment out these code temporatily. - // @Test - public void testGetHostWithBackendId() { - FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; - TNetworkAddress address; - // three locations - List nullLocations = null; - List emptyLocations = new ArrayList(); - - List twoLocations = new ArrayList(); - TScanRangeLocation locationA = new TScanRangeLocation(); - TScanRangeLocation locationB = new TScanRangeLocation(); - locationA.setBackend_id(20); - locationA.setBackend_id(30); - twoLocations.add(locationA); - twoLocations.add(locationB); - - // three Backends - ImmutableMap nullBackends = null; - ImmutableMap emptyBackends = ImmutableMap.of(); - - Backend backendA = new Backend(0, "addressA", 0); - backendA.updateOnce(0, 0, 0); - Backend backendB = new Backend(1, "addressB", 0); - backendB.updateOnce(0, 0, 0); - Backend backendC = new Backend(2, "addressC", 0); - backendC.updateOnce(0, 0, 0); - - Map threeBackends = Maps.newHashMap(); - threeBackends.put((long) 0, backendA); - threeBackends.put((long) 1, backendB); - threeBackends.put((long) 2, backendC); - ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); - - { // null Backends - address = SimpleScheduler.getHost(Long.valueOf(0), nullLocations, - nullBackends, ref); - Assert.assertNull(address); - } - { // empty Backends - address = SimpleScheduler.getHost(Long.valueOf(0), emptyLocations, - emptyBackends, ref); - Assert.assertNull(address); - } - { // normal Backends - - // BackendId exists - Assert.assertEquals(SimpleScheduler.getHost(0, emptyLocations, immutableThreeBackends, ref) - .hostname, "addressA"); - Assert.assertEquals(SimpleScheduler.getHost(2, emptyLocations, immutableThreeBackends, ref) - .hostname, "addressC"); - - // BacknedId not exists and location exists, choose the locations's first - Assert.assertEquals(SimpleScheduler.getHost(3, twoLocations, immutableThreeBackends, ref) - .hostname, 
"addressA"); - } - { // abnormal - // BackendId not exists and location not exists - Assert.assertNull(SimpleScheduler.getHost(3, emptyLocations, immutableThreeBackends, ref)); - } - - } - - // TODO(lingbin): PALO-2051. - // Comment out these code temporatily. - // @Test - public void testGetHostWithNoParams() { - FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; - ImmutableMap nullBackends = null; - ImmutableMap emptyBackends = ImmutableMap.of(); - - Backend backendA = new Backend(0, "addressA", 0); - backendA.updateOnce(0, 0, 0); - Backend backendB = new Backend(1, "addressB", 0); - backendB.updateOnce(0, 0, 0); - Backend backendC = new Backend(2, "addressC", 0); - backendC.updateOnce(0, 0, 0); - Map threeBackends = Maps.newHashMap(); - threeBackends.put((long) 0, backendA); - threeBackends.put((long) 1, backendB); - threeBackends.put((long) 2, backendC); - ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); - - { // abmormal - Assert.assertNull(SimpleScheduler.getHost(nullBackends, ref)); - Assert.assertNull(SimpleScheduler.getHost(emptyBackends, ref)); - } // normal - { - String a = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - String b = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - String c = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - Assert.assertTrue(!a.equals(b) && !a.equals(c) && !b.equals(c)); - a = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - b = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - c = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; - Assert.assertTrue(!a.equals(b) && !a.equals(c) && !b.equals(c)); - } - } - - // TODO(lingbin): PALO-2051. - // Comment out these code temporatily. - // @Test - public void testBlackList() { - FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; - TNetworkAddress address = null; - - Backend backendA = new Backend(0, "addressA", 0); - backendA.updateOnce(0, 0, 0); - Backend backendB = new Backend(1, "addressB", 0); - backendB.updateOnce(0, 0, 0); - Backend backendC = new Backend(2, "addressC", 0); - backendC.updateOnce(0, 0, 0); - Map threeBackends = Maps.newHashMap(); - threeBackends.put((long) 100, backendA); - threeBackends.put((long) 101, backendB); - threeBackends.put((long) 102, backendC); - ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); - - SimpleScheduler.updateBlacklistBackends(Long.valueOf(100)); - SimpleScheduler.updateBlacklistBackends(Long.valueOf(101)); - address = SimpleScheduler.getHost(immutableThreeBackends, ref); - // only backendc can work - Assert.assertEquals(address.hostname, "addressC"); - SimpleScheduler.updateBlacklistBackends(Long.valueOf(102)); - // no backend can work - address = SimpleScheduler.getHost(immutableThreeBackends, ref); - Assert.assertNull(address); - } -} +import java.util.Map; + +@RunWith(PowerMockRunner.class) +@PowerMockIgnore("org.apache.log4j.*") +@PrepareForTest(Catalog.class) +public class SimpleSchedulerTest { + static Reference ref = new Reference(); + + private Catalog catalog; + private EditLog editLog; + + @Before + public void setUp() { + editLog = EasyMock.createMock(EditLog.class); + editLog.logAddBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logDropBackend(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + editLog.logBackendStateChange(EasyMock.anyObject(Backend.class)); + EasyMock.expectLastCall().anyTimes(); + 
EasyMock.replay(editLog); + + catalog = EasyMock.createMock(Catalog.class); + EasyMock.expect(catalog.getEditLog()).andReturn(editLog).anyTimes(); + EasyMock.replay(catalog); + + PowerMock.mockStatic(Catalog.class); + EasyMock.expect(Catalog.getInstance()).andReturn(catalog).anyTimes(); + PowerMock.replay(Catalog.class); + } + + // TODO(lingbin): PALO-2051. + // Comment out these code temporatily. + // @Test + public void testGetHostWithBackendId() { + FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; + TNetworkAddress address; + // three locations + List nullLocations = null; + List emptyLocations = new ArrayList(); + + List twoLocations = new ArrayList(); + TScanRangeLocation locationA = new TScanRangeLocation(); + TScanRangeLocation locationB = new TScanRangeLocation(); + locationA.setBackend_id(20); + locationA.setBackend_id(30); + twoLocations.add(locationA); + twoLocations.add(locationB); + + // three Backends + ImmutableMap nullBackends = null; + ImmutableMap emptyBackends = ImmutableMap.of(); + + Backend backendA = new Backend(0, "addressA", 0); + backendA.updateOnce(0, 0, 0); + Backend backendB = new Backend(1, "addressB", 0); + backendB.updateOnce(0, 0, 0); + Backend backendC = new Backend(2, "addressC", 0); + backendC.updateOnce(0, 0, 0); + + Map threeBackends = Maps.newHashMap(); + threeBackends.put((long) 0, backendA); + threeBackends.put((long) 1, backendB); + threeBackends.put((long) 2, backendC); + ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); + + { // null Backends + address = SimpleScheduler.getHost(Long.valueOf(0), nullLocations, + nullBackends, ref); + Assert.assertNull(address); + } + { // empty Backends + address = SimpleScheduler.getHost(Long.valueOf(0), emptyLocations, + emptyBackends, ref); + Assert.assertNull(address); + } + { // normal Backends + + // BackendId exists + Assert.assertEquals(SimpleScheduler.getHost(0, emptyLocations, immutableThreeBackends, ref) + .hostname, "addressA"); + Assert.assertEquals(SimpleScheduler.getHost(2, emptyLocations, immutableThreeBackends, ref) + .hostname, "addressC"); + + // BacknedId not exists and location exists, choose the locations's first + Assert.assertEquals(SimpleScheduler.getHost(3, twoLocations, immutableThreeBackends, ref) + .hostname, "addressA"); + } + { // abnormal + // BackendId not exists and location not exists + Assert.assertNull(SimpleScheduler.getHost(3, emptyLocations, immutableThreeBackends, ref)); + } + + } + + // TODO(lingbin): PALO-2051. + // Comment out these code temporatily. 
+ // @Test + public void testGetHostWithNoParams() { + FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; + ImmutableMap nullBackends = null; + ImmutableMap emptyBackends = ImmutableMap.of(); + + Backend backendA = new Backend(0, "addressA", 0); + backendA.updateOnce(0, 0, 0); + Backend backendB = new Backend(1, "addressB", 0); + backendB.updateOnce(0, 0, 0); + Backend backendC = new Backend(2, "addressC", 0); + backendC.updateOnce(0, 0, 0); + Map threeBackends = Maps.newHashMap(); + threeBackends.put((long) 0, backendA); + threeBackends.put((long) 1, backendB); + threeBackends.put((long) 2, backendC); + ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); + + { // abmormal + Assert.assertNull(SimpleScheduler.getHost(nullBackends, ref)); + Assert.assertNull(SimpleScheduler.getHost(emptyBackends, ref)); + } // normal + { + String a = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + String b = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + String c = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + Assert.assertTrue(!a.equals(b) && !a.equals(c) && !b.equals(c)); + a = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + b = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + c = SimpleScheduler.getHost(immutableThreeBackends, ref).hostname; + Assert.assertTrue(!a.equals(b) && !a.equals(c) && !b.equals(c)); + } + } + + // TODO(lingbin): PALO-2051. + // Comment out these code temporatily. + // @Test + public void testBlackList() { + FeConstants.heartbeat_interval_second = Integer.MAX_VALUE; + TNetworkAddress address = null; + + Backend backendA = new Backend(0, "addressA", 0); + backendA.updateOnce(0, 0, 0); + Backend backendB = new Backend(1, "addressB", 0); + backendB.updateOnce(0, 0, 0); + Backend backendC = new Backend(2, "addressC", 0); + backendC.updateOnce(0, 0, 0); + Map threeBackends = Maps.newHashMap(); + threeBackends.put((long) 100, backendA); + threeBackends.put((long) 101, backendB); + threeBackends.put((long) 102, backendC); + ImmutableMap immutableThreeBackends = ImmutableMap.copyOf(threeBackends); + + SimpleScheduler.updateBlacklistBackends(Long.valueOf(100)); + SimpleScheduler.updateBlacklistBackends(Long.valueOf(101)); + address = SimpleScheduler.getHost(immutableThreeBackends, ref); + // only backendc can work + Assert.assertEquals(address.hostname, "addressC"); + SimpleScheduler.updateBlacklistBackends(Long.valueOf(102)); + // no backend can work + address = SimpleScheduler.getHost(immutableThreeBackends, ref); + Assert.assertNull(address); + } +} diff --git a/fe/test/com/baidu/palo/task/AgentTaskTest.java b/fe/test/com/baidu/palo/task/AgentTaskTest.java index d36e14831c..395205961d 100644 --- a/fe/test/com/baidu/palo/task/AgentTaskTest.java +++ b/fe/test/com/baidu/palo/task/AgentTaskTest.java @@ -17,8 +17,8 @@ // specific language governing permissions and limitations // under the License. 
-package com.baidu.palo.task; - +package com.baidu.palo.task; + import com.baidu.palo.catalog.AggregateType; import com.baidu.palo.catalog.Column; import com.baidu.palo.catalog.ColumnType; @@ -49,235 +49,235 @@ import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Set; - -public class AgentTaskTest { - - private AgentBatchTask agentBatchTask; - - private long backendId1 = 1000L; - private long backendId2 = 1001L; - - private long dbId = 10000L; - private long tableId = 20000L; - private long partitionId = 20000L; - private long indexId1 = 30000L; - private long indexId2 = 30001L; - - private long tabletId1 = 40000L; - private long tabletId2 = 40001L; - - private long replicaId1 = 50000L; - private long replicaId2 = 50001L; - - private short shortKeyNum = (short) 2; - private int schemaHash1 = 60000; - private int schemaHash2 = 60001; - private long version = 1L; - private long versionHash = 70000L; - - private TStorageType storageType = TStorageType.COLUMN; - private List columns; - private MarkedCountDownLatch latch = new MarkedCountDownLatch(3); - - private Range range1; - private Range range2; - - private AgentTask createReplicaTask; - private AgentTask dropTask; - private AgentTask pushTask; - private AgentTask cloneTask; - private AgentTask rollupTask; - private AgentTask schemaChangeTask; - private AgentTask cancelDeleteTask; - - @Before - public void setUp() throws AnalysisException { - agentBatchTask = new AgentBatchTask(); - - columns = new LinkedList(); - columns.add(new Column("k1", new ColumnType(PrimitiveType.INT), false, null, "1", "")); - columns.add(new Column("v1", new ColumnType(PrimitiveType.INT), false, AggregateType.SUM, "1", "")); - - PartitionKey pk1 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), false); - PartitionKey pk2 = PartitionKey.createPartitionKey(Arrays.asList("10"), Arrays.asList(columns.get(0))); - range1 = Range.closedOpen(pk1, pk2); - - PartitionKey pk3 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), true); - range2 = Range.closedOpen(pk2, pk3); - - // create tasks - - // create - createReplicaTask = new CreateReplicaTask(backendId1, dbId, tableId, partitionId, - indexId1, tabletId1, shortKeyNum, schemaHash1, - version, versionHash, KeysType.AGG_KEYS, - storageType, TStorageMedium.SSD, - columns, null, 0, latch); - - // drop - dropTask = new DropReplicaTask(backendId1, tabletId1, schemaHash1); - - // push - pushTask = - new PushTask(null, backendId1, dbId, tableId, partitionId, indexId1, tabletId1, - replicaId1, schemaHash1, version, versionHash, "/home/a", 10L, 200, 80000L, - TPushType.LOAD, null, false, TPriority.NORMAL); - - // clone - cloneTask = - new CloneTask(backendId1, dbId, tableId, partitionId, indexId1, tabletId1, schemaHash1, - Arrays.asList(new TBackend("host1", 8290, 8390)), TStorageMedium.HDD, -1, -1); - - // rollup - rollupTask = - new CreateRollupTask(null, backendId1, dbId, tableId, partitionId, indexId2, indexId1, - tabletId2, tabletId1, replicaId2, shortKeyNum, schemaHash2, schemaHash1, - storageType, columns, null, 0, TKeysType.AGG_KEYS); - - // schemaChange - schemaChangeTask = - new SchemaChangeTask(null, backendId1, dbId, tableId, partitionId, indexId1, - tabletId1, replicaId1, columns, schemaHash2, schemaHash1, - shortKeyNum, storageType, null, 0, TKeysType.AGG_KEYS); - - // cancel delete - cancelDeleteTask = - new CancelDeleteTask(backendId1, dbId, tableId, partitionId, indexId1, tabletId1, - schemaHash1, version, 
versionHash); - } - - @Test - public void addTaskTest() { - // add null - agentBatchTask.addTask(null); - Assert.assertEquals(0, agentBatchTask.getTaskNum()); - - // normal - agentBatchTask.addTask(createReplicaTask); - Assert.assertEquals(1, agentBatchTask.getTaskNum()); - - agentBatchTask.addTask(rollupTask); - Assert.assertEquals(2, agentBatchTask.getTaskNum()); - - List allTasks = agentBatchTask.getAllTasks(); - Assert.assertEquals(2, allTasks.size()); - - for (AgentTask agentTask : allTasks) { - if (agentTask instanceof CreateReplicaTask) { - Assert.assertEquals(createReplicaTask, agentTask); - } else if (agentTask instanceof CreateRollupTask) { - Assert.assertEquals(rollupTask, agentTask); - } else { - Assert.fail(); - } - } - } - - @Test - public void toThriftTest() throws Exception { - Class agentBatchTaskClass = agentBatchTask.getClass(); - Class[] typeParams = new Class[] { AgentTask.class }; - Method toAgentTaskRequest = agentBatchTaskClass.getDeclaredMethod("toAgentTaskRequest", typeParams); - toAgentTaskRequest.setAccessible(true); - - // create - TAgentTaskRequest request = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, createReplicaTask); - Assert.assertEquals(TTaskType.CREATE, request.getTask_type()); - Assert.assertEquals(createReplicaTask.getSignature(), request.getSignature()); - Assert.assertNotNull(request.getCreate_tablet_req()); - - // drop - TAgentTaskRequest request2 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, dropTask); - Assert.assertEquals(TTaskType.DROP, request2.getTask_type()); - Assert.assertEquals(dropTask.getSignature(), request2.getSignature()); - Assert.assertNotNull(request2.getDrop_tablet_req()); - - // push - TAgentTaskRequest request3 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, pushTask); - Assert.assertEquals(TTaskType.PUSH, request3.getTask_type()); - Assert.assertEquals(pushTask.getSignature(), request3.getSignature()); - Assert.assertNotNull(request3.getPush_req()); - - // clone - TAgentTaskRequest request4 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, cloneTask); - Assert.assertEquals(TTaskType.CLONE, request4.getTask_type()); - Assert.assertEquals(cloneTask.getSignature(), request4.getSignature()); - Assert.assertNotNull(request4.getClone_req()); - - // rollup - TAgentTaskRequest request5 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, rollupTask); - Assert.assertEquals(TTaskType.ROLLUP, request5.getTask_type()); - Assert.assertEquals(rollupTask.getSignature(), request5.getSignature()); - Assert.assertNotNull(request5.getAlter_tablet_req()); - - // schemaChange - TAgentTaskRequest request6 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, schemaChangeTask); - Assert.assertEquals(TTaskType.SCHEMA_CHANGE, request6.getTask_type()); - Assert.assertEquals(schemaChangeTask.getSignature(), request6.getSignature()); - Assert.assertNotNull(request6.getAlter_tablet_req()); - - // cancel delete - TAgentTaskRequest request9 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, cancelDeleteTask); - Assert.assertEquals(TTaskType.CANCEL_DELETE, request9.getTask_type()); - Assert.assertEquals(cancelDeleteTask.getSignature(), request9.getSignature()); - Assert.assertNotNull(request9.getCancel_delete_data_req()); - } - - @Test - public void agentTaskQueueTest() { - AgentTaskQueue.clearAllTasks(); - Assert.assertEquals(0, AgentTaskQueue.getTaskNum()); - - // add - AgentTaskQueue.addTask(createReplicaTask); - Assert.assertEquals(1, 
AgentTaskQueue.getTaskNum()); - Assert.assertFalse(AgentTaskQueue.addTask(createReplicaTask)); - - // get - AgentTask task = AgentTaskQueue.getTask(backendId1, TTaskType.CREATE, createReplicaTask.getSignature()); - Assert.assertEquals(createReplicaTask, task); - - // diff - AgentTaskQueue.addTask(rollupTask); - - Map> runningTasks = new HashMap>(); - List diffTasks = AgentTaskQueue.getDiffTasks(backendId1, runningTasks); - Assert.assertEquals(2, diffTasks.size()); - - Set set = new HashSet(); - set.add(createReplicaTask.getSignature()); - runningTasks.put(TTaskType.CREATE, set); - diffTasks = AgentTaskQueue.getDiffTasks(backendId1, runningTasks); - Assert.assertEquals(1, diffTasks.size()); - Assert.assertEquals(rollupTask, diffTasks.get(0)); - - // remove - AgentTaskQueue.removeTask(backendId1, TTaskType.CREATE, createReplicaTask.getSignature()); - Assert.assertEquals(1, AgentTaskQueue.getTaskNum()); - AgentTaskQueue.removeTask(backendId1, TTaskType.ROLLUP, rollupTask.getSignature()); - Assert.assertEquals(0, AgentTaskQueue.getTaskNum()); - } - - @Test - public void failedAgentTaskTest() { - AgentTaskQueue.clearAllTasks(); - - AgentTaskQueue.addTask(dropTask); - Assert.assertEquals(0, dropTask.getFailedTimes()); - dropTask.failed(); - Assert.assertEquals(1, dropTask.getFailedTimes()); - - Assert.assertEquals(1, AgentTaskQueue.getTaskNum()); - Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, false)); - Assert.assertEquals(1, AgentTaskQueue.getTaskNum(-1, TTaskType.DROP, false)); - Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, true)); - - dropTask.failed(); - DropReplicaTask dropTask2 = new DropReplicaTask(backendId2, tabletId1, schemaHash1); - AgentTaskQueue.addTask(dropTask2); - dropTask2.failed(); - Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, true)); - Assert.assertEquals(2, AgentTaskQueue.getTaskNum(-1, TTaskType.DROP, true)); - } -} +import java.util.Set; + +public class AgentTaskTest { + + private AgentBatchTask agentBatchTask; + + private long backendId1 = 1000L; + private long backendId2 = 1001L; + + private long dbId = 10000L; + private long tableId = 20000L; + private long partitionId = 20000L; + private long indexId1 = 30000L; + private long indexId2 = 30001L; + + private long tabletId1 = 40000L; + private long tabletId2 = 40001L; + + private long replicaId1 = 50000L; + private long replicaId2 = 50001L; + + private short shortKeyNum = (short) 2; + private int schemaHash1 = 60000; + private int schemaHash2 = 60001; + private long version = 1L; + private long versionHash = 70000L; + + private TStorageType storageType = TStorageType.COLUMN; + private List columns; + private MarkedCountDownLatch latch = new MarkedCountDownLatch(3); + + private Range range1; + private Range range2; + + private AgentTask createReplicaTask; + private AgentTask dropTask; + private AgentTask pushTask; + private AgentTask cloneTask; + private AgentTask rollupTask; + private AgentTask schemaChangeTask; + private AgentTask cancelDeleteTask; + + @Before + public void setUp() throws AnalysisException { + agentBatchTask = new AgentBatchTask(); + + columns = new LinkedList(); + columns.add(new Column("k1", new ColumnType(PrimitiveType.INT), false, null, "1", "")); + columns.add(new Column("v1", new ColumnType(PrimitiveType.INT), false, AggregateType.SUM, "1", "")); + + PartitionKey pk1 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), false); + PartitionKey pk2 = 
PartitionKey.createPartitionKey(Arrays.asList("10"), Arrays.asList(columns.get(0))); + range1 = Range.closedOpen(pk1, pk2); + + PartitionKey pk3 = PartitionKey.createInfinityPartitionKey(Arrays.asList(columns.get(0)), true); + range2 = Range.closedOpen(pk2, pk3); + + // create tasks + + // create + createReplicaTask = new CreateReplicaTask(backendId1, dbId, tableId, partitionId, + indexId1, tabletId1, shortKeyNum, schemaHash1, + version, versionHash, KeysType.AGG_KEYS, + storageType, TStorageMedium.SSD, + columns, null, 0, latch); + + // drop + dropTask = new DropReplicaTask(backendId1, tabletId1, schemaHash1); + + // push + pushTask = + new PushTask(null, backendId1, dbId, tableId, partitionId, indexId1, tabletId1, + replicaId1, schemaHash1, version, versionHash, "/home/a", 10L, 200, 80000L, + TPushType.LOAD, null, false, TPriority.NORMAL); + + // clone + cloneTask = + new CloneTask(backendId1, dbId, tableId, partitionId, indexId1, tabletId1, schemaHash1, + Arrays.asList(new TBackend("host1", 8290, 8390)), TStorageMedium.HDD, -1, -1); + + // rollup + rollupTask = + new CreateRollupTask(null, backendId1, dbId, tableId, partitionId, indexId2, indexId1, + tabletId2, tabletId1, replicaId2, shortKeyNum, schemaHash2, schemaHash1, + storageType, columns, null, 0, TKeysType.AGG_KEYS); + + // schemaChange + schemaChangeTask = + new SchemaChangeTask(null, backendId1, dbId, tableId, partitionId, indexId1, + tabletId1, replicaId1, columns, schemaHash2, schemaHash1, + shortKeyNum, storageType, null, 0, TKeysType.AGG_KEYS); + + // cancel delete + cancelDeleteTask = + new CancelDeleteTask(backendId1, dbId, tableId, partitionId, indexId1, tabletId1, + schemaHash1, version, versionHash); + } + + @Test + public void addTaskTest() { + // add null + agentBatchTask.addTask(null); + Assert.assertEquals(0, agentBatchTask.getTaskNum()); + + // normal + agentBatchTask.addTask(createReplicaTask); + Assert.assertEquals(1, agentBatchTask.getTaskNum()); + + agentBatchTask.addTask(rollupTask); + Assert.assertEquals(2, agentBatchTask.getTaskNum()); + + List allTasks = agentBatchTask.getAllTasks(); + Assert.assertEquals(2, allTasks.size()); + + for (AgentTask agentTask : allTasks) { + if (agentTask instanceof CreateReplicaTask) { + Assert.assertEquals(createReplicaTask, agentTask); + } else if (agentTask instanceof CreateRollupTask) { + Assert.assertEquals(rollupTask, agentTask); + } else { + Assert.fail(); + } + } + } + + @Test + public void toThriftTest() throws Exception { + Class agentBatchTaskClass = agentBatchTask.getClass(); + Class[] typeParams = new Class[] { AgentTask.class }; + Method toAgentTaskRequest = agentBatchTaskClass.getDeclaredMethod("toAgentTaskRequest", typeParams); + toAgentTaskRequest.setAccessible(true); + + // create + TAgentTaskRequest request = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, createReplicaTask); + Assert.assertEquals(TTaskType.CREATE, request.getTask_type()); + Assert.assertEquals(createReplicaTask.getSignature(), request.getSignature()); + Assert.assertNotNull(request.getCreate_tablet_req()); + + // drop + TAgentTaskRequest request2 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, dropTask); + Assert.assertEquals(TTaskType.DROP, request2.getTask_type()); + Assert.assertEquals(dropTask.getSignature(), request2.getSignature()); + Assert.assertNotNull(request2.getDrop_tablet_req()); + + // push + TAgentTaskRequest request3 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, pushTask); + Assert.assertEquals(TTaskType.PUSH, 
request3.getTask_type()); + Assert.assertEquals(pushTask.getSignature(), request3.getSignature()); + Assert.assertNotNull(request3.getPush_req()); + + // clone + TAgentTaskRequest request4 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, cloneTask); + Assert.assertEquals(TTaskType.CLONE, request4.getTask_type()); + Assert.assertEquals(cloneTask.getSignature(), request4.getSignature()); + Assert.assertNotNull(request4.getClone_req()); + + // rollup + TAgentTaskRequest request5 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, rollupTask); + Assert.assertEquals(TTaskType.ROLLUP, request5.getTask_type()); + Assert.assertEquals(rollupTask.getSignature(), request5.getSignature()); + Assert.assertNotNull(request5.getAlter_tablet_req()); + + // schemaChange + TAgentTaskRequest request6 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, schemaChangeTask); + Assert.assertEquals(TTaskType.SCHEMA_CHANGE, request6.getTask_type()); + Assert.assertEquals(schemaChangeTask.getSignature(), request6.getSignature()); + Assert.assertNotNull(request6.getAlter_tablet_req()); + + // cancel delete + TAgentTaskRequest request9 = (TAgentTaskRequest) toAgentTaskRequest.invoke(agentBatchTask, cancelDeleteTask); + Assert.assertEquals(TTaskType.CANCEL_DELETE, request9.getTask_type()); + Assert.assertEquals(cancelDeleteTask.getSignature(), request9.getSignature()); + Assert.assertNotNull(request9.getCancel_delete_data_req()); + } + + @Test + public void agentTaskQueueTest() { + AgentTaskQueue.clearAllTasks(); + Assert.assertEquals(0, AgentTaskQueue.getTaskNum()); + + // add + AgentTaskQueue.addTask(createReplicaTask); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum()); + Assert.assertFalse(AgentTaskQueue.addTask(createReplicaTask)); + + // get + AgentTask task = AgentTaskQueue.getTask(backendId1, TTaskType.CREATE, createReplicaTask.getSignature()); + Assert.assertEquals(createReplicaTask, task); + + // diff + AgentTaskQueue.addTask(rollupTask); + + Map> runningTasks = new HashMap>(); + List diffTasks = AgentTaskQueue.getDiffTasks(backendId1, runningTasks); + Assert.assertEquals(2, diffTasks.size()); + + Set set = new HashSet(); + set.add(createReplicaTask.getSignature()); + runningTasks.put(TTaskType.CREATE, set); + diffTasks = AgentTaskQueue.getDiffTasks(backendId1, runningTasks); + Assert.assertEquals(1, diffTasks.size()); + Assert.assertEquals(rollupTask, diffTasks.get(0)); + + // remove + AgentTaskQueue.removeTask(backendId1, TTaskType.CREATE, createReplicaTask.getSignature()); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum()); + AgentTaskQueue.removeTask(backendId1, TTaskType.ROLLUP, rollupTask.getSignature()); + Assert.assertEquals(0, AgentTaskQueue.getTaskNum()); + } + + @Test + public void failedAgentTaskTest() { + AgentTaskQueue.clearAllTasks(); + + AgentTaskQueue.addTask(dropTask); + Assert.assertEquals(0, dropTask.getFailedTimes()); + dropTask.failed(); + Assert.assertEquals(1, dropTask.getFailedTimes()); + + Assert.assertEquals(1, AgentTaskQueue.getTaskNum()); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, false)); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum(-1, TTaskType.DROP, false)); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, true)); + + dropTask.failed(); + DropReplicaTask dropTask2 = new DropReplicaTask(backendId2, tabletId1, schemaHash1); + AgentTaskQueue.addTask(dropTask2); + dropTask2.failed(); + Assert.assertEquals(1, AgentTaskQueue.getTaskNum(backendId1, TTaskType.DROP, 
true)); + Assert.assertEquals(2, AgentTaskQueue.getTaskNum(-1, TTaskType.DROP, true)); + } +} diff --git a/fs_brokers/apache_hdfs_broker/build.sh b/fs_brokers/apache_hdfs_broker/build.sh index 732117362f..354cf00375 100755 --- a/fs_brokers/apache_hdfs_broker/build.sh +++ b/fs_brokers/apache_hdfs_broker/build.sh @@ -21,8 +21,8 @@ ROOT=`cd "$ROOT"; pwd` # check java version if [ -z $JAVA_HOME ]; then - echo "Error: JAVA_HOME is not set." - exit 1 + echo "Error: JAVA_HOME is not set, use thirdparty/installed/jdk1.8.0_131" + export JAVA_HOME=${ROOT}/../../thirdparty/installed/jdk1.8.0_131 fi JAVA=${JAVA_HOME}/bin/java JAVA_VER=$(${JAVA} -version 2>&1 | sed 's/.* version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q' | cut -f1 -d " ") diff --git a/fs_brokers/apache_hdfs_broker/deps/build.sh b/fs_brokers/apache_hdfs_broker/deps/build.sh index 3f0d4889fa..010238c40f 100755 --- a/fs_brokers/apache_hdfs_broker/deps/build.sh +++ b/fs_brokers/apache_hdfs_broker/deps/build.sh @@ -35,7 +35,7 @@ fi if [ ! -f bin/thrift ];then echo "thrift is not found." - echo "You need to copy thrift binary file from 'thirdparty/installed/bin/thrift' to $CURDIR/bin/" + echo "You need to copy thrift binary file from '$CURDIR/../../../thirdparty/installed/bin/thrift' to $CURDIR/bin/" exit 1 fi diff --git a/fs_brokers/apache_hdfs_broker/src/com/baidu/palo/broker/hdfs/ClientContextManager.java b/fs_brokers/apache_hdfs_broker/src/com/baidu/palo/broker/hdfs/ClientContextManager.java index eb84f53ca2..049cd6d326 100644 --- a/fs_brokers/apache_hdfs_broker/src/com/baidu/palo/broker/hdfs/ClientContextManager.java +++ b/fs_brokers/apache_hdfs_broker/src/com/baidu/palo/broker/hdfs/ClientContextManager.java @@ -93,7 +93,7 @@ public class ClientContextManager { } public synchronized void removeInputStream(TBrokerFD fd) { - String clientId = fdToClientMap.get(fd); + String clientId = fdToClientMap.remove(fd); if (clientId == null) { return; } @@ -109,7 +109,7 @@ public class ClientContextManager { } public synchronized void removeOutputStream(TBrokerFD fd) { - String clientId = fdToClientMap.get(fd); + String clientId = fdToClientMap.remove(fd); if (clientId == null) { return; } diff --git a/gensrc/script/gen_build_version.sh b/gensrc/script/gen_build_version.sh index 5506c1e219..06b02af669 100755 --- a/gensrc/script/gen_build_version.sh +++ b/gensrc/script/gen_build_version.sh @@ -26,7 +26,7 @@ # contains the build version based on the git hash or svn revision. 
############################################################## -build_version="3.3-branch" +build_version="3.4-branch" unset LANG unset LC_CTYPE diff --git a/gensrc/thrift/PaloInternalService.thrift b/gensrc/thrift/PaloInternalService.thrift index 9db4c31006..3104a81207 100644 --- a/gensrc/thrift/PaloInternalService.thrift +++ b/gensrc/thrift/PaloInternalService.thrift @@ -303,47 +303,12 @@ struct TFetchDataResult { 4: optional Status.TStatus status } -struct TFetchStartKey { - 1: required list<string> key -} - -struct TFetchEndKey { - 1: required list<string> key -} - struct TCondition { 1: required string column_name 2: required string condition_op 3: required list<string> condition_values } -struct TFetchRequest { - 1: required bool use_compression - 2: optional i32 num_rows - 3: required i32 schema_hash - 4: required Types.TTabletId tablet_id - 5: required i32 version - 6: required i64 version_hash - 7: required list<string> field - 8: optional string user - 9: optional string output - 10: optional string range - 11: required list<TFetchStartKey> start_key - 12: required list<TFetchEndKey> end_key - 13: required list<TCondition> where - 14: optional string end_range - 15: optional bool aggregation -} - -struct TShowHintsRequest { - 1: required Types.TTabletId tablet_id - 2: required i32 schema_hash - 3: required i32 block_row_count - 4: optional string end_range = "lt" - 5: required list<TFetchStartKey> start_key - 6: required list<TFetchEndKey> end_key -} - struct TExportStatusResult { 1: required Status.TStatus status 2: required Types.TExportState state