diff --git a/be/src/runtime/mem_pool.h b/be/src/runtime/mem_pool.h
index 04d6236831..f51079b204 100644
--- a/be/src/runtime/mem_pool.h
+++ b/be/src/runtime/mem_pool.h
@@ -163,6 +163,12 @@ public:
     static constexpr int DEFAULT_ALIGNMENT = 8;
 
+#if defined(__SANITIZE_ADDRESS__) || defined(ADDRESS_SANITIZER)
+    static constexpr int DEFAULT_PADDING_SIZE = 0x10;
+#else
+    static constexpr int DEFAULT_PADDING_SIZE = 0x0;
+#endif
+
 private:
     friend class MemPoolTest;
     static const int INITIAL_CHUNK_SIZE = 4 * 1024;
@@ -203,26 +209,48 @@ private:
         return chunks_[current_chunk_idx_].allocated_bytes;
     }
 
+    uint8_t* allocate_from_current_chunk(int64_t size, int alignment) {
+        // Manual ASAN poisoning is complicated and it is hard to make
+        // it work right. There are illustrated examples in
+        // http://blog.hostilefork.com/poison-memory-without-asan/.
+        //
+        // Stacks of use-after-poison do not provide enough information
+        // to resolve a bug, while stacks of use-after-free do.
+        // https://github.com/google/sanitizers/issues/191
+        //
+        // We'd better implement a mempool using malloc/free directly,
+        // so that ASAN works natively. However, we cannot do that in a
+        // short time, so we make manual poisoning work as well as possible.
+        // This refers to https://github.com/mcgov/asan_alignment_example.
+
+        ChunkInfo& info = chunks_[current_chunk_idx_];
+        int64_t aligned_allocated_bytes = BitUtil::RoundUpToPowerOf2(
+                info.allocated_bytes + DEFAULT_PADDING_SIZE, alignment);
+        if (aligned_allocated_bytes + size <= info.chunk.size) {
+            // Ensure the requested alignment is respected.
+            int64_t padding = aligned_allocated_bytes - info.allocated_bytes;
+            uint8_t* result = info.chunk.data + aligned_allocated_bytes;
+            ASAN_UNPOISON_MEMORY_REGION(result, size);
+            DCHECK_LE(info.allocated_bytes + size, info.chunk.size);
+            info.allocated_bytes += padding + size;
+            total_allocated_bytes_ += padding + size;
+            peak_allocated_bytes_ = std::max(total_allocated_bytes_, peak_allocated_bytes_);
+            DCHECK_LE(current_chunk_idx_, chunks_.size() - 1);
+            return result;
+        }
+        return nullptr;
+    }
+
     template <bool CHECK_LIMIT_FIRST>
     uint8_t* ALWAYS_INLINE allocate(int64_t size, int alignment) {
         DCHECK_GE(size, 0);
         if (UNLIKELY(size == 0)) return reinterpret_cast<uint8_t*>(&k_zero_length_region_);
 
         if (current_chunk_idx_ != -1) {
-            ChunkInfo& info = chunks_[current_chunk_idx_];
-            int64_t aligned_allocated_bytes =
-                    BitUtil::RoundUpToPowerOf2(info.allocated_bytes, alignment);
-            if (aligned_allocated_bytes + size <= info.chunk.size) {
-                // Ensure the requested alignment is respected.
-                int64_t padding = aligned_allocated_bytes - info.allocated_bytes;
-                uint8_t* result = info.chunk.data + aligned_allocated_bytes;
-                ASAN_UNPOISON_MEMORY_REGION(result, size);
-                DCHECK_LE(info.allocated_bytes + size, info.chunk.size);
-                info.allocated_bytes += padding + size;
-                total_allocated_bytes_ += padding + size;
-                DCHECK_LE(current_chunk_idx_, chunks_.size() - 1);
+            uint8_t* result = allocate_from_current_chunk(size, alignment);
+            if (result != nullptr) {
                 return result;
-            }
+            }
         }
 
         // If we couldn't allocate a new chunk, return nullptr. malloc() guarantees alignment
@@ -230,16 +258,11 @@ private:
         // guarantee alignment.
         //static_assert(
         //INITIAL_CHUNK_SIZE >= config::FLAGS_MEMORY_MAX_ALIGNMENT, "Min chunk size too low");
-        if (UNLIKELY(!find_chunk(size, CHECK_LIMIT_FIRST))) return nullptr;
+        if (UNLIKELY(!find_chunk(size, CHECK_LIMIT_FIRST))) {
+            return nullptr;
+        }
 
-        ChunkInfo& info = chunks_[current_chunk_idx_];
-        uint8_t* result = info.chunk.data + info.allocated_bytes;
-        ASAN_UNPOISON_MEMORY_REGION(result, size);
-        DCHECK_LE(info.allocated_bytes + size, info.chunk.size);
-        info.allocated_bytes += size;
-        total_allocated_bytes_ += size;
-        DCHECK_LE(current_chunk_idx_, chunks_.size() - 1);
-        peak_allocated_bytes_ = std::max(total_allocated_bytes_, peak_allocated_bytes_);
+        uint8_t* result = allocate_from_current_chunk(size, alignment);
         return result;
     }
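
For context on how the padded allocation interacts with ASAN, here is a minimal, self-contained sketch; it is not the Doris MemPool itself, and `ToyArena`, `kPaddingSize`, and the fixed 4 KiB chunk are illustrative assumptions. It poisons a whole chunk up front and unpoisons only the bytes handed out, leaving at least a 16-byte redzone in front of each allocation, the same idea as `DEFAULT_PADDING_SIZE = 0x10` under ASAN, so a small overrun lands in poisoned memory and gets reported.

```cpp
#include <sanitizer/asan_interface.h> // ASAN_(UN)POISON_MEMORY_REGION; no-ops when ASAN is off
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Illustrative arena, not the Doris MemPool: one fixed chunk, bump allocation,
// and a poisoned redzone of at least kPaddingSize bytes before every result.
struct ToyArena {
    static constexpr int64_t kPaddingSize = 0x10; // mirrors DEFAULT_PADDING_SIZE under ASAN

    explicit ToyArena(int64_t bytes)
            : data_(static_cast<uint8_t*>(std::malloc(bytes))), size_(bytes) {
        // Poison the whole chunk; bytes become addressable only when handed out.
        ASAN_POISON_MEMORY_REGION(data_, size_);
    }
    ~ToyArena() {
        ASAN_UNPOISON_MEMORY_REGION(data_, size_);
        std::free(data_);
    }

    // alignment must be a power of two, as with MemPool::allocate().
    uint8_t* allocate(int64_t size, int64_t alignment) {
        // Skip a redzone, then round the offset up to the requested alignment
        // (the same rounding BitUtil::RoundUpToPowerOf2 performs in the patch).
        int64_t offset = (used_ + kPaddingSize + alignment - 1) & ~(alignment - 1);
        if (offset + size > size_) return nullptr;
        uint8_t* result = data_ + offset;
        ASAN_UNPOISON_MEMORY_REGION(result, size); // only the returned bytes are valid
        used_ = offset + size;
        return result;
    }

private:
    uint8_t* data_;
    int64_t size_;
    int64_t used_ = 0;
};

int main() {
    ToyArena arena(4096);
    uint8_t* p = arena.allocate(13, 8);
    std::memset(p, 0, 13); // fine: exactly the unpoisoned region
    // p[13] = 1;          // with ASAN this hits the poisoned redzone and is reported
    return 0;
}
```

A 16-byte gap also lines up with ASAN's 8-byte shadow granularity: a smaller padding could leave part of the redzone addressable, which is presumably why `DEFAULT_PADDING_SIZE` is `0x10` rather than a single byte.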