Ensure proper alignment of tuples in HashMemoryChunkData buffers

pg commitid 5dc692f78d3
Author: wuyuechuan
Date: 2020-11-28 09:08:55 +08:00
parent 91064e7f9d
commit deec0bb6d6
2 changed files with 23 additions and 21 deletions

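Background for the change: the old layout exposed the tuple buffer as a flexible array member, char data[FLEXIBLE_ARRAY_MEMBER], and defined HASH_CHUNK_HEADER_SIZE as offsetof(HashMemoryChunkData, data). The C standard does not require that offset to be a multiple of the platform's maximum alignment, so tuples copied to chunk->data could in principle land misaligned. The commit instead rounds the header up with MAXALIGN(sizeof(HashMemoryChunkData)) and reaches the buffer through the new HASH_CHUNK_DATA() macro. The standalone sketch below is not openGauss code: MAXALIGN is re-declared with an assumed 8-byte maximum alignment and SimpleChunk is a deliberately stripped-down stand-in, just to show that a flexible-array offset can fall short of maxalignment while the rounded-up header size cannot.

/*
 * Standalone sketch, not openGauss code: MAXALIGN is re-declared with an
 * assumed 8-byte maximum alignment, and SimpleChunk is a deliberately
 * stripped-down stand-in for HashMemoryChunkData.
 */
#include <stddef.h>
#include <stdio.h>

#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(LEN) \
    (((size_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))

typedef struct SimpleChunk {
    int  ntuples;   /* a lone 4-byte member */
    char data[];    /* flexible array member, alignment requirement 1 */
} SimpleChunk;

int main(void)
{
    /* the flexible-array offset only respects char's alignment: 4 here */
    printf("offsetof(data)          = %zu\n", offsetof(SimpleChunk, data));
    /* rounding the whole header up always yields a maxaligned offset: 8 */
    printf("MAXALIGN(sizeof header) = %zu\n", MAXALIGN(sizeof(SimpleChunk)));
    return 0;
}

On a typical LP64 build this prints 4 and 8. The real HashMemoryChunkData happens to be maxaligned on common 64-bit ABIs, but nothing guarantees that in general, which is why the header size is now rounded up explicitly.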
View File

@@ -1126,7 +1126,7 @@ static void ExecHashIncreaseNumBatches(HashJoinTable hashtable)
         /* process all tuples stored in this chunk (and then free it) */
         while (idx < oldchunks->used) {
-            HashJoinTuple hashTuple = (HashJoinTuple)(oldchunks->data + idx);
+            HashJoinTuple hashTuple = (HashJoinTuple)(HASH_CHUNK_DATA(oldchunks) + idx);
             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
             int hashTupleSize = (HJTUPLE_OVERHEAD + tuple->t_len);
             int bucketno;
@@ -1402,7 +1402,7 @@ static void ExecParallelHashRepartitionFirst(HashJoinTable hashtable)
         /* Repartition all tuples in this chunk. */
         while (idx < chunk->used) {
-            HashJoinTuple hashTuple = (HashJoinTuple)(chunk->data + idx);
+            HashJoinTuple hashTuple = (HashJoinTuple)(HASH_CHUNK_DATA(chunk) + idx);
             MinimalTuple tuple = HJTUPLE_MINTUPLE(hashTuple);
             HashJoinTuple copyTuple;
             HashJoinTuple shared;
@@ -1571,7 +1571,7 @@ static void ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
     while ((chunk = ExecParallelHashPopChunkQueue(hashtable, &chunk_s))) {
         size_t idx = 0;
         while (idx < chunk->used) {
-            HashJoinTuple hashTuple = (HashJoinTuple)(chunk->data + idx);
+            HashJoinTuple hashTuple = (HashJoinTuple)(HASH_CHUNK_DATA(chunk) + idx);
             int bucketno;
             int batchno;
@@ -2714,10 +2714,10 @@ static void* dense_alloc(HashJoinTable hashtable, Size size)
      */
     if (size > HASH_CHUNK_THRESHOLD) {
         /* allocate new chunk and put it at the beginning of the list */
-        newChunk = (HashMemoryChunk)MemoryContextAlloc(hashtable->batchCxt, offsetof(HashMemoryChunkData, data) + size);
+        newChunk = (HashMemoryChunk)MemoryContextAlloc(hashtable->batchCxt, HASH_CHUNK_HEADER_SIZE + size);
         newChunk->maxlen = size;
-        newChunk->used = 0;
-        newChunk->ntuples = 0;
+        newChunk->used = size;
+        newChunk->ntuples = 1;
         /*
          * Add this chunk to the list after the first existing chunk, so that
@ -2731,10 +2731,7 @@ static void* dense_alloc(HashJoinTable hashtable, Size size)
hashtable->chunks = newChunk;
}
newChunk->used += size;
newChunk->ntuples += 1;
return newChunk->data;
return HASH_CHUNK_DATA(newChunk);
}
/*
@@ -2743,8 +2740,7 @@ static void* dense_alloc(HashJoinTable hashtable, Size size)
      */
     if ((hashtable->chunks == NULL) || (hashtable->chunks->maxlen - hashtable->chunks->used) < size) {
         /* allocate new chunk and put it at the beginning of the list */
-        newChunk = (HashMemoryChunk)MemoryContextAlloc(
-            hashtable->batchCxt, offsetof(HashMemoryChunkData, data) + HASH_CHUNK_SIZE);
+        newChunk = (HashMemoryChunk)MemoryContextAlloc(hashtable->batchCxt, HASH_CHUNK_HEADER_SIZE + HASH_CHUNK_SIZE);
         newChunk->maxlen = HASH_CHUNK_SIZE;
         newChunk->used = size;
@@ -2753,11 +2749,11 @@ static void* dense_alloc(HashJoinTable hashtable, Size size)
         newChunk->next.unshared = hashtable->chunks;
         hashtable->chunks = newChunk;
-        return newChunk->data;
+        return HASH_CHUNK_DATA(newChunk);
     }
     /* There is enough space in the current chunk, let's add the tuple */
-    ptr = hashtable->chunks->data + hashtable->chunks->used;
+    ptr = HASH_CHUNK_DATA(hashtable->chunks) + hashtable->chunks->used;
     hashtable->chunks->used += size;
     hashtable->chunks->ntuples += 1;
@@ -2794,7 +2790,7 @@ static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t
     if (chunk != NULL && size < HASH_CHUNK_THRESHOLD && chunk->maxlen - chunk->used >= size) {
         chunk_shared = (HashMemoryChunk)hashtable->current_chunk_shared;
         Assert(chunk == dsa_get_address(hashtable->area, chunk_shared));
-        HashJoinTuple result = (HashJoinTuple)(chunk->data + chunk->used);
+        HashJoinTuple result = (HashJoinTuple)(HASH_CHUNK_DATA(chunk) + chunk->used);
         *shared = result;
         chunk->used += size;
         Assert(chunk->used <= chunk->maxlen);
@@ -2873,7 +2869,7 @@ static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t
     chunk_shared = chunk_shared;
     chunk_shared->maxlen = chunk_size - HASH_CHUNK_HEADER_SIZE;
     chunk_shared->used = size;
-    *shared = (HashJoinTuple)chunk_shared->data;
+    *shared = (HashJoinTuple)HASH_CHUNK_DATA(chunk_shared);
     /*
      * Push it onto the list of chunks, so that it can be found if we need to
@@ -2892,8 +2888,8 @@ static HashJoinTuple ExecParallelHashTupleAlloc(HashJoinTable hashtable, size_t
         hashtable->current_chunk_shared = chunk_shared;
     }
     LWLockRelease(&pstate->lock);
-    Assert(chunk_shared->data == (void*)dsa_get_address(hashtable->area, *shared));
-    return (HashJoinTuple)chunk_shared->data;
+    Assert(HASH_CHUNK_DATA(chunk_shared) == (void*)dsa_get_address(hashtable->area, *shared));
+    return (HashJoinTuple)HASH_CHUNK_DATA(chunk_shared);
 }
 /*

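All of the dense_alloc() hunks above follow the same bump-allocation pattern: keep packing MAXALIGN'd tuples into the current 32 kB chunk, and start a fresh chunk when the next tuple does not fit (oversized tuples beyond HASH_CHUNK_THRESHOLD get a chunk of their own, which this sketch omits). The self-contained sketch below uses hypothetical names (Chunk, chunk_alloc, CHUNK_DATA), not the openGauss sources, and again assumes an 8-byte maximum alignment; because CHUNK_DATA() starts at a maxaligned offset past the header, every pointer it hands out stays maxaligned.

/*
 * Self-contained sketch with hypothetical names (Chunk, chunk_alloc,
 * CHUNK_DATA); not the openGauss sources.  Maximum alignment is assumed
 * to be 8 bytes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(LEN) \
    (((size_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))

typedef struct Chunk {
    int    ntuples;         /* number of tuples stored in this chunk */
    size_t maxlen;          /* size of the chunk's tuple buffer */
    size_t used;            /* bytes of that buffer already handed out */
    struct Chunk *next;     /* next chunk in the list (unused here) */
    /* the tuple buffer starts at CHUNK_HEADER_SIZE, not at a flexible array */
} Chunk;

#define CHUNK_SIZE        (32 * 1024L)
#define CHUNK_HEADER_SIZE MAXALIGN(sizeof(Chunk))
#define CHUNK_DATA(c)     (((char *) (c)) + CHUNK_HEADER_SIZE)

/* Bump-allocate "size" bytes from the chunk; NULL means "chunk is full". */
static void *chunk_alloc(Chunk *chunk, size_t size)
{
    size = MAXALIGN(size);
    if (chunk->maxlen - chunk->used < size)
        return NULL;
    void *ptr = CHUNK_DATA(chunk) + chunk->used;
    chunk->used += size;
    chunk->ntuples += 1;
    return ptr;
}

int main(void)
{
    Chunk *chunk = malloc(CHUNK_HEADER_SIZE + CHUNK_SIZE);

    chunk->ntuples = 0;
    chunk->maxlen = CHUNK_SIZE;
    chunk->used = 0;
    chunk->next = NULL;

    /* every returned pointer is maxaligned because CHUNK_DATA() is */
    for (size_t len = 1; len <= 40; len += 13) {
        char *tup = chunk_alloc(chunk, len);
        printf("len=%2zu -> offset %2zu, maxaligned=%d\n", len,
               (size_t) (tup - CHUNK_DATA(chunk)),
               (int) (((uintptr_t) tup % MAXIMUM_ALIGNOF) == 0));
    }
    free(chunk);
    return 0;
}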
View File

@@ -114,7 +114,7 @@ typedef struct HashSkewBucket {
  */
 typedef struct HashMemoryChunkData {
     int ntuples; /* number of tuples stored in this chunk */
-    size_t maxlen; /* size of the buffer holding the tuples */
+    size_t maxlen; /* size of the chunk's tuple buffer */
     size_t used; /* number of buffer bytes already used */
     /* pointer to the next chunk (linked list) */
@@ -122,13 +122,19 @@ typedef struct HashMemoryChunkData {
         HashMemoryChunkData* unshared;
         HashMemoryChunkData* shared;
     } next;
-    char data[FLEXIBLE_ARRAY_MEMBER]; /* buffer allocated at the end */
+    /*
+     * The chunk's tuple buffer starts after the HashMemoryChunkData struct,
+     * at offset HASH_CHUNK_HEADER_SIZE (which must be maxaligned). Note that
+     * that offset is not included in "maxlen" or "used".
+     */
 } HashMemoryChunkData;
 typedef struct HashMemoryChunkData* HashMemoryChunk;
 #define HASH_CHUNK_SIZE (32 * 1024L)
-#define HASH_CHUNK_HEADER_SIZE (offsetof(HashMemoryChunkData, data))
+#define HASH_CHUNK_HEADER_SIZE MAXALIGN(sizeof(HashMemoryChunkData))
+#define HASH_CHUNK_DATA(hc) (((char *) (hc)) + HASH_CHUNK_HEADER_SIZE)
 /* tuples exceeding HASH_CHUNK_THRESHOLD bytes are put in their own chunk */
 #define HASH_CHUNK_THRESHOLD (HASH_CHUNK_SIZE / 4)
 /*
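Because HASH_CHUNK_HEADER_SIZE is now MAXALIGN(sizeof(HashMemoryChunkData)), the maxalignment of the tuple buffer can be verified at compile time. The fragment below is a hypothetical standalone check, not part of the commit: it uses C11 _Static_assert, a simplified copy of the struct, and MAXALIGN again assumed to round up to 8 bytes.

/* Hypothetical standalone check, not part of the commit. */
#include <stddef.h>

#define MAXIMUM_ALIGNOF 8
#define MAXALIGN(LEN) \
    (((size_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))

typedef struct ChunkHeaderSketch {  /* simplified copy of HashMemoryChunkData */
    int    ntuples;
    size_t maxlen;
    size_t used;
    void  *next;
} ChunkHeaderSketch;

#define SKETCH_CHUNK_HEADER_SIZE MAXALIGN(sizeof(ChunkHeaderSketch))

/* The payload offset is a multiple of the maximum alignment by construction. */
_Static_assert(SKETCH_CHUNK_HEADER_SIZE % MAXIMUM_ALIGNOF == 0,
               "chunk tuple buffer would not be maxaligned");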