Fix coredump when creating a hash index on segment storage

Author: z00848344
Date: 2023-08-23 15:00:37 +08:00
parent ab6e1c5616
commit ea1c30640e
3 changed files with 106 additions and 24 deletions


@@ -969,39 +969,46 @@ static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
     if (lastblock < firstblock || lastblock == InvalidBlockNumber)
         return false;
 
-    if (IsSegmentFileNode(rel->rd_node)) {
-        Buffer buf = ReadBuffer(rel, P_NEW);
-#ifdef USE_ASSERT_CHECKING
-        BufferDesc *buf_desc = GetBufferDescriptor(buf - 1);
-        Assert(buf_desc->tag.blockNum == lastblock);
-#endif
-        ReleaseBuffer(buf);
-    } else {
-        page = (Page)zerobuf;
-        /*
-         * Initialize the page. Just zeroing the page won't work; see
-         * _hash_freeovflpage for similar usage. We take care to make the special
-         * space valid for the benefit of tools such as pageinspect.
-         */
-        _hash_pageinit(page, BLCKSZ);
+    /* change segment table insert hash table */
+    page = (Page)zerobuf;
+    /*
+     * Initialize the page. Just zeroing the page won't work; see
+     * _hash_freeovflpage for similar usage. We take care to make the special
+     * space valid for the benefit of tools such as pageinspect.
+     */
+    _hash_pageinit(page, BLCKSZ);
 
-        ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+    ovflopaque = (HashPageOpaque) PageGetSpecialPointer(page);
 
-        ovflopaque->hasho_prevblkno = InvalidBlockNumber;
-        ovflopaque->hasho_nextblkno = InvalidBlockNumber;
-        ovflopaque->hasho_bucket = -1;
-        ovflopaque->hasho_flag = LH_UNUSED_PAGE;
-        ovflopaque->hasho_page_id = HASHO_PAGE_ID;
+    ovflopaque->hasho_prevblkno = InvalidBlockNumber;
+    ovflopaque->hasho_nextblkno = InvalidBlockNumber;
+    ovflopaque->hasho_bucket = -1;
+    ovflopaque->hasho_flag = LH_UNUSED_PAGE;
+    ovflopaque->hasho_page_id = HASHO_PAGE_ID;
+    PageSetChecksumInplace(zerobuf, lastblock);
 
-        if (RelationNeedsWAL(rel))
-            log_newpage(&rel->rd_node,
-                        MAIN_FORKNUM,
-                        lastblock,
-                        zerobuf,
-                        true);
+    if (RelationNeedsWAL(rel))
+        log_newpage(&rel->rd_node,
+                    MAIN_FORKNUM,
+                    lastblock,
+                    zerobuf,
+                    true);
 
+    if (IsSegmentFileNode(rel->rd_node)) {
+        Buffer buf;
+        for (int i = firstblock; i <= lastblock; i++) {
+            buf = ReadBuffer(rel, P_NEW);
+            ReleaseBuffer(buf);
+        }
+        buf = ReadBuffer(rel, lastblock);
+        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+        errno_t rc = memcpy_s(BufferGetPage(buf), BLCKSZ, page, BLCKSZ);
+        securec_check(rc, "", "");
+        MarkBufferDirty(buf);
+        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+        ReleaseBuffer(buf);
+    } else {
         RelationOpenSmgr(rel);
-        PageSetChecksumInplace(zerobuf, lastblock);
         smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, false);
     }
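Why the fix takes this shape: the old segment-storage branch extended the relation with a single ReadBuffer(rel, P_NEW) call and merely asserted that the new page landed at lastblock, so any bucket doubling that needed more than one new page left blocks unallocated and the final bucket page uninitialized, which is the coredump this commit addresses. Segment-page storage cannot be extended by writing past the end of the file with smgrextend the way row storage can; each block has to be allocated through the buffer manager so the segment layer assigns it physical space. Below is a minimal sketch of that allocation idiom, separate from the commit itself: the helper name extend_relation_to is illustrative, and standard PostgreSQL buffer-manager calls and headers are assumed.

#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

/*
 * Illustrative sketch only: ensure every block up to lastblock exists.
 * Each ReadBuffer(rel, P_NEW) call extends the relation by exactly one
 * block and returns that block pinned; the previous block's pin is
 * dropped (the block itself stays allocated), and the last block is
 * returned still pinned so the caller can lock it, copy the initialized
 * page image in, and mark it dirty, as the fixed _hash_alloc_buckets does.
 */
static Buffer extend_relation_to(Relation rel, BlockNumber firstblock, BlockNumber lastblock)
{
    Buffer buf = InvalidBuffer;

    for (BlockNumber blkno = firstblock; blkno <= lastblock; blkno++) {
        if (BufferIsValid(buf))
            ReleaseBuffer(buf);       /* unpin the previous block */
        buf = ReadBuffer(rel, P_NEW); /* allocate the next block */
    }
    return buf; /* pinned buffer for lastblock */
}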


@@ -240,3 +240,53 @@ select count(*) from hash_table_7;
   1000
 (1 row)
 
+-- create hash index on segment table, update/delete
+drop table if exists hash_table_8;
+NOTICE:  table "hash_table_8" does not exist, skipping
+create table hash_table_8(id int, name varchar, sex varchar default 'male') with(segment = on);
+create index hash_tb8_id1 on hash_table_8 using hash(id);
+insert into hash_table_8 select generate_series(1, 1000), 'xxx', 'xxx';
+select count(*) from hash_table_8;
+ count
+-------
+  1000
+(1 row)
+
+update hash_table_8 set sex = tmp.sex from (values (10, 'xxx', 'female'), (20, 'xxx', 'female'), (30, 'xxx', 'female')) as tmp (id, name, sex) where hash_table_8.id = tmp.id;
+select * from hash_table_8 where sex = 'female';
+ id | name |  sex
+----+------+--------
+ 10 | xxx  | female
+ 20 | xxx  | female
+ 30 | xxx  | female
+(3 rows)
+
+delete from hash_table_8 where sex = 'female';
+select * from hash_table_8 where sex = 'female';
+ id | name | sex
+----+------+-----
+(0 rows)
+
+drop index hash_tb8_id1;
+drop table hash_table_8;
+-- create hash index on segment table, delete/vacuum
+drop table if exists hash_table_9;
+NOTICE:  table "hash_table_9" does not exist, skipping
+create table hash_table_9(id int, name varchar, sec varchar default 'male') with (segment = on);
+create index hash_tb9_id1 on hash_table_9 using hash(id);
+insert into hash_table_9 select generate_series(1, 1000), 'XXX', 'XXX';
+insert into hash_table_9 select generate_series(1, 200), 'AAA', 'AAA';
+select count(*) from hash_table_9 where name = 'AAA';
+ count
+-------
+   200
+(1 row)
+
+delete from hash_table_9 where name = 'AAA';
+select * from hash_table_9 where name = 'AAA';
+ id | name | sec
+----+------+-----
+(0 rows)
+
+drop index hash_tb9_id1;
+drop table hash_table_9;


@@ -176,3 +176,28 @@ insert into hash_table_7 select random()*100, 'XXX', 'XXX' from generate_series(
 create index hash_t7_id1 on hash_table_7 using hash(id) with (fillfactor = 30);
 explain (costs off) select * from hash_table_7 where id = 80;
 select count(*) from hash_table_7;
+-- create hash index on segment table, update/delete
+drop table if exists hash_table_8;
+create table hash_table_8(id int, name varchar, sex varchar default 'male') with(segment = on);
+create index hash_tb8_id1 on hash_table_8 using hash(id);
+insert into hash_table_8 select generate_series(1, 1000), 'xxx', 'xxx';
+select count(*) from hash_table_8;
+update hash_table_8 set sex = tmp.sex from (values (10, 'xxx', 'female'), (20, 'xxx', 'female'), (30, 'xxx', 'female')) as tmp (id, name, sex) where hash_table_8.id = tmp.id;
+select * from hash_table_8 where sex = 'female';
+delete from hash_table_8 where sex = 'female';
+select * from hash_table_8 where sex = 'female';
+drop index hash_tb8_id1;
+drop table hash_table_8;
+-- create hash index on segment table, delete/vacuum
+drop table if exists hash_table_9;
+create table hash_table_9(id int, name varchar, sec varchar default 'male') with (segment = on);
+create index hash_tb9_id1 on hash_table_9 using hash(id);
+insert into hash_table_9 select generate_series(1, 1000), 'XXX', 'XXX';
+insert into hash_table_9 select generate_series(1, 200), 'AAA', 'AAA';
+select count(*) from hash_table_9 where name = 'AAA';
+delete from hash_table_9 where name = 'AAA';
+select * from hash_table_9 where name = 'AAA';
+drop index hash_tb9_id1;
+drop table hash_table_9;