/*-------------------------------------------------------------------------
 *
 * Hash table page management code for the Postgres hash access method
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES
 *	  Postgres hash pages look like ordinary relation pages.  The opaque
 *	  data at high addresses includes information about the page including
 *	  whether a page is an overflow page or a true bucket, the bucket
 *	  number, and the block numbers of the preceding and following pages
 *	  in the same bucket.
 *
 *	  The first page in a hash relation, page zero, is special -- it stores
 *	  information describing the hash table; it is referred to as the
 *	  "meta page."  Pages one and higher store the actual data.
 *
 *	  There are also bitmap pages, which are not manipulated here;
 *	  see hashovfl.c.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/lsyscache.h"

static bool _hash_alloc_buckets(Relation rel, BlockNumber firstblock,
                                uint32 nblocks);
static void _hash_splitbucket(Relation rel, Buffer metabuf,
                              Bucket obucket, Bucket nbucket,
                              BlockNumber start_oblkno,
                              BlockNumber start_nblkno,
                              uint32 maxbucket,
                              uint32 highmask, uint32 lowmask);

/*
 * We use high-concurrency locking on hash indexes (see README for an overview
 * of the locking rules).  However, we can skip taking lmgr locks when the
 * index is local to the current backend (ie, either temp or new in the
 * current transaction).  No one else can see it, so there's no reason to
 * take locks.  We still take buffer-level locks, but not lmgr locks.
 */
#define USELOCKING(rel)		(!RELATION_IS_LOCAL(rel))

/*
 * _hash_getlock() -- Acquire an lmgr lock.
 *
 * 'whichlock' should be zero to acquire the split-control lock, or the
 * block number of a bucket's primary bucket page to acquire the per-bucket
 * lock.  (See README for details of the use of these locks.)
 *
 * 'access' must be HASH_SHARE or HASH_EXCLUSIVE.
 */
void
_hash_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        LockPage(rel, whichlock, access);
}

/*
 * _hash_try_getlock() -- Acquire an lmgr lock, but only if it's free.
 *
 * Same as above except we return FALSE without blocking if lock isn't free.
 */
bool
_hash_try_getlock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        return ConditionalLockPage(rel, whichlock, access);
    else
        return true;
}

/*
 * _hash_droplock() -- Release an lmgr lock.
 */
void
_hash_droplock(Relation rel, BlockNumber whichlock, int access)
{
    if (USELOCKING(rel))
        UnlockPage(rel, whichlock, access);
}

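/*
 * Illustrative usage of the three lmgr-lock helpers above (a sketch only,
 * not copied from an actual call site; real callers live in this file and
 * in the scan code):
 *
 *		if (_hash_try_getlock(rel, blkno, HASH_EXCLUSIVE))
 *		{
 *			... do work requiring exclusive access to the bucket ...
 *			_hash_droplock(rel, blkno, HASH_EXCLUSIVE);
 *		}
 *
 * _hash_getlock() is the blocking variant of _hash_try_getlock().  When
 * USELOCKING(rel) is false, _hash_getlock() does nothing and
 * _hash_try_getlock() simply returns true.
 */
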
/*
 * _hash_getbuf() -- Get a buffer by block number for read or write.
 *
 * 'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
 * 'flags' is a bitwise OR of the allowed page types.
 *
 * This must be used only to fetch pages that are expected to be valid
 * already.  _hash_checkpage() is applied using the given flags.
 *
 * When this routine returns, the appropriate lock is set on the
 * requested buffer and its reference count has been incremented
 * (ie, the buffer is "locked and pinned").
 *
 * P_NEW is disallowed because this routine can only be used
 * to access pages that are known to be before the filesystem EOF.
 * Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
{
    Buffer		buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBuffer(rel, blkno);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}

/*
 * _hash_getinitbuf() -- Get and initialize a buffer by block number.
 *
 * This must be used only to fetch pages that are known to be before
 * the index's filesystem EOF, but are to be filled from scratch.
 * _hash_pageinit() is applied automatically.  Otherwise it has
 * effects similar to _hash_getbuf() with access = HASH_WRITE.
 *
 * When this routine returns, a write lock is set on the
 * requested buffer and its reference count has been incremented
 * (ie, the buffer is "locked and pinned").
 *
 * P_NEW is disallowed because this routine can only be used
 * to access pages that are known to be before the filesystem EOF.
 * Extending the index should be done with _hash_getnewbuf.
 */
Buffer
_hash_getinitbuf(Relation rel, BlockNumber blkno)
{
    Buffer		buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadOrZeroBuffer(rel, MAIN_FORKNUM, blkno);

    LockBuffer(buf, HASH_WRITE);

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}

/*
 * _hash_getnewbuf() -- Get a new page at the end of the index.
 *
 * This has the same API as _hash_getinitbuf, except that we are adding
 * a page to the index, and hence expect the page to be past the
 * logical EOF.  (However, we have to support the case where it isn't,
 * since a prior try might have crashed after extending the filesystem
 * EOF but before updating the metapage to reflect the added page.)
 *
 * It is caller's responsibility to ensure that only one process can
 * extend the index at a time.
 */
Buffer
_hash_getnewbuf(Relation rel, BlockNumber blkno)
{
    BlockNumber nblocks = RelationGetNumberOfBlocks(rel);
    Buffer		buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");
    if (blkno > nblocks)
        elog(ERROR, "access to noncontiguous page in hash index \"%s\"",
             RelationGetRelationName(rel));

    /* smgr insists we use P_NEW to extend the relation */
    if (blkno == nblocks)
    {
        buf = ReadBuffer(rel, P_NEW);
        if (BufferGetBlockNumber(buf) != blkno)
            elog(ERROR, "unexpected hash relation size: %u, should be %u",
                 BufferGetBlockNumber(buf), blkno);
    }
    else
        buf = ReadOrZeroBuffer(rel, MAIN_FORKNUM, blkno);

    LockBuffer(buf, HASH_WRITE);

    /* ref count and lock type are correct */

    /* initialize the page */
    _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));

    return buf;
}

/*
 * _hash_getbuf_with_strategy() -- Get a buffer with nondefault strategy.
 *
 * This is identical to _hash_getbuf() but also allows a buffer access
 * strategy to be specified.  We use this for VACUUM operations.
 */
Buffer
_hash_getbuf_with_strategy(Relation rel, BlockNumber blkno,
                           int access, int flags,
                           BufferAccessStrategy bstrategy)
{
    Buffer		buf;

    if (blkno == P_NEW)
        elog(ERROR, "hash AM does not use P_NEW");

    buf = ReadBufferWithStrategy(rel, blkno, bstrategy);

    if (access != HASH_NOLOCK)
        LockBuffer(buf, access);

    /* ref count and lock type are correct */

    _hash_checkpage(rel, buf, flags);

    return buf;
}

/*
 * _hash_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_hash_relbuf(Relation rel, Buffer buf)
{
    UnlockReleaseBuffer(buf);
}

/*
 * _hash_dropbuf() -- release an unlocked buffer.
 *
 * This is used to unpin a buffer on which we hold no lock.
 */
void
_hash_dropbuf(Relation rel, Buffer buf)
{
    ReleaseBuffer(buf);
}

/*
 * _hash_wrtbuf() -- write a hash page to disk.
 *
 * This routine releases the lock held on the buffer and our refcount
 * for it.  It is an error to call _hash_wrtbuf() without a write lock
 * and a pin on the buffer.
 *
 * NOTE: this routine should go away when/if hash indexes are WAL-ified.
 * The correct sequence of operations is to mark the buffer dirty, then
 * write the WAL record, then release the lock and pin; so marking dirty
 * can't be combined with releasing.
 */
void
_hash_wrtbuf(Relation rel, Buffer buf)
{
    MarkBufferDirty(buf);
    UnlockReleaseBuffer(buf);
}

/*
 * _hash_chgbufaccess() -- Change the lock type on a buffer, without
 *			dropping our pin on it.
 *
 * from_access and to_access may be HASH_READ, HASH_WRITE, or HASH_NOLOCK,
 * the last indicating that no buffer-level lock is held or wanted.
 *
 * When from_access == HASH_WRITE, we assume the buffer is dirty and tell
 * bufmgr it must be written out.  If the caller wants to release a write
 * lock on a page that's not been modified, it's okay to pass from_access
 * as HASH_READ (a bit ugly, but handy in some places).
 */
void
_hash_chgbufaccess(Relation rel, Buffer buf, int from_access, int to_access)
{
    if (from_access == HASH_WRITE)
        MarkBufferDirty(buf);
    if (from_access != HASH_NOLOCK)
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    if (to_access != HASH_NOLOCK)
        LockBuffer(buf, to_access);
}

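/*
 * Illustrative calling pattern for _hash_chgbufaccess() (a sketch, not a
 * verbatim excerpt from any caller): to modify a pinned-but-unlocked page
 * and then give up the lock while keeping the pin,
 *
 *		_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
 *		... scribble on the page ...
 *		_hash_chgbufaccess(rel, buf, HASH_WRITE, HASH_NOLOCK);
 *
 * The second call marks the buffer dirty before releasing the lock, which
 * is why callers that did not modify the page pass HASH_READ instead.
 */
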
/*
 * _hash_metapinit() -- Initialize the metadata page of a hash index,
 *				the initial buckets, and the initial bitmap page.
 *
 * The initial number of buckets is dependent on num_tuples, an estimate
 * of the number of tuples to be loaded into the index initially.  The
 * chosen number of buckets is returned.
 *
 * We are fairly cavalier about locking here, since we know that no one else
 * could be accessing this index.  In particular the rule about not holding
 * multiple buffer locks is ignored.
 */
uint32
_hash_metapinit(Relation rel, double num_tuples)
{
    HashMetaPage metap;
    HashPageOpaque pageopaque;
    Buffer		metabuf;
    Buffer		buf;
    Page		pg;
    int32		data_width;
    int32		item_width;
    int32		ffactor;
    double		dnumbuckets;
    uint32		num_buckets;
    uint32		log2_num_buckets;
    uint32		i;

    /* safety check */
    if (RelationGetNumberOfBlocks(rel) != 0)
        elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
             RelationGetRelationName(rel));

    /*
     * Determine the target fill factor (in tuples per bucket) for this index.
     * The idea is to make the fill factor correspond to pages about as full
     * as the user-settable fillfactor parameter says.  We can compute it
     * exactly since the index datatype (i.e. uint32 hash key) is fixed-width.
     */
    data_width = sizeof(uint32);
    item_width = MAXALIGN(sizeof(IndexTupleData)) + MAXALIGN(data_width) +
        sizeof(ItemIdData);		/* include the line pointer */
    ffactor = RelationGetTargetPageUsage(rel, HASH_DEFAULT_FILLFACTOR) / item_width;
    /* keep to a sane range */
    if (ffactor < 10)
        ffactor = 10;
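
    /*
     * Worked example (illustrative only; the exact numbers depend on BLCKSZ,
     * MAXALIGN, and the index's fillfactor setting): with 8K blocks, 8-byte
     * MAXALIGN, and the default hash fillfactor of 75%, item_width comes out
     * to 8 + 8 + 4 = 20 bytes and the target page usage to about 6144 bytes,
     * so ffactor is roughly 300 tuples per bucket page.
     */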

    /*
     * Choose the number of initial bucket pages to match the fill factor
     * given the estimated number of tuples.  We round up the result to the
     * next power of 2, however, and always force at least 2 bucket pages.
     * The upper limit is determined by considerations explained in
     * _hash_expandtable().
     */
    dnumbuckets = num_tuples / ffactor;
    if (dnumbuckets <= 2.0)
        num_buckets = 2;
    else if (dnumbuckets >= (double) 0x40000000)
        num_buckets = 0x40000000;
    else
        num_buckets = ((uint32) 1) << _hash_log2((uint32) dnumbuckets);

    log2_num_buckets = _hash_log2(num_buckets);
    Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
    Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
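
    /*
     * For example (illustrative numbers only): an estimate of 100000 tuples
     * with ffactor around 300 gives dnumbuckets of roughly 333, which is
     * rounded up to the next power of 2, so the index starts out with 512
     * bucket pages.
     */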

    /*
     * We initialize the metapage, the first N bucket pages, and the first
     * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
     * calls to occur.  This ensures that the smgr level has the right idea of
     * the physical index length.
     */
    metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
    pg = BufferGetPage(metabuf);

    pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
    pageopaque->hasho_prevblkno = InvalidBlockNumber;
    pageopaque->hasho_nextblkno = InvalidBlockNumber;
    pageopaque->hasho_bucket = -1;
    pageopaque->hasho_flag = LH_META_PAGE;
    pageopaque->hasho_page_id = HASHO_PAGE_ID;

    metap = HashPageGetMeta(pg);

    metap->hashm_magic = HASH_MAGIC;
    metap->hashm_version = HASH_VERSION;
    metap->hashm_ntuples = 0;
    metap->hashm_nmaps = 0;
    metap->hashm_ffactor = ffactor;
    metap->hashm_bsize = HashGetMaxBitmapSize(pg);
    /* find largest bitmap array size that will fit in page size */
    for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
    {
        if ((1 << i) <= metap->hashm_bsize)
            break;
    }
    Assert(i > 0);
    metap->hashm_bmsize = 1 << i;
    metap->hashm_bmshift = i + BYTE_TO_BIT;
    Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
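
    /*
     * Illustrative result (assuming BLCKSZ = 8192): hashm_bsize comes out a
     * bit under 8K, the loop above settles on i = 12, so hashm_bmsize is
     * 4096 bytes and hashm_bmshift is 15, meaning each bitmap page tracks
     * 4096 * 8 = 32768 overflow pages.
     */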

    /*
     * Label the index with its primary hash support function's OID.  This is
     * pretty useless for normal operation (in fact, hashm_procid is not used
     * anywhere), but it might be handy for forensic purposes so we keep it.
     */
    metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);

    /*
     * We initialize the index with N buckets, 0 .. N-1, occupying physical
     * blocks 1 to N.  The first freespace bitmap page is in block N+1.
     * Since N is a power of 2, we can set the masks this way:
     */
    metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
    metap->hashm_highmask = (num_buckets << 1) - 1;
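
    /*
     * Illustrative example: with num_buckets = 4, hashm_maxbucket and
     * hashm_lowmask are both 3 (binary 011) and hashm_highmask is 7 (binary
     * 111); buckets 0-3 occupy blocks 1-4 and the first bitmap page goes in
     * block 5.
     */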

    MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
    MemSet(metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));

    /* Set up mapping for one spare page after the initial splitpoints */
    metap->hashm_spares[log2_num_buckets] = 1;
    metap->hashm_ovflpoint = log2_num_buckets;
    metap->hashm_firstfree = 0;
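
    /*
     * Continuing the num_buckets = 4 illustration: log2_num_buckets is 2, so
     * hashm_spares[2] = 1 records that one non-bucket page (the bitmap page
     * at block 5) precedes any buckets added at later split points, and
     * BUCKET_TO_BLKNO will skip over it when the table is later expanded.
     */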

    /*
     * Release buffer lock on the metapage while we initialize buckets.
     * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
     * won't accomplish anything.  It's a bad idea to hold buffer locks for
     * long intervals in any case, since that can block the bgwriter.
     */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /*
     * Initialize the first N buckets
     */
    for (i = 0; i < num_buckets; i++)
    {
        /* Allow interrupts, in case N is huge */
        CHECK_FOR_INTERRUPTS();

        buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i));
        pg = BufferGetPage(buf);
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
        pageopaque->hasho_prevblkno = InvalidBlockNumber;
        pageopaque->hasho_nextblkno = InvalidBlockNumber;
        pageopaque->hasho_bucket = i;
        pageopaque->hasho_flag = LH_BUCKET_PAGE;
        pageopaque->hasho_page_id = HASHO_PAGE_ID;
        _hash_wrtbuf(rel, buf);
    }

    /* Now reacquire buffer lock on metapage */
    _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

    /*
     * Initialize first bitmap page
     */
    _hash_initbitmap(rel, metap, num_buckets + 1);

    /* all done */
    _hash_wrtbuf(rel, metabuf);

    return num_buckets;
}

/*
 * _hash_pageinit() -- Initialize a new hash index page.
 */
void
_hash_pageinit(Page page, Size size)
{
    Assert(PageIsNew(page));
    PageInit(page, size, sizeof(HashPageOpaqueData));
}

/*
 * Attempt to expand the hash table by creating one new bucket.
 *
 * This will silently do nothing if it cannot get the needed locks.
 *
 * The caller should hold no locks on the hash index.
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.
 */
void
_hash_expandtable(Relation rel, Buffer metabuf)
{
    HashMetaPage metap;
    Bucket		old_bucket;
    Bucket		new_bucket;
    uint32		spare_ndx;
    BlockNumber start_oblkno;
    BlockNumber start_nblkno;
    uint32		maxbucket;
    uint32		highmask;
    uint32		lowmask;

    /*
     * Obtain the page-zero lock to assert the right to begin a split (see
     * README).
     *
     * Note: deadlock should be impossible here.  Our own backend could only
     * be holding bucket sharelocks due to stopped indexscans; those will not
     * block other holders of the page-zero lock, who are only interested in
     * acquiring bucket sharelocks themselves.  Exclusive bucket locks are
     * only taken here and in hashbulkdelete, and neither of these operations
     * needs any additional locks to complete.  (If, due to some flaw in this
     * reasoning, we manage to deadlock anyway, it's okay to error out; the
     * index will be left in a consistent state.)
     */
    _hash_getlock(rel, 0, HASH_EXCLUSIVE);

    /* Write-lock the meta page */
    _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);

    _hash_checkpage(rel, metabuf, LH_META_PAGE);
    metap = HashPageGetMeta(BufferGetPage(metabuf));

    /*
     * Check to see if split is still needed; someone else might have already
     * done one while we waited for the lock.
     *
     * Make sure this stays in sync with _hash_doinsert()
     */
    if (metap->hashm_ntuples <=
        (double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1))
        goto fail;

    /*
     * Can't split anymore if maxbucket has reached its maximum possible
     * value.
     *
     * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
     * the calculation maxbucket+1 mustn't overflow).  Currently we restrict
     * to half that because of overflow looping in _hash_log2() and
     * insufficient space in hashm_spares[].  It's moot anyway because an
     * index with 2^32 buckets would certainly overflow BlockNumber and hence
     * _hash_alloc_buckets() would fail, but if we supported buckets smaller
     * than a disk block then this would be an independent constraint.
     *
     * If you change this, see also the maximum initial number of buckets
     * in _hash_metapinit().
     */
    if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
        goto fail;

    /*
     * Determine which bucket is to be split, and attempt to lock the old
     * bucket.  If we can't get the lock, give up.
     *
     * The lock protects us against other backends, but not against our own
     * backend.  Must check for active scans separately.
     */
    new_bucket = metap->hashm_maxbucket + 1;

    old_bucket = (new_bucket & metap->hashm_lowmask);

    start_oblkno = BUCKET_TO_BLKNO(metap, old_bucket);
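
    /*
     * Worked example (illustrative): if hashm_maxbucket is 5, then
     * new_bucket is 6; with hashm_lowmask = 3, old_bucket = 6 & 3 = 2, so
     * bucket 2 is the one whose tuples get redistributed between buckets 2
     * and 6.
     */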

    if (_hash_has_active_scan(rel, old_bucket))
        goto fail;

    if (!_hash_try_getlock(rel, start_oblkno, HASH_EXCLUSIVE))
        goto fail;

    /*
     * Likewise lock the new bucket (should never fail).
     *
     * Note: it is safe to compute the new bucket's blkno here, even though we
     * may still need to update the BUCKET_TO_BLKNO mapping.  This is because
     * the current value of hashm_spares[hashm_ovflpoint] correctly shows
     * where we are going to put a new splitpoint's worth of buckets.
     */
    start_nblkno = BUCKET_TO_BLKNO(metap, new_bucket);

    if (_hash_has_active_scan(rel, new_bucket))
        elog(ERROR, "scan in progress on supposedly new bucket");

    if (!_hash_try_getlock(rel, start_nblkno, HASH_EXCLUSIVE))
        elog(ERROR, "could not get lock on supposedly new bucket");

    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to allocate a new batch of bucket pages.
     */
    spare_ndx = _hash_log2(new_bucket + 1);
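
    /*
     * Illustrative: while new_bucket stays within the current splitpoint
     * (say new_bucket = 6, for which _hash_log2(7) = 3, matching an
     * hashm_ovflpoint of 3), no new pages are needed; when new_bucket
     * reaches the next power of 2 (say 8, for which _hash_log2(9) = 4), a
     * whole batch of new_bucket pages is allocated below.
     */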

    if (spare_ndx > metap->hashm_ovflpoint)
    {
        Assert(spare_ndx == metap->hashm_ovflpoint + 1);

        /*
         * The number of buckets in the new splitpoint is equal to the total
         * number already in existence, i.e. new_bucket.  Currently this maps
         * one-to-one to blocks required, but someday we may need a more
         * complicated calculation here.
         */
        if (!_hash_alloc_buckets(rel, start_nblkno, new_bucket))
        {
            /* can't split due to BlockNumber overflow */
            _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
            _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);
            goto fail;
        }
    }

    /*
     * Okay to proceed with split.  Update the metapage bucket mapping info.
     *
     * Since we are scribbling on the metapage data right in the shared
     * buffer, any failure in this next little bit leaves us with a big
     * problem: the metapage is effectively corrupt but could get written back
     * to disk.  We don't really expect any failure, but just to be sure,
     * establish a critical section.
     */
    START_CRIT_SECTION();

    metap->hashm_maxbucket = new_bucket;

    if (new_bucket > metap->hashm_highmask)
    {
        /* Starting a new doubling */
        metap->hashm_lowmask = metap->hashm_highmask;
        metap->hashm_highmask = new_bucket | metap->hashm_lowmask;
    }

    /*
     * If the split point is increasing (hashm_maxbucket's log base 2
     * increases), we need to adjust the hashm_spares[] array and
     * hashm_ovflpoint so that future overflow pages will be created beyond
     * this new batch of bucket pages.
     */
    if (spare_ndx > metap->hashm_ovflpoint)
    {
        metap->hashm_spares[spare_ndx] = metap->hashm_spares[metap->hashm_ovflpoint];
        metap->hashm_ovflpoint = spare_ndx;
    }

    /* Done mucking with metapage */
    END_CRIT_SECTION();

    /*
     * Copy bucket mapping info now; this saves re-accessing the meta page
     * inside _hash_splitbucket's inner loop.  Note that once we drop the
     * split lock, other splits could begin, so these values might be out of
     * date before _hash_splitbucket finishes.  That's okay, since all it
     * needs is to tell which of these two buckets to map hashkeys into.
     */
    maxbucket = metap->hashm_maxbucket;
    highmask = metap->hashm_highmask;
    lowmask = metap->hashm_lowmask;

    /* Write out the metapage and drop lock, but keep pin */
    _hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);

    /* Release split lock; okay for other splits to occur now */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);

    /* Relocate records to the new bucket */
    _hash_splitbucket(rel, metabuf, old_bucket, new_bucket,
                      start_oblkno, start_nblkno,
                      maxbucket, highmask, lowmask);

    /* Release bucket locks, allowing others to access them */
    _hash_droplock(rel, start_oblkno, HASH_EXCLUSIVE);
    _hash_droplock(rel, start_nblkno, HASH_EXCLUSIVE);

    return;

    /* Here if decide not to split or fail to acquire old bucket lock */
fail:

    /* We didn't write the metapage, so just drop lock */
    _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);

    /* Release split lock */
    _hash_droplock(rel, 0, HASH_EXCLUSIVE);
}

/*
 * _hash_alloc_buckets -- allocate a new splitpoint's worth of bucket pages
 *
 * This does not need to initialize the new bucket pages; we'll do that as
 * each one is used by _hash_expandtable().  But we have to extend the logical
 * EOF to the end of the splitpoint; this keeps smgr's idea of the EOF in
 * sync with ours, so that we don't get complaints from smgr.
 *
 * We do this by writing a page of zeroes at the end of the splitpoint range.
 * We expect that the filesystem will ensure that the intervening pages read
 * as zeroes too.  On many filesystems this "hole" will not be allocated
 * immediately, which means that the index file may end up more fragmented
 * than if we forced it all to be allocated now; but since we don't scan
 * hash indexes sequentially anyway, that probably doesn't matter.
 *
 * XXX It's annoying that this code is executed with the metapage lock held.
 * We need to interlock against _hash_getovflpage() adding a new overflow page
 * concurrently, but it'd likely be better to use LockRelationForExtension
 * for the purpose.  OTOH, adding a splitpoint is a very infrequent operation,
 * so it may not be worth worrying about.
 *
 * Returns TRUE if successful, or FALSE if allocation failed due to
 * BlockNumber overflow.
 */
static bool
_hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
{
    BlockNumber lastblock;
    char		zerobuf[BLCKSZ];

    lastblock = firstblock + nblocks - 1;

    /*
     * Check for overflow in block number calculation; if so, we cannot extend
     * the index anymore.
     */
    if (lastblock < firstblock || lastblock == InvalidBlockNumber)
        return false;

    MemSet(zerobuf, 0, sizeof(zerobuf));

    RelationOpenSmgr(rel);
    smgrextend(rel->rd_smgr, MAIN_FORKNUM, lastblock, zerobuf, rel->rd_istemp);

    return true;
}

/*
 * _hash_splitbucket -- split 'obucket' into 'obucket' and 'nbucket'
 *
 * We are splitting a bucket that consists of a base bucket page and zero
 * or more overflow (bucket chain) pages.  We must relocate tuples that
 * belong in the new bucket, and compress out any free space in the old
 * bucket.
 *
 * The caller must hold exclusive locks on both buckets to ensure that
 * no one else is trying to access them (see README).
 *
 * The caller must hold a pin, but no lock, on the metapage buffer.
 * The buffer is returned in the same state.  (The metapage is only
 * touched if it becomes necessary to add or remove overflow pages.)
 */
static void
_hash_splitbucket(Relation rel,
                  Buffer metabuf,
                  Bucket obucket,
                  Bucket nbucket,
                  BlockNumber start_oblkno,
                  BlockNumber start_nblkno,
                  uint32 maxbucket,
                  uint32 highmask,
                  uint32 lowmask)
{
    Bucket		bucket;
    Buffer		obuf;
    Buffer		nbuf;
    BlockNumber oblkno;
    BlockNumber nblkno;
    HashPageOpaque oopaque;
    HashPageOpaque nopaque;
    IndexTuple	itup;
    Size		itemsz;
    OffsetNumber ooffnum;
    OffsetNumber noffnum;
    OffsetNumber omaxoffnum;
    Page		opage;
    Page		npage;

    /*
     * It should be okay to simultaneously write-lock pages from each bucket,
     * since no one else can be trying to acquire buffer lock on pages of
     * either bucket.
     */
    oblkno = start_oblkno;
    obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_BUCKET_PAGE);
    opage = BufferGetPage(obuf);
    oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);

    nblkno = start_nblkno;
    nbuf = _hash_getnewbuf(rel, nblkno);
    npage = BufferGetPage(nbuf);

    /* initialize the new bucket's primary page */
    nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
    nopaque->hasho_prevblkno = InvalidBlockNumber;
    nopaque->hasho_nextblkno = InvalidBlockNumber;
    nopaque->hasho_bucket = nbucket;
    nopaque->hasho_flag = LH_BUCKET_PAGE;
    nopaque->hasho_page_id = HASHO_PAGE_ID;

    /*
     * Partition the tuples in the old bucket between the old bucket and the
     * new bucket, advancing along the old bucket's overflow bucket chain and
     * adding overflow pages to the new bucket as needed.
     */
    ooffnum = FirstOffsetNumber;
    omaxoffnum = PageGetMaxOffsetNumber(opage);
    for (;;)
    {
        /*
         * at each iteration through this loop, each of these variables should
         * be up-to-date: obuf opage oopaque ooffnum omaxoffnum
         */

        /* check if we're at the end of the page */
        if (ooffnum > omaxoffnum)
        {
            /* at end of page, but check for an(other) overflow page */
            oblkno = oopaque->hasho_nextblkno;
            if (!BlockNumberIsValid(oblkno))
                break;

            /*
             * we ran out of tuples on this particular page, but we have more
             * overflow pages; advance to next page.
             */
            _hash_wrtbuf(rel, obuf);

            obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
            opage = BufferGetPage(obuf);
            oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
            ooffnum = FirstOffsetNumber;
            omaxoffnum = PageGetMaxOffsetNumber(opage);
            continue;
        }

        /*
         * Fetch the item's hash key (conveniently stored in the item)
         * and determine which bucket it now belongs in.
         */
        itup = (IndexTuple) PageGetItem(opage, PageGetItemId(opage, ooffnum));
        bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
                                      maxbucket, highmask, lowmask);
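
        /*
         * The mapping rule applied by _hash_hashkey2bucket() (sketched here
         * for reference; see hashutil.c for the real thing) is essentially:
         * take (hashkey & highmask), and if that exceeds maxbucket fall back
         * to (hashkey & lowmask).  A tuple in the old chain therefore lands
         * either in obucket or in nbucket, never anywhere else.
         */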

        if (bucket == nbucket)
        {
            /*
             * insert the tuple into the new bucket.  if it doesn't fit on the
             * current page in the new bucket, we must allocate a new overflow
             * page and place the tuple on that page instead.
             */
            itemsz = IndexTupleDSize(*itup);
            itemsz = MAXALIGN(itemsz);

            if (PageGetFreeSpace(npage) < itemsz)
            {
                /* write out nbuf and drop lock, but keep pin */
                _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
                /* chain to a new overflow page */
                nbuf = _hash_addovflpage(rel, metabuf, nbuf);
                npage = BufferGetPage(nbuf);
                /* we don't need nopaque within the loop */
            }

            noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
            if (PageAddItem(npage, (Item) itup, itemsz, noffnum, false, false)
                == InvalidOffsetNumber)
                elog(ERROR, "failed to add index item to \"%s\"",
                     RelationGetRelationName(rel));

            /*
             * now delete the tuple from the old bucket.  after this section
             * of code, 'ooffnum' will actually point to the ItemId to which
             * we would point if we had advanced it before the deletion
             * (PageIndexTupleDelete repacks the ItemId array).  this also
             * means that 'omaxoffnum' is exactly one less than it used to be,
             * so we really can just decrement it instead of calling
             * PageGetMaxOffsetNumber.
             */
            PageIndexTupleDelete(opage, ooffnum);
            omaxoffnum = OffsetNumberPrev(omaxoffnum);
        }
        else
        {
            /*
             * the tuple stays on this page.  we didn't move anything, so we
             * didn't delete anything and therefore we don't have to change
             * 'omaxoffnum'.
             */
            Assert(bucket == obucket);
            ooffnum = OffsetNumberNext(ooffnum);
        }
    }

    /*
     * We're at the end of the old bucket chain, so we're done partitioning
     * the tuples.  Before quitting, call _hash_squeezebucket to ensure the
     * tuples remaining in the old bucket (including the overflow pages) are
     * packed as tightly as possible.  The new bucket is already tight.
     */
    _hash_wrtbuf(rel, obuf);
    _hash_wrtbuf(rel, nbuf);

    _hash_squeezebucket(rel, obucket, start_oblkno, NULL);
}