/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *	  bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *		visibilitymap_clear  - clear bits for one page in the visibility map
 *		visibilitymap_pin    - pin a map page for setting a bit
 *		visibilitymap_pin_ok - check whether correct map page is already pinned
 *		visibilitymap_set    - set a bit in a previously pinned page
 *		visibilitymap_get_status - get status of bits
 *		visibilitymap_count  - count number of bits set in visibility map
 *		visibilitymap_prepare_truncate -
 *			prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page.  A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed.  A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * if a whole-table-scanning vacuum is required (e.g. an anti-wraparound
 * vacuum).  The all-frozen bit must be set only when the page is already
 * all-visible.
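 *
 * For example, given the flag values defined in visibilitymap.h (0x01 for
 * all-visible, 0x02 for all-frozen) and two bits per heap page, a map byte
 * of 0x0D (binary 00001101) says that, of the four heap pages covered by
 * that byte, the first is all-visible but not all-frozen, the second is
 * both all-visible and all-frozen, and the last two have neither bit set.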
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged.  The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL.  This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain.  The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit.  If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap page.
 * Otherwise, the next insert, update, or delete on the heap page will fail to
 * realize that the visibility map bit must be cleared, possibly causing
 * index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need
 * vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared.  In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification.  However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page.  To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page.  Then, we lock the buffer.  But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set.  If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer.  This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page.  That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"

/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes.  There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)
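
/*
 * Worked example of the mapping macros, assuming the default BLCKSZ of 8192
 * and a standard page header that MAXALIGNs to 24 bytes: MAPSIZE is
 * 8192 - 24 = 8168, HEAPBLOCKS_PER_BYTE is 8 / 2 = 4, and
 * HEAPBLOCKS_PER_PAGE is 8168 * 4 = 32672.  Heap block 40000 then maps to
 *
 *		HEAPBLK_TO_MAPBLOCK(40000) = 40000 / 32672        = 1
 *		HEAPBLK_TO_MAPBYTE(40000)  = (40000 % 32672) / 4  = 1832
 *		HEAPBLK_TO_OFFSET(40000)   = (40000 % 4) * 2      = 0
 *
 * i.e. its two bits are the low-order bits of byte 1832 in visibility map
 * block 1.
 */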

/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK64	UINT64CONST(0x5555555555555555) /* The lower bit of
														 * each bit pair */
#define FROZEN_MASK64	UINT64CONST(0xaaaaaaaaaaaaaaaa) /* The upper bit of
														 * each bit pair */
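
/*
 * To illustrate how the masks are meant to be used: each uint64 word of the
 * map covers 32 heap pages, with the all-visible bit in the lower (even)
 * position of each bit pair and the all-frozen bit in the upper (odd)
 * position.  So pg_popcount64(word & VISIBLE_MASK64) is the number of
 * all-visible pages covered by that word, and pg_popcount64(word &
 * FROZEN_MASK64) the number of all-frozen pages; visibilitymap_count()
 * below relies on exactly this.
 */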

/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static void vm_extend(Relation rel, BlockNumber vm_nblocks);

/*
 *	visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one.  This function doesn't
 * do any I/O.  Returns true if any bits have been cleared and false
 * otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	int			mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	int			mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	uint8		mask = flags << mapOffset;
	char	   *map;
	bool		cleared = false;

	Assert(flags & VISIBILITYMAP_VALID_BITS);

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	if (!BufferIsValid(buf) || BufferGetBlockNumber(buf) != mapBlock)
		elog(ERROR, "wrong buffer passed to visibilitymap_clear");

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	map = PageGetContents(BufferGetPage(buf));

	if (map[mapByte] & mask)
	{
		map[mapByte] &= ~mask;

		MarkBufferDirty(buf);
		cleared = true;
	}

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);

	return cleared;
}
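
/*
 * Illustrative sketch (hypothetical helper, not an exported routine) of the
 * caller-side protocol described in the file header for clearing a bit from
 * heapam.c: pin the map page before taking the heap buffer lock, re-pin if
 * PD_ALL_VISIBLE appeared while we waited for the lock, and clear the bit
 * while the heap page is locked.  The page modification itself and its WAL
 * logging are omitted here.
 */
#ifdef NOT_USED
static void
example_clear_before_modify(Relation rel, Buffer heapBuf, BlockNumber heapBlk)
{
	Buffer		vmbuffer = InvalidBuffer;
	Page		page = BufferGetPage(heapBuf);

	/* Pin the map page first, without holding the heap buffer lock */
	if (PageIsAllVisible(page))
		visibilitymap_pin(rel, heapBlk, &vmbuffer);

	LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);

	/* The bit may have been set while we were waiting for the lock */
	if (PageIsAllVisible(page) && !BufferIsValid(vmbuffer))
	{
		LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
		visibilitymap_pin(rel, heapBlk, &vmbuffer);
		LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
	}

	/* ... modify the page and WAL-log the change here ... */

	if (PageIsAllVisible(page))
	{
		PageClearAllVisible(page);
		visibilitymap_clear(rel, heapBlk, vmbuffer, VISIBILITYMAP_VALID_BITS);
	}

	LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
	if (BufferIsValid(vmbuffer))
		ReleaseBuffer(vmbuffer);
}
#endif							/* NOT_USED */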

/*
 *	visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation.  First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page.  Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that.  Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the
 * same relation.  On return, *buf is a valid buffer with the map page
 * containing the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*buf))
	{
		if (BufferGetBlockNumber(*buf) == mapBlock)
			return;

		ReleaseBuffer(*buf);
	}
	*buf = vm_readbuf(rel, mapBlock, true);
}
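
/*
 * Illustrative sketch (hypothetical helper, not an exported routine) of the
 * two-phase protocol described above, roughly as VACUUM uses it: pin the
 * map page while the heap page is unlocked, then set PD_ALL_VISIBLE and the
 * map bit while holding the heap buffer lock.  Choosing cutoff_xid, setting
 * the all-frozen bit, and WAL-logging the heap page changes are omitted.
 */
#ifdef NOT_USED
static void
example_mark_all_visible(Relation rel, Buffer heapBuf, BlockNumber heapBlk,
						 TransactionId cutoff_xid)
{
	Buffer		vmbuffer = InvalidBuffer;

	/* Phase one: pin the right map page; this may require I/O */
	visibilitymap_pin(rel, heapBlk, &vmbuffer);

	/* Phase two: with the heap page locked, set the page flag and the bit */
	LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
	PageSetAllVisible(BufferGetPage(heapBuf));
	MarkBufferDirty(heapBuf);
	visibilitymap_set(rel, heapBlk, heapBuf, InvalidXLogRecPtr,
					  vmbuffer, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
	LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);

	ReleaseBuffer(vmbuffer);
}
#endif							/* NOT_USED */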

/*
 *	visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, buf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the
 * same relation.  The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

	return BufferIsValid(buf) && BufferGetBlockNumber(buf) == mapBlock;
}

/*
 *	visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running.  The page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value.  cutoff_xid is the largest xmin on the page being
 * marked all-visible; it is needed for Hot Standby, and can be
 * InvalidTransactionId if the page contains no tuples.  It can also be set
 * to InvalidTransactionId when a page that is already all-visible is being
 * marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function.  Except in recovery, the caller should also pass the heap
 * buffer.  When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one.  This function doesn't
 * do any I/O.
 */
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
				  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
				  uint8 flags)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	Page		page;
	uint8	   *map;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
	Assert(InRecovery || BufferIsValid(heapBuf));
	Assert(flags & VISIBILITYMAP_VALID_BITS);

	/* Check that we have the right heap page pinned, if present */
	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

	/* Check that we have the right VM page pinned */
	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

	page = BufferGetPage(vmBuf);
	map = (uint8 *) PageGetContents(page);
	LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

	if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
	{
		START_CRIT_SECTION();

		map[mapByte] |= (flags << mapOffset);
		MarkBufferDirty(vmBuf);

		if (RelationNeedsWAL(rel))
		{
			if (XLogRecPtrIsInvalid(recptr))
			{
				Assert(!InRecovery);
				recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
										  cutoff_xid, flags);

				/*
				 * If data checksums are enabled (or wal_log_hints=on), we
				 * need to protect the heap page from being torn.
				 */
				if (XLogHintBitIsNeeded())
				{
					Page		heapPage = BufferGetPage(heapBuf);

					/* caller is expected to set PD_ALL_VISIBLE first */
					Assert(PageIsAllVisible(heapPage));
					PageSetLSN(heapPage, recptr);
				}
			}
			PageSetLSN(page, recptr);
		}

		END_CRIT_SECTION();
	}

	LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}

/*
 *	visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or marked frozen,
 * according to the visibility map?
 *
 * On entry, *buf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation.  On return, *buf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer.  The caller is responsible for
 * releasing *buf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it.  In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value.  It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *buf)
{
	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
	uint8		mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
	char	   *map;
	uint8		result;

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

	/* Reuse the old pinned buffer if possible */
	if (BufferIsValid(*buf))
	{
		if (BufferGetBlockNumber(*buf) != mapBlock)
		{
			ReleaseBuffer(*buf);
			*buf = InvalidBuffer;
		}
	}

	if (!BufferIsValid(*buf))
	{
		*buf = vm_readbuf(rel, mapBlock, false);
		if (!BufferIsValid(*buf))
			return false;
	}

	map = PageGetContents(BufferGetPage(*buf));

	/*
	 * A single byte read is atomic.  There could be memory-ordering effects
	 * here, but for performance reasons we make it the caller's job to worry
	 * about that.
	 */
	result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
	return result;
}
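
/*
 * Typical use of the result: an index-only scan tests it through the
 * VM_ALL_VISIBLE() wrapper in visibilitymap.h, roughly
 *
 *		if (!VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
 *			... fetch the heap tuple and check visibility the hard way ...
 *
 * falling back to the heap whenever the all-visible bit is not set or the
 * map page doesn't exist.
 */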

/*
 *	visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call.  New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
	BlockNumber mapBlock;
	BlockNumber nvisible = 0;
	BlockNumber nfrozen = 0;

	/* all_visible must be specified */
	Assert(all_visible);

	for (mapBlock = 0;; mapBlock++)
	{
		Buffer		mapBuffer;
		uint64	   *map;
		int			i;

		/*
		 * Read till we fall off the end of the map.  We assume that any
		 * extra bytes in the last page are zeroed, so we don't bother
		 * excluding them from the count.
		 */
		mapBuffer = vm_readbuf(rel, mapBlock, false);
		if (!BufferIsValid(mapBuffer))
			break;

		/*
		 * We choose not to lock the page, since the result is going to be
		 * immediately stale anyway if anyone is concurrently setting or
		 * clearing bits, and we only really need an approximate value.
		 */
		map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

		StaticAssertStmt(MAPSIZE % sizeof(uint64) == 0,
						 "unsupported MAPSIZE");
		if (all_frozen == NULL)
		{
			for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
				nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
		}
		else
		{
			for (i = 0; i < MAPSIZE / sizeof(uint64); i++)
			{
				nvisible += pg_popcount64(map[i] & VISIBLE_MASK64);
				nfrozen += pg_popcount64(map[i] & FROZEN_MASK64);
			}
		}

		ReleaseBuffer(mapBuffer);
	}

	*all_visible = nvisible;

	if (all_frozen)
		*all_frozen = nfrozen;
}

/*
 *	visibilitymap_prepare_truncate -
 *			prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Return the number of blocks of new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
	BlockNumber newnblocks;

	/* last remaining block, byte, and bit */
	BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
	uint32		truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
	uint8		truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
	elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

	/*
	 * If no visibility map has been created yet for this relation, there's
	 * nothing to truncate.
	 */
	if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
		return InvalidBlockNumber;

	/*
	 * Unless the new size is exactly at a visibility map page boundary, the
	 * tail bits in the last remaining map page, representing truncated heap
	 * blocks, need to be cleared.  This is not only tidy, but also necessary
	 * because we don't get a chance to clear the bits if the heap is
	 * extended again.
	 */
	if (truncByte != 0 || truncOffset != 0)
	{
		Buffer		mapBuffer;
		Page		page;
		char	   *map;

		newnblocks = truncBlock + 1;

		mapBuffer = vm_readbuf(rel, truncBlock, false);
		if (!BufferIsValid(mapBuffer))
		{
			/* nothing to do, the file was already smaller */
			return InvalidBlockNumber;
		}

		page = BufferGetPage(mapBuffer);
		map = PageGetContents(page);

		LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

		/* NO EREPORT(ERROR) from here till changes are logged */
		START_CRIT_SECTION();

		/* Clear out the unwanted bytes. */
		MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

		/*----
		 * Mask out the unwanted bits of the last remaining byte.
		 *
		 * ((1 << 0) - 1) = 00000000
		 * ((1 << 1) - 1) = 00000001
		 * ...
		 * ((1 << 6) - 1) = 00111111
		 * ((1 << 7) - 1) = 01111111
		 *----
		 */
		map[truncByte] &= (1 << truncOffset) - 1;

		/*
		 * Truncation of a relation is WAL-logged at a higher level, and we
		 * will be called at WAL replay.  But if checksums are enabled, we
		 * still need to write a WAL record to protect against a torn page,
		 * in case the page is flushed to disk before the truncation WAL
		 * record.  We cannot use MarkBufferDirtyHint here, because that will
		 * not dirty the page during recovery.
		 */
		MarkBufferDirty(mapBuffer);
		if (!InRecovery && RelationNeedsWAL(rel) && XLogHintBitIsNeeded())
			log_newpage_buffer(mapBuffer, false);

		END_CRIT_SECTION();

		UnlockReleaseBuffer(mapBuffer);
	}
	else
		newnblocks = truncBlock;

	if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
	{
		/* nothing to do, the file was already smaller than requested size */
		return InvalidBlockNumber;
	}

	return newnblocks;
}
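
/*
 * Illustrative sketch (hypothetical helper; the real caller is the
 * relation-truncation code in the storage layer) of how the return value is
 * meant to be used: only when a valid block count comes back does the caller
 * truncate the visibility map fork.
 */
#ifdef NOT_USED
static void
example_truncate_vm(Relation rel, BlockNumber nheapblocks)
{
	BlockNumber newnblocks = visibilitymap_prepare_truncate(rel, nheapblocks);

	if (BlockNumberIsValid(newnblocks))
	{
		ForkNumber	fork = VISIBILITYMAP_FORKNUM;

		smgrtruncate(RelationGetSmgr(rel), &fork, 1, &newnblocks);
	}
}
#endif							/* NOT_USED */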

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, or if 'extend' is
 * true, the visibility map file is extended.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
	Buffer		buf;
	SMgrRelation reln;

	/*
	 * Caution: re-using this smgr pointer could fail if the relcache entry
	 * gets closed.  It's safe as long as we only do smgr-level operations
	 * between here and the last use of the pointer.
	 */
	reln = RelationGetSmgr(rel);

	/*
	 * If we haven't cached the size of the visibility map fork yet, check it
	 * first.
	 */
	if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
	{
		if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
			smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
		else
			reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
	}

	/* Handle requests beyond EOF */
	if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
	{
		if (extend)
			vm_extend(rel, blkno + 1);
		else
			return InvalidBuffer;
	}

	/*
	 * Use ZERO_ON_ERROR mode, and initialize the page if necessary.  It's
	 * always safe to clear bits, so it's better to clear corrupt pages than
	 * error out.
	 *
	 * The initialize-the-page part is trickier than it looks, because of the
	 * possibility of multiple backends doing this concurrently, and our
	 * desire to not uselessly take the buffer lock in the normal path where
	 * the page is OK.  We must take the lock to initialize the page, so
	 * recheck page newness after we have the lock, in case someone else
	 * already did it.  Also, because we initially check PageIsNew with no
	 * lock, it's possible to fall through and return the buffer while someone
	 * else is still initializing the page (i.e., we might see pd_upper as set
	 * but other page header fields are still zeroes).  This is harmless for
	 * callers that will take a buffer lock themselves, but some callers
	 * inspect the page without any lock at all.  The latter is OK only so
	 * long as it doesn't depend on the page header having correct contents.
	 * Current usage is safe because PageGetContents() does not require that.
	 */
	buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
							 RBM_ZERO_ON_ERROR, NULL);
	if (PageIsNew(BufferGetPage(buf)))
	{
		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		if (PageIsNew(BufferGetPage(buf)))
			PageInit(BufferGetPage(buf), BLCKSZ, 0);
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	}
	return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static void
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
	BlockNumber vm_nblocks_now;
	PGAlignedBlock pg;
	SMgrRelation reln;

	PageInit((Page) pg.data, BLCKSZ, 0);

	/*
	 * We use the relation extension lock to lock out other backends trying
	 * to extend the visibility map at the same time.  It also locks out
	 * extension of the main fork, unnecessarily, but extending the
	 * visibility map happens seldom enough that it doesn't seem worthwhile
	 * to have a separate lock tag type for it.
	 *
	 * Note that another backend might have extended or created the relation
	 * by the time we get the lock.
	 */
	LockRelationForExtension(rel, ExclusiveLock);

	/*
	 * Caution: re-using this smgr pointer could fail if the relcache entry
	 * gets closed.  It's safe as long as we only do smgr-level operations
	 * between here and the last use of the pointer.
	 */
	reln = RelationGetSmgr(rel);

	/*
	 * Create the file first if it doesn't exist.  If the cached nblocks
	 * value is positive then the fork must already exist, so no smgrexists
	 * call is needed.
	 */
	if ((reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == 0 ||
		 reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber) &&
		!smgrexists(reln, VISIBILITYMAP_FORKNUM))
		smgrcreate(reln, VISIBILITYMAP_FORKNUM, false);

	/* Invalidate cache so that smgrnblocks() asks the kernel. */
	reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
	vm_nblocks_now = smgrnblocks(reln, VISIBILITYMAP_FORKNUM);

	/* Now extend the file */
	while (vm_nblocks_now < vm_nblocks)
	{
		PageSetChecksumInplace((Page) pg.data, vm_nblocks_now);

		smgrextend(reln, VISIBILITYMAP_FORKNUM, vm_nblocks_now, pg.data, false);
		vm_nblocks_now++;
	}

	/*
	 * Send a shared-inval message to force other backends to close any smgr
	 * references they may have for this rel, which we are about to change.
	 * This is a useful optimization because it means that backends don't
	 * have to keep checking for creation or extension of the file, which
	 * happens infrequently.
	 */
	CacheInvalidateSmgr(reln->smgr_rnode);

	UnlockRelationForExtension(rel, ExclusiveLock);
}