/*-------------------------------------------------------------------------
 *
 * heapam.c
 *	  heap access method code
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * INTERFACE ROUTINES
 *		relation_open	- open any relation by relation OID
 *		relation_openrv - open any relation specified by a RangeVar
 *		relation_close	- close any relation
 *		heap_open		- open a heap relation by relation OID
 *		heap_openrv		- open a heap relation specified by a RangeVar
 *		heap_close		- (now just a macro for relation_close)
 *		heap_beginscan	- begin relation scan
 *		heap_rescan		- restart a relation scan
 *		heap_endscan	- end relation scan
 *		heap_getnext	- retrieve next tuple in scan
 *		heap_fetch		- retrieve tuple with given tid
 *		heap_insert		- insert tuple into a relation
 *		heap_delete		- delete a tuple from a relation
 *		heap_update		- replace a tuple in a relation with another tuple
 *		heap_markpos	- mark scan position
 *		heap_restrpos	- restore position to marked location
 *		heap_sync		- sync heap, for when no WAL has been written
 *
 * NOTES
 *	  This file contains the heap_ routines which implement
 *	  the POSTGRES heap access method used for all POSTGRES relations.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
/* GUC variable */
bool		synchronize_seqscans = true;


static HeapScanDesc heap_beginscan_internal(Relation relation,
						Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan);
static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
				ItemPointerData from, Buffer newbuf, HeapTuple newtup,
				bool move);
static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
				HeapTuple oldtup, HeapTuple newtup);
/* ----------------------------------------------------------------
 *						 heap support routines
 * ----------------------------------------------------------------
 */

/* ----------------
 *		initscan - scan code common to heap_beginscan and heap_rescan
 * ----------------
 */
static void
initscan(HeapScanDesc scan, ScanKey key)
{
	bool		allow_strat;
	bool		allow_sync;
	/*
	 * Determine the number of blocks we have to scan.
	 *
	 * It is sufficient to do this once at scan start, since any tuples added
	 * while the scan is in progress will be invisible to my snapshot anyway.
	 * (That is not true when using a non-MVCC snapshot.  However, we couldn't
	 * guarantee to return tuples added after scan start anyway, since they
	 * might go into pages we already scanned.  To guarantee consistent
	 * results for a non-MVCC snapshot, the caller must hold some higher-level
	 * lock that ensures the interesting tuple(s) won't change.)
	 */
	scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);

	/*
	 * If the table is large relative to NBuffers, use a bulk-read access
	 * strategy and enable synchronized scanning (see syncscan.c).  Although
	 * the thresholds for these features could be different, we make them the
	 * same so that there are only two behaviors to tune rather than four.
	 * (However, some callers need to be able to disable one or both of these
	 * behaviors, independently of the size of the table; also there is a GUC
	 * variable that can disable synchronized scanning.)
	 *
	 * During a rescan, don't make a new strategy object if we don't have to.
	 */
	if (!scan->rs_rd->rd_istemp &&
		scan->rs_nblocks > NBuffers / 4)
	{
		allow_strat = scan->rs_allow_strat;
		allow_sync = scan->rs_allow_sync;
	}
	else
		allow_strat = allow_sync = false;

	if (allow_strat)
	{
		if (scan->rs_strategy == NULL)
			scan->rs_strategy = GetAccessStrategy(BAS_BULKREAD);
	}
	else
	{
		if (scan->rs_strategy != NULL)
			FreeAccessStrategy(scan->rs_strategy);
		scan->rs_strategy = NULL;
	}

	if (allow_sync && synchronize_seqscans)
	{
		scan->rs_syncscan = true;
		scan->rs_startblock = ss_get_location(scan->rs_rd, scan->rs_nblocks);
	}
	else
	{
		scan->rs_syncscan = false;
		scan->rs_startblock = 0;
	}

	scan->rs_inited = false;
	scan->rs_ctup.t_data = NULL;
	ItemPointerSetInvalid(&scan->rs_ctup.t_self);
	scan->rs_cbuf = InvalidBuffer;
	scan->rs_cblock = InvalidBlockNumber;

	/* we don't have a marked position... */
	ItemPointerSetInvalid(&(scan->rs_mctid));

	/* page-at-a-time fields are always invalid when not rs_inited */

	/*
	 * copy the scan key, if appropriate
	 */
	if (key != NULL)
		memcpy(scan->rs_key, key, scan->rs_nkeys * sizeof(ScanKeyData));

	/*
	 * Currently, we don't have a stats counter for bitmap heap scans (but the
	 * underlying bitmap index scans will be counted).
	 */
	if (!scan->rs_bitmapscan)
		pgstat_count_heap_scan(scan->rs_rd);
}
/*
 * heapgetpage - subroutine for heapgettup()
 *
 * This routine reads and pins the specified page of the relation.
 * In page-at-a-time mode it performs additional work, namely determining
 * which tuples on the page are visible.
 */
static void
heapgetpage(HeapScanDesc scan, BlockNumber page)
{
	Buffer		buffer;
	Snapshot	snapshot;
	Page		dp;
	int			lines;
	int			ntup;
	OffsetNumber lineoff;
	ItemId		lpp;

	Assert(page < scan->rs_nblocks);

	/* release previous scan buffer, if any */
	if (BufferIsValid(scan->rs_cbuf))
	{
		ReleaseBuffer(scan->rs_cbuf);
		scan->rs_cbuf = InvalidBuffer;
	}

	/* read page using selected strategy */
	scan->rs_cbuf = ReadBufferWithStrategy(scan->rs_rd,
										   page,
										   scan->rs_strategy);
	scan->rs_cblock = page;

	if (!scan->rs_pageatatime)
		return;

	buffer = scan->rs_cbuf;
	snapshot = scan->rs_snapshot;

	/*
	 * Prune and repair fragmentation for the whole page, if possible.
	 */
	heap_page_prune_opt(scan->rs_rd, buffer, RecentGlobalXmin);

	/*
	 * We must hold share lock on the buffer content while examining tuple
	 * visibility.  Afterwards, however, the tuples we have found to be
	 * visible are guaranteed good as long as we hold the buffer pin.
	 */
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	dp = (Page) BufferGetPage(buffer);
	lines = PageGetMaxOffsetNumber(dp);
	ntup = 0;

	for (lineoff = FirstOffsetNumber, lpp = PageGetItemId(dp, lineoff);
		 lineoff <= lines;
		 lineoff++, lpp++)
	{
		if (ItemIdIsNormal(lpp))
		{
			HeapTupleData loctup;
			bool		valid;

			loctup.t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
			loctup.t_len = ItemIdGetLength(lpp);
			ItemPointerSet(&(loctup.t_self), page, lineoff);

			valid = HeapTupleSatisfiesVisibility(&loctup, snapshot, buffer);
			if (valid)
				scan->rs_vistuples[ntup++] = lineoff;
		}
	}

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	Assert(ntup <= MaxHeapTuplesPerPage);
	scan->rs_ntuples = ntup;
}
/* ----------------
 *		heapgettup - fetch next heap tuple
 *
 *		Initialize the scan if not already done; then advance to the next
 *		tuple as indicated by "dir"; return the next tuple in scan->rs_ctup,
 *		or set scan->rs_ctup.t_data = NULL if no more tuples.
 *
 * dir == NoMovementScanDirection means "re-fetch the tuple indicated
 * by scan->rs_ctup".
 *
 * Note: the reason nkeys/key are passed separately, even though they are
 * kept in the scan descriptor, is that the caller may not want us to check
 * the scan keys.
 *
 * Note: when we fall off the end of the scan in either direction, we
 * reset rs_inited.  This means that a further request with the same
 * scan direction will restart the scan, which is a bit odd, but a
 * request with the opposite scan direction will start a fresh scan
 * in the proper direction.  The latter is required behavior for cursors,
 * while the former case is generally undefined behavior in Postgres
 * so we don't care too much.
 * ----------------
 */
static void
heapgettup(HeapScanDesc scan,
		   ScanDirection dir,
		   int nkeys,
		   ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	Snapshot	snapshot = scan->rs_snapshot;
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;

	/*
	 * calculate next starting lineoff, given scan direction
	 */
	if (ScanDirectionIsForward(dir))
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}
			page = scan->rs_startblock; /* first page */
			heapgetpage(scan, page);
			lineoff = FirstOffsetNumber;	/* first offnum */
			scan->rs_inited = true;
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
			lineoff =			/* next offnum */
				OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);
		/* page and lineoff now reference the physically next tid */

		linesleft = lines - lineoff + 1;
	}
	else if (backward)
	{
		if (!scan->rs_inited)
		{
			/*
			 * return null immediately if relation is empty
			 */
			if (scan->rs_nblocks == 0)
			{
				Assert(!BufferIsValid(scan->rs_cbuf));
				tuple->t_data = NULL;
				return;
			}

			/*
			 * Disable reporting to syncscan logic in a backwards scan; it's
			 * not very likely anyone else is doing the same thing at the same
			 * time, and much more likely that we'll just bollix things for
			 * forward scanners.
			 */
			scan->rs_syncscan = false;
			/* start from last page of the scan */
			if (scan->rs_startblock > 0)
				page = scan->rs_startblock - 1;
			else
				page = scan->rs_nblocks - 1;
			heapgetpage(scan, page);
		}
		else
		{
			/* continue from previously returned page/tuple */
			page = scan->rs_cblock;		/* current page */
		}

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber(dp);

		if (!scan->rs_inited)
		{
			lineoff = lines;	/* final offnum */
			scan->rs_inited = true;
		}
		else
		{
			lineoff =			/* previous offnum */
				OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
		}
		/* page and lineoff now reference the physically previous tid */

		linesleft = lineoff;
	}
	else
	{
		/*
		 * ``no movement'' scan direction: refetch prior tuple
		 */
		if (!scan->rs_inited)
		{
			Assert(!BufferIsValid(scan->rs_cbuf));
			tuple->t_data = NULL;
			return;
		}

		page = ItemPointerGetBlockNumber(&(tuple->t_self));
		if (page != scan->rs_cblock)
			heapgetpage(scan, page);

		/* Since the tuple was previously fetched, needn't lock page here */
		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lineoff = ItemPointerGetOffsetNumber(&(tuple->t_self));
		lpp = PageGetItemId(dp, lineoff);
		Assert(ItemIdIsNormal(lpp));

		tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
		tuple->t_len = ItemIdGetLength(lpp);

		return;
	}

	/*
	 * advance the scan until we find a qualifying tuple or run out of stuff
	 * to scan
	 */
	lpp = PageGetItemId(dp, lineoff);
	for (;;)
	{
		while (linesleft > 0)
		{
			if (ItemIdIsNormal(lpp))
			{
				bool		valid;

				tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lpp);
				tuple->t_len = ItemIdGetLength(lpp);
				ItemPointerSet(&(tuple->t_self), page, lineoff);

				/*
				 * if current tuple qualifies, return it.
				 */
				valid = HeapTupleSatisfiesVisibility(tuple,
													 snapshot,
													 scan->rs_cbuf);

				if (valid && key != NULL)
					HeapKeyTest(tuple, RelationGetDescr(scan->rs_rd),
								nkeys, key, valid);

				if (valid)
				{
					LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
					return;
				}
			}

			/*
			 * otherwise move to the next item on the page
			 */
			--linesleft;
			if (backward)
			{
				--lpp;			/* move back in this page's ItemId array */
				--lineoff;
			}
			else
			{
				++lpp;			/* move forward in this page's ItemId array */
				++lineoff;
			}
		}

		/*
		 * if we get here, it means we've exhausted the items on this page and
		 * it's time to move to the next.
		 */
		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);

		/*
		 * advance to next/prior page and detect end of scan
		 */
		if (backward)
		{
			finished = (page == scan->rs_startblock);
			if (page == 0)
				page = scan->rs_nblocks;
			page--;
		}
		else
		{
			page++;
			if (page >= scan->rs_nblocks)
				page = 0;
			finished = (page == scan->rs_startblock);

			/*
			 * Report our new scan position for synchronization purposes. We
			 * don't do that when moving backwards, however. That would just
			 * mess up any other forward-moving scanners.
			 *
			 * Note: we do this before checking for end of scan so that the
			 * final state of the position hint is back at the start of the
			 * rel.  That's not strictly necessary, but otherwise when you run
			 * the same query multiple times the starting position would shift
			 * a little bit backwards on every invocation, which is confusing.
			 * We don't guarantee any specific ordering in general, though.
			 */
			if (scan->rs_syncscan)
				ss_report_location(scan->rs_rd, page);
		}

		/*
		 * return NULL if we've exhausted all the pages
		 */
		if (finished)
		{
			if (BufferIsValid(scan->rs_cbuf))
				ReleaseBuffer(scan->rs_cbuf);
			scan->rs_cbuf = InvalidBuffer;
			scan->rs_cblock = InvalidBlockNumber;
			tuple->t_data = NULL;
			scan->rs_inited = false;
			return;
		}

		heapgetpage(scan, page);

		LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);

		dp = (Page) BufferGetPage(scan->rs_cbuf);
		lines = PageGetMaxOffsetNumber((Page) dp);
		linesleft = lines;
		if (backward)
		{
			lineoff = lines;
			lpp = PageGetItemId(dp, lines);
		}
		else
		{
			lineoff = FirstOffsetNumber;
			lpp = PageGetItemId(dp, FirstOffsetNumber);
		}
	}
}
/* ----------------
 *		heapgettup_pagemode - fetch next heap tuple in page-at-a-time mode
 *
 *		Same API as heapgettup, but used in page-at-a-time mode
 *
 * The internal logic is much the same as heapgettup's too, but there are some
 * differences: we do not take the buffer content lock (that only needs to
 * happen inside heapgetpage), and we iterate through just the tuples listed
 * in rs_vistuples[] rather than all tuples on the page.  Notice that
 * lineindex is 0-based, where the corresponding loop variable lineoff in
 * heapgettup is 1-based.
 * ----------------
 */
static void
heapgettup_pagemode(HeapScanDesc scan,
					ScanDirection dir,
					int nkeys,
					ScanKey key)
{
	HeapTuple	tuple = &(scan->rs_ctup);
	bool		backward = ScanDirectionIsBackward(dir);
	BlockNumber page;
	bool		finished;
	Page		dp;
	int			lines;
	int			lineindex;
	OffsetNumber lineoff;
	int			linesleft;
	ItemId		lpp;
568 * calculate next starting lineindex, given scan direction
570 if (ScanDirectionIsForward(dir
))
572 if (!scan
->rs_inited
)
575 * return null immediately if relation is empty
577 if (scan
->rs_nblocks
== 0)
579 Assert(!BufferIsValid(scan
->rs_cbuf
));
580 tuple
->t_data
= NULL
;
583 page
= scan
->rs_startblock
; /* first page */
584 heapgetpage(scan
, page
);
586 scan
->rs_inited
= true;
590 /* continue from previously returned page/tuple */
591 page
= scan
->rs_cblock
; /* current page */
592 lineindex
= scan
->rs_cindex
+ 1;
595 dp
= (Page
) BufferGetPage(scan
->rs_cbuf
);
596 lines
= scan
->rs_ntuples
;
597 /* page and lineindex now reference the next visible tid */
599 linesleft
= lines
- lineindex
;
603 if (!scan
->rs_inited
)
606 * return null immediately if relation is empty
608 if (scan
->rs_nblocks
== 0)
610 Assert(!BufferIsValid(scan
->rs_cbuf
));
611 tuple
->t_data
= NULL
;
616 * Disable reporting to syncscan logic in a backwards scan; it's
617 * not very likely anyone else is doing the same thing at the same
618 * time, and much more likely that we'll just bollix things for
621 scan
->rs_syncscan
= false;
622 /* start from last page of the scan */
623 if (scan
->rs_startblock
> 0)
624 page
= scan
->rs_startblock
- 1;
626 page
= scan
->rs_nblocks
- 1;
627 heapgetpage(scan
, page
);
631 /* continue from previously returned page/tuple */
632 page
= scan
->rs_cblock
; /* current page */
635 dp
= (Page
) BufferGetPage(scan
->rs_cbuf
);
636 lines
= scan
->rs_ntuples
;
638 if (!scan
->rs_inited
)
640 lineindex
= lines
- 1;
641 scan
->rs_inited
= true;
645 lineindex
= scan
->rs_cindex
- 1;
647 /* page and lineindex now reference the previous visible tid */
649 linesleft
= lineindex
+ 1;
654 * ``no movement'' scan direction: refetch prior tuple
656 if (!scan
->rs_inited
)
658 Assert(!BufferIsValid(scan
->rs_cbuf
));
659 tuple
->t_data
= NULL
;
663 page
= ItemPointerGetBlockNumber(&(tuple
->t_self
));
664 if (page
!= scan
->rs_cblock
)
665 heapgetpage(scan
, page
);
667 /* Since the tuple was previously fetched, needn't lock page here */
668 dp
= (Page
) BufferGetPage(scan
->rs_cbuf
);
669 lineoff
= ItemPointerGetOffsetNumber(&(tuple
->t_self
));
670 lpp
= PageGetItemId(dp
, lineoff
);
671 Assert(ItemIdIsNormal(lpp
));
673 tuple
->t_data
= (HeapTupleHeader
) PageGetItem((Page
) dp
, lpp
);
674 tuple
->t_len
= ItemIdGetLength(lpp
);
676 /* check that rs_cindex is in sync */
677 Assert(scan
->rs_cindex
< scan
->rs_ntuples
);
678 Assert(lineoff
== scan
->rs_vistuples
[scan
->rs_cindex
]);
684 * advance the scan until we find a qualifying tuple or run out of stuff
689 while (linesleft
> 0)
691 lineoff
= scan
->rs_vistuples
[lineindex
];
692 lpp
= PageGetItemId(dp
, lineoff
);
693 Assert(ItemIdIsNormal(lpp
));
695 tuple
->t_data
= (HeapTupleHeader
) PageGetItem((Page
) dp
, lpp
);
696 tuple
->t_len
= ItemIdGetLength(lpp
);
697 ItemPointerSet(&(tuple
->t_self
), page
, lineoff
);
700 * if current tuple qualifies, return it.
706 HeapKeyTest(tuple
, RelationGetDescr(scan
->rs_rd
),
710 scan
->rs_cindex
= lineindex
;
716 scan
->rs_cindex
= lineindex
;
721 * otherwise move to the next item on the page
731 * if we get here, it means we've exhausted the items on this page and
732 * it's time to move to the next.
736 finished
= (page
== scan
->rs_startblock
);
738 page
= scan
->rs_nblocks
;
744 if (page
>= scan
->rs_nblocks
)
746 finished
= (page
== scan
->rs_startblock
);
749 * Report our new scan position for synchronization purposes. We
750 * don't do that when moving backwards, however. That would just
751 * mess up any other forward-moving scanners.
753 * Note: we do this before checking for end of scan so that the
754 * final state of the position hint is back at the start of the
755 * rel. That's not strictly necessary, but otherwise when you run
756 * the same query multiple times the starting position would shift
757 * a little bit backwards on every invocation, which is confusing.
758 * We don't guarantee any specific ordering in general, though.
760 if (scan
->rs_syncscan
)
761 ss_report_location(scan
->rs_rd
, page
);
765 * return NULL if we've exhausted all the pages
769 if (BufferIsValid(scan
->rs_cbuf
))
770 ReleaseBuffer(scan
->rs_cbuf
);
771 scan
->rs_cbuf
= InvalidBuffer
;
772 scan
->rs_cblock
= InvalidBlockNumber
;
773 tuple
->t_data
= NULL
;
774 scan
->rs_inited
= false;
778 heapgetpage(scan
, page
);
780 dp
= (Page
) BufferGetPage(scan
->rs_cbuf
);
781 lines
= scan
->rs_ntuples
;
784 lineindex
= lines
- 1;
#if defined(DISABLE_COMPLEX_MACRO)
/*
 * This is formatted so oddly so that the correspondence to the macro
 * definition in access/heapam.h is maintained.
 */
Datum
fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
			bool *isnull)
{
	return (
			(attnum) > 0 ?
			(
			 ((isnull) ? (*(isnull) = false) : (dummyret) NULL),
			 HeapTupleNoNulls(tup) ?
			 (
			  (tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
			  (
			   fetchatt((tupleDesc)->attrs[(attnum) - 1],
						(char *) (tup)->t_data + (tup)->t_data->t_hoff +
						(tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
			   )
			  :
			  nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			  )
			 :
			 (
			  att_isnull((attnum) - 1, (tup)->t_data->t_bits) ?
			  (
			   ((isnull) ? (*(isnull) = true) : (dummyret) NULL),
			   (Datum) NULL
			   )
			  :
			  (
			   nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
			   )
			  )
			 )
			:
			(
			 (Datum) NULL
			 )
		);
}
#endif   /* defined(DISABLE_COMPLEX_MACRO) */
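
/*
 * Illustrative usage sketch (not part of the original file): pulling one
 * attribute out of a fetched tuple.  "tupdesc" is assumed to be the
 * relation's TupleDesc; callers normally go through the heap_getattr()
 * macro, which routes non-system attributes to this fastgetattr() path.
 *
 *		Datum	val;
 *		bool	isnull;
 *
 *		val = heap_getattr(tuple, 1, tupdesc, &isnull);
 *		if (!isnull)
 *			// interpret val according to the first attribute's type
 */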
/* ----------------------------------------------------------------
 *					 heap access method interface
 * ----------------------------------------------------------------
 */

/* ----------------
 *		relation_open - open any relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the relation.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		relation already.)
 *
 *		An error is raised if the relation does not exist.
 *
 *		NB: a "relation" is anything with a pg_class entry.  The caller is
 *		expected to check whether the relkind is something it can handle.
 * ----------------
 */
Relation
relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}
/* ----------------
 *		try_relation_open - open any relation by relation OID
 *
 *		Same as relation_open, except return NULL instead of failing
 *		if the relation does not exist.
 * ----------------
 */
Relation
try_relation_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock first */
	if (lockmode != NoLock)
		LockRelationOid(relationId, lockmode);

	/*
	 * Now that we have the lock, probe to see if the relation really exists
	 * or not.
	 */
	if (!SearchSysCacheExists(RELOID,
							  ObjectIdGetDatum(relationId),
							  0, 0, 0))
	{
		/* Release useless lock */
		if (lockmode != NoLock)
			UnlockRelationOid(relationId, lockmode);

		return NULL;
	}

	/* Should be safe to do a relcache load */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}
/* ----------------
 *		relation_open_nowait - open but don't wait for lock
 *
 *		Same as relation_open, except throw an error instead of waiting
 *		when the requested lock is not immediately obtainable.
 * ----------------
 */
Relation
relation_open_nowait(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* Get the lock before trying to open the relcache entry */
	if (lockmode != NoLock)
	{
		if (!ConditionalLockRelationOid(relationId, lockmode))
		{
			/* try to throw error by name; relation could be deleted... */
			char	   *relname = get_rel_name(relationId);

			if (relname)
				ereport(ERROR,
						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
						 errmsg("could not obtain lock on relation \"%s\"",
								relname)));
			else
				ereport(ERROR,
						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
						 errmsg("could not obtain lock on relation with OID %u",
								relationId)));
		}
	}

	/* The relcache does all the real work... */
	r = RelationIdGetRelation(relationId);

	if (!RelationIsValid(r))
		elog(ERROR, "could not open relation with OID %u", relationId);

	/* Make note that we've accessed a temporary relation */
	if (r->rd_istemp)
		MyXactAccessedTempRel = true;

	return r;
}
/* ----------------
 *		relation_openrv - open any relation specified by a RangeVar
 *
 *		Same as relation_open, but the relation is specified by a RangeVar.
 * ----------------
 */
Relation
relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Oid			relOid;

	/*
	 * Check for shared-cache-inval messages before trying to open the
	 * relation.  This is needed to cover the case where the name identifies a
	 * rel that has been dropped and recreated since the start of our
	 * transaction: if we don't flush the old syscache entry then we'll latch
	 * onto that entry and suffer an error when we do RelationIdGetRelation.
	 * Note that relation_open does not need to do this, since a relation's
	 * OID never changes.
	 *
	 * We skip this if asked for NoLock, on the assumption that the caller has
	 * already ensured some appropriate lock is held.
	 */
	if (lockmode != NoLock)
		AcceptInvalidationMessages();

	/* Look up the appropriate relation using namespace search */
	relOid = RangeVarGetRelid(relation, false);

	/* Let relation_open do the rest */
	return relation_open(relOid, lockmode);
}
/* ----------------
 *		relation_close - close any relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond relation_close;
 *		in that case, the lock is released automatically at xact end.
 * ----------------
 */
void
relation_close(Relation relation, LOCKMODE lockmode)
{
	LockRelId	relid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&relid, lockmode);
}
/* ----------------
 *		heap_open - open a heap relation by relation OID
 *
 *		This is essentially relation_open plus check that the relation
 *		is not an index nor a composite type.  (The caller should also
 *		check that it's not a view before assuming it has storage.)
 * ----------------
 */
Relation
heap_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_open(relationId, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}
/* ----------------
 *		heap_openrv - open a heap relation specified
 *		by a RangeVar node
 *
 *		As above, but relation is specified by a RangeVar.
 * ----------------
 */
Relation
heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_openrv(relation, lockmode);

	if (r->rd_rel->relkind == RELKIND_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is an index",
						RelationGetRelationName(r))));
	else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is a composite type",
						RelationGetRelationName(r))));

	return r;
}
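
/*
 * Illustrative usage sketch (not part of the original file): opening a table
 * by name, reading from it, and closing it while keeping the lock until
 * transaction end.  "rv" is assumed to be a RangeVar built by the caller.
 *
 *		Relation	rel = heap_openrv(rv, AccessShareLock);
 *
 *		// ... read from rel ...
 *
 *		heap_close(rel, NoLock);	// lock is held until commit/abort
 */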
/* ----------------
 *		heap_beginscan	- begin relation scan
 *
 * heap_beginscan_strat offers an extended API that lets the caller control
 * whether a nondefault buffer access strategy can be used, and whether
 * syncscan can be chosen (possibly resulting in the scan not starting from
 * block zero).  Both of these default to TRUE with plain heap_beginscan.
 *
 * heap_beginscan_bm is an alternative entry point for setting up a
 * HeapScanDesc for a bitmap heap scan.  Although that scan technology is
 * really quite unlike a standard seqscan, there is just enough commonality
 * to make it worth using the same data structure.
 * ----------------
 */
HeapScanDesc
heap_beginscan(Relation relation, Snapshot snapshot,
			   int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   true, true, false);
}

HeapScanDesc
heap_beginscan_strat(Relation relation, Snapshot snapshot,
					 int nkeys, ScanKey key,
					 bool allow_strat, bool allow_sync)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   allow_strat, allow_sync, false);
}

HeapScanDesc
heap_beginscan_bm(Relation relation, Snapshot snapshot,
				  int nkeys, ScanKey key)
{
	return heap_beginscan_internal(relation, snapshot, nkeys, key,
								   false, false, true);
}

static HeapScanDesc
heap_beginscan_internal(Relation relation, Snapshot snapshot,
						int nkeys, ScanKey key,
						bool allow_strat, bool allow_sync,
						bool is_bitmapscan)
{
	HeapScanDesc scan;

	/*
	 * increment relation ref count while scanning relation
	 *
	 * This is just to make really sure the relcache entry won't go away while
	 * the scan has a pointer to it.  Caller should be holding the rel open
	 * anyway, so this is redundant in all normal scenarios...
	 */
	RelationIncrementReferenceCount(relation);

	/*
	 * allocate and initialize scan descriptor
	 */
	scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));

	scan->rs_rd = relation;
	scan->rs_snapshot = snapshot;
	scan->rs_nkeys = nkeys;
	scan->rs_bitmapscan = is_bitmapscan;
	scan->rs_strategy = NULL;	/* set in initscan */
	scan->rs_allow_strat = allow_strat;
	scan->rs_allow_sync = allow_sync;

	/*
	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
	 */
	scan->rs_pageatatime = IsMVCCSnapshot(snapshot);

	/* we only need to set this up once */
	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);

	/*
	 * we do this here instead of in initscan() because heap_rescan also calls
	 * initscan() and we don't want to allocate memory again
	 */
	if (nkeys > 0)
		scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
	else
		scan->rs_key = NULL;

	initscan(scan, key);

	return scan;
}
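
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs the relation read in physical block order (for example, to produce
 * reproducible output) can disable synchronized scanning while still
 * permitting a bulk-read strategy.  "rel" and "snap" are hypothetical
 * caller state.
 *
 *		scan = heap_beginscan_strat(rel, snap, 0, NULL,
 *									true,	// allow_strat
 *									false);	// allow_sync
 */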
/* ----------------
 *		heap_rescan		- restart a relation scan
 * ----------------
 */
void
heap_rescan(HeapScanDesc scan,
			ScanKey key)
{
	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * reinitialize scan descriptor
	 */
	initscan(scan, key);
}
/* ----------------
 *		heap_endscan	- end relation scan
 *
 *		See how to integrate with index scans.
 *		Check handling if reldesc caching.
 * ----------------
 */
void
heap_endscan(HeapScanDesc scan)
{
	/* Note: no locking manipulations needed */

	/*
	 * unpin scan buffers
	 */
	if (BufferIsValid(scan->rs_cbuf))
		ReleaseBuffer(scan->rs_cbuf);

	/*
	 * decrement relation reference count and free scan descriptor storage
	 */
	RelationDecrementReferenceCount(scan->rs_rd);

	if (scan->rs_key)
		pfree(scan->rs_key);

	if (scan->rs_strategy != NULL)
		FreeAccessStrategy(scan->rs_strategy);

	pfree(scan);
}
/* ----------------
 *		heap_getnext	- retrieve next tuple in scan
 *
 *		Fix to work with index relations.
 *		We don't return the buffer anymore, but you can get it from the
 *		returned HeapTuple.
 * ----------------
 */
#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
	elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
		 RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
#define HEAPDEBUG_2 \
	elog(DEBUG2, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
	elog(DEBUG2, "heap_getnext returning tuple")
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
#define HEAPDEBUG_3
#endif   /* !defined(HEAPDEBUGALL) */


HeapTuple
heap_getnext(HeapScanDesc scan, ScanDirection direction)
{
	/* Note: no locking manipulations needed */

	HEAPDEBUG_1;				/* heap_getnext( info ) */

	if (scan->rs_pageatatime)
		heapgettup_pagemode(scan, direction,
							scan->rs_nkeys, scan->rs_key);
	else
		heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);

	if (scan->rs_ctup.t_data == NULL)
	{
		HEAPDEBUG_2;			/* heap_getnext returning EOS */
		return NULL;
	}

	/*
	 * if we get here it means we have a new current scan tuple, so point to
	 * the proper return buffer and return the tuple.
	 */
	HEAPDEBUG_3;				/* heap_getnext returning tuple */

	pgstat_count_heap_getnext(scan->rs_rd);

	return &(scan->rs_ctup);
}
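
/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * caller-side loop that drives a forward sequential scan with
 * heap_beginscan / heap_getnext / heap_endscan.  "rel" and "snap" are
 * hypothetical caller state.
 *
 *		HeapScanDesc scan;
 *		HeapTuple	 tup;
 *
 *		scan = heap_beginscan(rel, snap, 0, NULL);
 *		while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			// process tup; it is only valid until the next heap_getnext call
 *		}
 *		heap_endscan(scan);
 */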
/*
 *	heap_fetch		- retrieve tuple with given tid
 *
 * On entry, tuple->t_self is the TID to fetch.  We pin the buffer holding
 * the tuple, fill in the remaining fields of *tuple, and check the tuple
 * against the specified snapshot.
 *
 * If successful (tuple found and passes snapshot time qual), then *userbuf
 * is set to the buffer holding the tuple and TRUE is returned.  The caller
 * must unpin the buffer when done with the tuple.
 *
 * If the tuple is not found (ie, item number references a deleted slot),
 * then tuple->t_data is set to NULL and FALSE is returned.
 *
 * If the tuple is found but fails the time qual check, then FALSE is returned
 * but tuple->t_data is left pointing to the tuple.
 *
 * keep_buf determines what is done with the buffer in the FALSE-result cases.
 * When the caller specifies keep_buf = true, we retain the pin on the buffer
 * and return it in *userbuf (so the caller must eventually unpin it); when
 * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer.
 *
 * stats_relation is the relation to charge the heap_fetch operation against
 * for statistical purposes.  (This could be the heap rel itself, an
 * associated index, or NULL to not count the fetch at all.)
 *
 * heap_fetch does not follow HOT chains: only the exact TID requested will
 * be fetched.
 *
 * It is somewhat inconsistent that we ereport() on invalid block number but
 * return false on invalid item number.  There are a couple of reasons though.
 * One is that the caller can relatively easily check the block number for
 * validity, but cannot check the item number without reading the page
 * himself.  Another is that when we are following a t_ctid link, we can be
 * reasonably confident that the page number is valid (since VACUUM shouldn't
 * truncate off the destination page without having killed the referencing
 * tuple first), but the item number might well not be good.
 */
bool
heap_fetch(Relation relation,
		   Snapshot snapshot,
		   HeapTuple tuple,
		   Buffer *userbuf,
		   bool keep_buf,
		   Relation stats_relation)
{
	/* Assume *userbuf is undefined on entry */
	*userbuf = InvalidBuffer;
	return heap_release_fetch(relation, snapshot, tuple,
							  userbuf, keep_buf, stats_relation);
}
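
/*
 * Illustrative usage sketch (not part of the original file): fetching a
 * single tuple by TID.  "rel", "snap" and "tid" are hypothetical caller
 * state; on success the caller holds a pin on "buf" and must release it.
 *
 *		HeapTupleData tup;
 *		Buffer		  buf;
 *
 *		tup.t_self = *tid;
 *		if (heap_fetch(rel, snap, &tup, &buf, false, NULL))
 *		{
 *			// use tup.t_data while the pin is held
 *			ReleaseBuffer(buf);
 *		}
 */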
/*
 *	heap_release_fetch		- retrieve tuple with given tid
 *
 * This has the same API as heap_fetch except that if *userbuf is not
 * InvalidBuffer on entry, that buffer will be released before reading
 * the new page.  This saves a separate ReleaseBuffer step and hence
 * one entry into the bufmgr when looping through multiple fetches.
 * Also, if *userbuf is the same buffer that holds the target tuple,
 * we avoid bufmgr manipulation altogether.
 */
1355 heap_release_fetch(Relation relation
,
1360 Relation stats_relation
)
1362 ItemPointer tid
= &(tuple
->t_self
);
1366 OffsetNumber offnum
;
1370 * get the buffer from the relation descriptor. Note that this does a
1371 * buffer pin, and releases the old *userbuf if not InvalidBuffer.
1373 buffer
= ReleaseAndReadBuffer(*userbuf
, relation
,
1374 ItemPointerGetBlockNumber(tid
));
1377 * Need share lock on buffer to examine tuple commit status.
1379 LockBuffer(buffer
, BUFFER_LOCK_SHARE
);
1380 dp
= (PageHeader
) BufferGetPage(buffer
);
1383 * We'd better check for out-of-range offnum in case of VACUUM since the
1386 offnum
= ItemPointerGetOffsetNumber(tid
);
1387 if (offnum
< FirstOffsetNumber
|| offnum
> PageGetMaxOffsetNumber(dp
))
1389 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
1394 ReleaseBuffer(buffer
);
1395 *userbuf
= InvalidBuffer
;
1397 tuple
->t_data
= NULL
;
1402 * get the item line pointer corresponding to the requested tid
1404 lp
= PageGetItemId(dp
, offnum
);
1407 * Must check for deleted tuple.
1409 if (!ItemIdIsNormal(lp
))
1411 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
1416 ReleaseBuffer(buffer
);
1417 *userbuf
= InvalidBuffer
;
1419 tuple
->t_data
= NULL
;
1424 * fill in *tuple fields
1426 tuple
->t_data
= (HeapTupleHeader
) PageGetItem((Page
) dp
, lp
);
1427 tuple
->t_len
= ItemIdGetLength(lp
);
1428 tuple
->t_tableOid
= RelationGetRelid(relation
);
1431 * check time qualification of tuple, then release lock
1433 valid
= HeapTupleSatisfiesVisibility(tuple
, snapshot
, buffer
);
1435 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
1440 * All checks passed, so return the tuple as valid. Caller is now
1441 * responsible for releasing the buffer.
1445 /* Count the successful fetch against appropriate rel, if any */
1446 if (stats_relation
!= NULL
)
1447 pgstat_count_heap_fetch(stats_relation
);
1452 /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1457 ReleaseBuffer(buffer
);
1458 *userbuf
= InvalidBuffer
;
/*
 *	heap_hot_search_buffer	- search HOT chain for tuple satisfying snapshot
 *
 * On entry, *tid is the TID of a tuple (either a simple tuple, or the root
 * of a HOT chain), and buffer is the buffer holding this tuple.  We search
 * for the first chain member satisfying the given snapshot.  If one is
 * found, we update *tid to reference that tuple's offset number, and
 * return TRUE.  If no match, return FALSE without modifying *tid.
 *
 * If all_dead is not NULL, we check non-visible tuples to see if they are
 * globally dead; *all_dead is set TRUE if all members of the HOT chain
 * are vacuumable, FALSE if not.
 *
 * Unlike heap_fetch, the caller must already have pin and (at least) share
 * lock on the buffer; it is still pinned/locked at exit.  Also unlike
 * heap_fetch, we do not report any pgstats count; caller may do so if wanted.
 */
1482 heap_hot_search_buffer(ItemPointer tid
, Buffer buffer
, Snapshot snapshot
,
1485 Page dp
= (Page
) BufferGetPage(buffer
);
1486 TransactionId prev_xmax
= InvalidTransactionId
;
1487 OffsetNumber offnum
;
1488 bool at_chain_start
;
1493 Assert(ItemPointerGetBlockNumber(tid
) == BufferGetBlockNumber(buffer
));
1494 offnum
= ItemPointerGetOffsetNumber(tid
);
1495 at_chain_start
= true;
1497 /* Scan through possible multiple members of HOT-chain */
1501 HeapTupleData heapTuple
;
1503 /* check for bogus TID */
1504 if (offnum
< FirstOffsetNumber
|| offnum
> PageGetMaxOffsetNumber(dp
))
1507 lp
= PageGetItemId(dp
, offnum
);
1509 /* check for unused, dead, or redirected items */
1510 if (!ItemIdIsNormal(lp
))
1512 /* We should only see a redirect at start of chain */
1513 if (ItemIdIsRedirected(lp
) && at_chain_start
)
1515 /* Follow the redirect */
1516 offnum
= ItemIdGetRedirect(lp
);
1517 at_chain_start
= false;
1520 /* else must be end of chain */
1524 heapTuple
.t_data
= (HeapTupleHeader
) PageGetItem(dp
, lp
);
1525 heapTuple
.t_len
= ItemIdGetLength(lp
);
1528 * Shouldn't see a HEAP_ONLY tuple at chain start.
1530 if (at_chain_start
&& HeapTupleIsHeapOnly(&heapTuple
))
1534 * The xmin should match the previous xmax value, else chain is
1537 if (TransactionIdIsValid(prev_xmax
) &&
1538 !TransactionIdEquals(prev_xmax
,
1539 HeapTupleHeaderGetXmin(heapTuple
.t_data
)))
1542 /* If it's visible per the snapshot, we must return it */
1543 if (HeapTupleSatisfiesVisibility(&heapTuple
, snapshot
, buffer
))
1545 ItemPointerSetOffsetNumber(tid
, offnum
);
1552 * If we can't see it, maybe no one else can either. At caller
1553 * request, check whether all chain members are dead to all
1556 if (all_dead
&& *all_dead
&&
1557 HeapTupleSatisfiesVacuum(heapTuple
.t_data
, RecentGlobalXmin
,
1558 buffer
) != HEAPTUPLE_DEAD
)
1562 * Check to see if HOT chain continues past this tuple; if so fetch
1563 * the next offnum and loop around.
1565 if (HeapTupleIsHotUpdated(&heapTuple
))
1567 Assert(ItemPointerGetBlockNumber(&heapTuple
.t_data
->t_ctid
) ==
1568 ItemPointerGetBlockNumber(tid
));
1569 offnum
= ItemPointerGetOffsetNumber(&heapTuple
.t_data
->t_ctid
);
1570 at_chain_start
= false;
1571 prev_xmax
= HeapTupleHeaderGetXmax(heapTuple
.t_data
);
1574 break; /* end of chain */
/*
 *	heap_hot_search		- search HOT chain for tuple satisfying snapshot
 *
 * This has the same API as heap_hot_search_buffer, except that the caller
 * does not provide the buffer containing the page, rather we access it
 * locally.
 */
bool
heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
				bool *all_dead)
{
	bool		result;
	Buffer		buffer;

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	result = heap_hot_search_buffer(tid, buffer, snapshot, all_dead);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buffer);
	return result;
}
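
/*
 * Illustrative usage sketch (not part of the original file): an index-AM
 * style probe asking whether any member of the HOT chain rooted at "tid"
 * is visible to "snap", without the caller holding the buffer itself.
 * "rel", "snap" and "tid" are hypothetical caller state.
 *
 *		bool	all_dead;
 *
 *		if (heap_hot_search(tid, rel, snap, &all_dead))
 *		{
 *			// *tid now points at the visible chain member
 *		}
 *		else if (all_dead)
 *		{
 *			// every chain member is dead to everyone; the caller could
 *			// mark its index entry as killed
 *		}
 */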
/*
 *	heap_get_latest_tid -  get the latest tid of a specified tuple
 *
 * Actually, this gets the latest version that is visible according to
 * the passed snapshot.  You can pass SnapshotDirty to get the very latest,
 * possibly uncommitted version.
 *
 * *tid is both an input and an output parameter: it is updated to
 * show the latest version of the row.  Note that it will not be changed
 * if no version of the row passes the snapshot test.
 */
1614 heap_get_latest_tid(Relation relation
,
1619 ItemPointerData ctid
;
1620 TransactionId priorXmax
;
1622 /* this is to avoid Assert failures on bad input */
1623 if (!ItemPointerIsValid(tid
))
1627 * Since this can be called with user-supplied TID, don't trust the input
1628 * too much. (RelationGetNumberOfBlocks is an expensive check, so we
1629 * don't check t_ctid links again this way. Note that it would not do to
1630 * call it just once and save the result, either.)
1632 blk
= ItemPointerGetBlockNumber(tid
);
1633 if (blk
>= RelationGetNumberOfBlocks(relation
))
1634 elog(ERROR
, "block number %u is out of range for relation \"%s\"",
1635 blk
, RelationGetRelationName(relation
));
1638 * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1639 * need to examine, and *tid is the TID we will return if ctid turns out
1642 * Note that we will loop until we reach the end of the t_ctid chain.
1643 * Depending on the snapshot passed, there might be at most one visible
1644 * version of the row, but we don't try to optimize for that.
1647 priorXmax
= InvalidTransactionId
; /* cannot check first XMIN */
1652 OffsetNumber offnum
;
1658 * Read, pin, and lock the page.
1660 buffer
= ReadBuffer(relation
, ItemPointerGetBlockNumber(&ctid
));
1661 LockBuffer(buffer
, BUFFER_LOCK_SHARE
);
1662 dp
= (PageHeader
) BufferGetPage(buffer
);
1665 * Check for bogus item number. This is not treated as an error
1666 * condition because it can happen while following a t_ctid link. We
1667 * just assume that the prior tid is OK and return it unchanged.
1669 offnum
= ItemPointerGetOffsetNumber(&ctid
);
1670 if (offnum
< FirstOffsetNumber
|| offnum
> PageGetMaxOffsetNumber(dp
))
1672 UnlockReleaseBuffer(buffer
);
1675 lp
= PageGetItemId(dp
, offnum
);
1676 if (!ItemIdIsNormal(lp
))
1678 UnlockReleaseBuffer(buffer
);
1682 /* OK to access the tuple */
1684 tp
.t_data
= (HeapTupleHeader
) PageGetItem(dp
, lp
);
1685 tp
.t_len
= ItemIdGetLength(lp
);
1688 * After following a t_ctid link, we might arrive at an unrelated
1689 * tuple. Check for XMIN match.
1691 if (TransactionIdIsValid(priorXmax
) &&
1692 !TransactionIdEquals(priorXmax
, HeapTupleHeaderGetXmin(tp
.t_data
)))
1694 UnlockReleaseBuffer(buffer
);
1699 * Check time qualification of tuple; if visible, set it as the new
1702 valid
= HeapTupleSatisfiesVisibility(&tp
, snapshot
, buffer
);
1707 * If there's a valid t_ctid link, follow it, else we're done.
1709 if ((tp
.t_data
->t_infomask
& (HEAP_XMAX_INVALID
| HEAP_IS_LOCKED
)) ||
1710 ItemPointerEquals(&tp
.t_self
, &tp
.t_data
->t_ctid
))
1712 UnlockReleaseBuffer(buffer
);
1716 ctid
= tp
.t_data
->t_ctid
;
1717 priorXmax
= HeapTupleHeaderGetXmax(tp
.t_data
);
1718 UnlockReleaseBuffer(buffer
);
/*
 * UpdateXmaxHintBits - update tuple hint bits after xmax transaction ends
 *
 * This is called after we have waited for the XMAX transaction to terminate.
 * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will
 * be set on exit.  If the transaction committed, we set the XMAX_COMMITTED
 * hint bit if possible --- but beware that that may not yet be possible,
 * if the transaction committed asynchronously.  Hence callers should look
 * only at XMAX_INVALID.
 */
static void
UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
{
	Assert(TransactionIdEquals(HeapTupleHeaderGetXmax(tuple), xid));

	if (!(tuple->t_infomask & (HEAP_XMAX_COMMITTED | HEAP_XMAX_INVALID)))
	{
		if (TransactionIdDidCommit(xid))
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_COMMITTED,
								 xid);
		else
			HeapTupleSetHintBits(tuple, buffer, HEAP_XMAX_INVALID,
								 InvalidTransactionId);
	}
}
/*
 *	heap_insert		- insert tuple into a heap
 *
 * The new tuple is stamped with current transaction ID and the specified
 * command ID.
 *
 * If use_wal is false, the new tuple is not logged in WAL, even for a
 * non-temp relation.  Safe usage of this behavior requires that we arrange
 * that all new tuples go into new pages not containing any tuples from other
 * transactions, and that the relation gets fsync'd before commit.
 * (See also heap_sync() comments)
 *
 * use_fsm is passed directly to RelationGetBufferForTuple, which see for
 * more info.
 *
 * Note that use_wal and use_fsm will be applied when inserting into the
 * heap's TOAST table, too, if the tuple requires any out-of-line data.
 *
 * The return value is the OID assigned to the tuple (either here or by the
 * caller), or InvalidOid if no OID.  The header fields of *tup are updated
 * to match the stored tuple; in particular tup->t_self receives the actual
 * TID where the tuple was stored.  But note that any toasting of fields
 * within the tuple data is NOT reflected into *tup.
 */
1775 heap_insert(Relation relation
, HeapTuple tup
, CommandId cid
,
1776 bool use_wal
, bool use_fsm
)
1778 TransactionId xid
= GetCurrentTransactionId();
1782 if (relation
->rd_rel
->relhasoids
)
1785 /* this is redundant with an Assert in HeapTupleSetOid */
1786 Assert(tup
->t_data
->t_infomask
& HEAP_HASOID
);
1790 * If the object id of this tuple has already been assigned, trust the
1791 * caller. There are a couple of ways this can happen. At initial db
1792 * creation, the backend program sets oids for tuples. When we define
1793 * an index, we set the oid. Finally, in the future, we may allow
1794 * users to set their own object ids in order to support a persistent
1795 * object store (objects need to contain pointers to one another).
1797 if (!OidIsValid(HeapTupleGetOid(tup
)))
1798 HeapTupleSetOid(tup
, GetNewOid(relation
));
1802 /* check there is not space for an OID */
1803 Assert(!(tup
->t_data
->t_infomask
& HEAP_HASOID
));
1806 tup
->t_data
->t_infomask
&= ~(HEAP_XACT_MASK
);
1807 tup
->t_data
->t_infomask2
&= ~(HEAP2_XACT_MASK
);
1808 tup
->t_data
->t_infomask
|= HEAP_XMAX_INVALID
;
1809 HeapTupleHeaderSetXmin(tup
->t_data
, xid
);
1810 HeapTupleHeaderSetCmin(tup
->t_data
, cid
);
1811 HeapTupleHeaderSetXmax(tup
->t_data
, 0); /* for cleanliness */
1812 tup
->t_tableOid
= RelationGetRelid(relation
);
1815 * If the new tuple is too big for storage or contains already toasted
1816 * out-of-line attributes from some other relation, invoke the toaster.
1818 * Note: below this point, heaptup is the data we actually intend to store
1819 * into the relation; tup is the caller's original untoasted data.
1821 if (relation
->rd_rel
->relkind
!= RELKIND_RELATION
)
1823 /* toast table entries should never be recursively toasted */
1824 Assert(!HeapTupleHasExternal(tup
));
1827 else if (HeapTupleHasExternal(tup
) || tup
->t_len
> TOAST_TUPLE_THRESHOLD
)
1828 heaptup
= toast_insert_or_update(relation
, tup
, NULL
,
1833 /* Find buffer to insert this tuple into */
1834 buffer
= RelationGetBufferForTuple(relation
, heaptup
->t_len
,
1835 InvalidBuffer
, use_fsm
);
1837 /* NO EREPORT(ERROR) from here till changes are logged */
1838 START_CRIT_SECTION();
1840 RelationPutHeapTuple(relation
, buffer
, heaptup
);
1843 * XXX Should we set PageSetPrunable on this page ?
1845 * The inserting transaction may eventually abort thus making this tuple
1846 * DEAD and hence available for pruning. Though we don't want to optimize
1847 * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1848 * aborted tuple will never be pruned until next vacuum is triggered.
1850 * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1853 MarkBufferDirty(buffer
);
1856 if (use_wal
&& !relation
->rd_istemp
)
1858 xl_heap_insert xlrec
;
1859 xl_heap_header xlhdr
;
1861 XLogRecData rdata
[3];
1862 Page page
= BufferGetPage(buffer
);
1863 uint8 info
= XLOG_HEAP_INSERT
;
1865 xlrec
.target
.node
= relation
->rd_node
;
1866 xlrec
.target
.tid
= heaptup
->t_self
;
1867 rdata
[0].data
= (char *) &xlrec
;
1868 rdata
[0].len
= SizeOfHeapInsert
;
1869 rdata
[0].buffer
= InvalidBuffer
;
1870 rdata
[0].next
= &(rdata
[1]);
1872 xlhdr
.t_infomask2
= heaptup
->t_data
->t_infomask2
;
1873 xlhdr
.t_infomask
= heaptup
->t_data
->t_infomask
;
1874 xlhdr
.t_hoff
= heaptup
->t_data
->t_hoff
;
1877 * note we mark rdata[1] as belonging to buffer; if XLogInsert decides
1878 * to write the whole page to the xlog, we don't need to store
1879 * xl_heap_header in the xlog.
1881 rdata
[1].data
= (char *) &xlhdr
;
1882 rdata
[1].len
= SizeOfHeapHeader
;
1883 rdata
[1].buffer
= buffer
;
1884 rdata
[1].buffer_std
= true;
1885 rdata
[1].next
= &(rdata
[2]);
1887 /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
1888 rdata
[2].data
= (char *) heaptup
->t_data
+ offsetof(HeapTupleHeaderData
, t_bits
);
1889 rdata
[2].len
= heaptup
->t_len
- offsetof(HeapTupleHeaderData
, t_bits
);
1890 rdata
[2].buffer
= buffer
;
1891 rdata
[2].buffer_std
= true;
1892 rdata
[2].next
= NULL
;
1895 * If this is the single and first tuple on page, we can reinit the
1896 * page instead of restoring the whole thing. Set flag, and hide
1897 * buffer references from XLogInsert.
1899 if (ItemPointerGetOffsetNumber(&(heaptup
->t_self
)) == FirstOffsetNumber
&&
1900 PageGetMaxOffsetNumber(page
) == FirstOffsetNumber
)
1902 info
|= XLOG_HEAP_INIT_PAGE
;
1903 rdata
[1].buffer
= rdata
[2].buffer
= InvalidBuffer
;
1906 recptr
= XLogInsert(RM_HEAP_ID
, info
, rdata
);
1908 PageSetLSN(page
, recptr
);
1909 PageSetTLI(page
, ThisTimeLineID
);
1914 UnlockReleaseBuffer(buffer
);
1917 * If tuple is cachable, mark it for invalidation from the caches in case
1918 * we abort. Note it is OK to do this after releasing the buffer, because
1919 * the heaptup data structure is all in local memory, not in the shared
1922 CacheInvalidateHeapTuple(relation
, heaptup
);
1924 pgstat_count_heap_insert(relation
);
1927 * If heaptup is a private copy, release it. Don't forget to copy t_self
1928 * back to the caller's image, too.
1932 tup
->t_self
= heaptup
->t_self
;
1933 heap_freetuple(heaptup
);
1936 return HeapTupleGetOid(tup
);
/*
 *	simple_heap_insert - insert a tuple
 *
 * Currently, this routine differs from heap_insert only in supplying
 * a default command ID and not allowing access to the speedup options.
 *
 * This should be used rather than using heap_insert directly in most places
 * where we are modifying system catalogs.
 */
Oid
simple_heap_insert(Relation relation, HeapTuple tup)
{
	return heap_insert(relation, tup, GetCurrentCommandId(true), true, true);
}
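
/*
 * Illustrative usage sketch (not part of the original file): forming and
 * inserting a tuple into an already-opened catalog-style relation.  The
 * "values" and "nulls" arrays are hypothetical and must match rel's tuple
 * descriptor.
 *
 *		HeapTuple	tup;
 *
 *		tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
 *		simple_heap_insert(rel, tup);
 *		// for system catalogs the caller must also maintain indexes,
 *		// e.g. with CatalogUpdateIndexes(rel, tup)
 *		heap_freetuple(tup);
 */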
/*
 *	heap_delete - delete a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_delete instead.
 *
 *	relation - table to be modified (caller must hold suitable lock)
 *	tid - TID of tuple to be deleted
 *	ctid - output parameter, used only for failure case (see below)
 *	update_xmax - output parameter, used only for failure case (see below)
 *	cid - delete command ID (used for visibility test, and stored into
 *		cmax if successful)
 *	crosscheck - if not InvalidSnapshot, also check tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we did delete it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as tid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
1980 heap_delete(Relation relation
, ItemPointer tid
,
1981 ItemPointer ctid
, TransactionId
*update_xmax
,
1982 CommandId cid
, Snapshot crosscheck
, bool wait
)
1985 TransactionId xid
= GetCurrentTransactionId();
1990 bool have_tuple_lock
= false;
1993 Assert(ItemPointerIsValid(tid
));
1995 buffer
= ReadBuffer(relation
, ItemPointerGetBlockNumber(tid
));
1996 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
1998 dp
= (PageHeader
) BufferGetPage(buffer
);
1999 lp
= PageGetItemId(dp
, ItemPointerGetOffsetNumber(tid
));
2000 Assert(ItemIdIsNormal(lp
));
2002 tp
.t_data
= (HeapTupleHeader
) PageGetItem(dp
, lp
);
2003 tp
.t_len
= ItemIdGetLength(lp
);
2007 result
= HeapTupleSatisfiesUpdate(tp
.t_data
, cid
, buffer
);
2009 if (result
== HeapTupleInvisible
)
2011 UnlockReleaseBuffer(buffer
);
2012 elog(ERROR
, "attempted to delete invisible tuple");
2014 else if (result
== HeapTupleBeingUpdated
&& wait
)
2016 TransactionId xwait
;
2019 /* must copy state data before unlocking buffer */
2020 xwait
= HeapTupleHeaderGetXmax(tp
.t_data
);
2021 infomask
= tp
.t_data
->t_infomask
;
2023 LockBuffer(buffer
, BUFFER_LOCK_UNLOCK
);
2026 * Acquire tuple lock to establish our priority for the tuple (see
2027 * heap_lock_tuple). LockTuple will release us when we are
2028 * next-in-line for the tuple.
2030 * If we are forced to "start over" below, we keep the tuple lock;
2031 * this arranges that we stay at the head of the line while rechecking
2034 if (!have_tuple_lock
)
2036 LockTuple(relation
, &(tp
.t_self
), ExclusiveLock
);
2037 have_tuple_lock
= true;
2041 * Sleep until concurrent transaction ends. Note that we don't care
2042 * if the locker has an exclusive or shared lock, because we need
2046 if (infomask
& HEAP_XMAX_IS_MULTI
)
2048 /* wait for multixact */
2049 MultiXactIdWait((MultiXactId
) xwait
);
2050 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2053 * If xwait had just locked the tuple then some other xact could
2054 * update this tuple before we get to this point. Check for xmax
2055 * change, and start over if so.
2057 if (!(tp
.t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
2058 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp
.t_data
),
2063 * You might think the multixact is necessarily done here, but not
2064 * so: it could have surviving members, namely our own xact or
2065 * other subxacts of this backend. It is legal for us to delete
2066 * the tuple in either case, however (the latter case is
2067 * essentially a situation of upgrading our former shared lock to
2068 * exclusive). We don't bother changing the on-disk hint bits
2069 * since we are about to overwrite the xmax altogether.
2074 /* wait for regular transaction to end */
2075 XactLockTableWait(xwait
);
2076 LockBuffer(buffer
, BUFFER_LOCK_EXCLUSIVE
);
2079 * xwait is done, but if xwait had just locked the tuple then some
2080 * other xact could update this tuple before we get to this point.
2081 * Check for xmax change, and start over if so.
2083 if ((tp
.t_data
->t_infomask
& HEAP_XMAX_IS_MULTI
) ||
2084 !TransactionIdEquals(HeapTupleHeaderGetXmax(tp
.t_data
),
2088 /* Otherwise check if it committed or aborted */
2089 UpdateXmaxHintBits(tp
.t_data
, buffer
, xwait
);
2093 * We may overwrite if previous xmax aborted, or if it committed but
2094 * only locked the tuple without updating it.
2096 if (tp
.t_data
->t_infomask
& (HEAP_XMAX_INVALID
|
2098 result
= HeapTupleMayBeUpdated
;
2100 result
= HeapTupleUpdated
;
2103 if (crosscheck
!= InvalidSnapshot
&& result
== HeapTupleMayBeUpdated
)
2105 /* Perform additional check for serializable RI updates */
2106 if (!HeapTupleSatisfiesVisibility(&tp
, crosscheck
, buffer
))
2107 result
= HeapTupleUpdated
;
2110 if (result
!= HeapTupleMayBeUpdated
)
2112 Assert(result
== HeapTupleSelfUpdated
||
2113 result
== HeapTupleUpdated
||
2114 result
== HeapTupleBeingUpdated
);
2115 Assert(!(tp
.t_data
->t_infomask
& HEAP_XMAX_INVALID
));
2116 *ctid
= tp
.t_data
->t_ctid
;
2117 *update_xmax
= HeapTupleHeaderGetXmax(tp
.t_data
);
2118 UnlockReleaseBuffer(buffer
);
2119 if (have_tuple_lock
)
2120 UnlockTuple(relation
, &(tp
.t_self
), ExclusiveLock
);
	/* replace cid with a combo cid if necessary */
	HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);

	START_CRIT_SECTION();

	/*
	 * If this transaction commits, the tuple will become DEAD sooner or
	 * later.  Set flag that this page is a candidate for pruning once our xid
	 * falls below the OldestXmin horizon.  If the transaction finally aborts,
	 * the subsequent page pruning will be a no-op and the hint will be
	 * cleared.
	 */
	PageSetPrunable(dp, xid);

	/* store transaction information of xact deleting the tuple */
	tp.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
							   HEAP_XMAX_INVALID |
							   HEAP_XMAX_IS_MULTI |
							   HEAP_IS_LOCKED |
							   HEAP_MOVED);
	HeapTupleHeaderClearHotUpdated(tp.t_data);
	HeapTupleHeaderSetXmax(tp.t_data, xid);
	HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
	/* Make sure there is no forward chain link in t_ctid */
	tp.t_data->t_ctid = tp.t_self;

	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (!relation->rd_istemp)
	{
		xl_heap_delete xlrec;
		XLogRecPtr	recptr;
		XLogRecData rdata[2];

		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = tp.t_self;
		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapDelete;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		rdata[1].data = NULL;
		rdata[1].len = 0;
		rdata[1].buffer = buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = NULL;

		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);

		PageSetLSN(dp, recptr);
		PageSetTLI(dp, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	/*
	 * If the tuple has toasted out-of-line attributes, we need to delete
	 * those items too.  We have to do this before releasing the buffer
	 * because we need to look at the contents of the tuple, but it's OK to
	 * release the content lock on the buffer first.
	 */
	if (relation->rd_rel->relkind != RELKIND_RELATION)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(&tp));
	}
	else if (HeapTupleHasExternal(&tp))
		toast_delete(relation, &tp);

	/*
	 * Mark tuple for invalidation from system caches at next command
	 * boundary. We have to do this before releasing the buffer because we
	 * need to look at the contents of the tuple.
	 */
	CacheInvalidateHeapTuple(relation, &tp);

	/* Now we can release the buffer */
	ReleaseBuffer(buffer);

	/*
	 * Release the lmgr tuple lock, if we had it.
	 */
	if (have_tuple_lock)
		UnlockTuple(relation, &(tp.t_self), ExclusiveLock);

	pgstat_count_heap_delete(relation);

	return HeapTupleMayBeUpdated;
}
/*
 *	simple_heap_delete - delete a tuple
 *
 * This routine may be used to delete a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	result = heap_delete(relation, tid,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_delete status: %u", result);
			break;
	}
}
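
/*
 * Usage sketch (illustrative only, not part of the original code; "rel" and
 * "tuple" are hypothetical locals).  A caller that already holds a suitable
 * lock on the relation deletes a tuple it previously fetched, then advances
 * the command counter so later commands in the same transaction see the
 * deletion:
 *
 *		simple_heap_delete(rel, &tuple->t_self);
 *		CommandCounterIncrement();
 */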
/*
 *	heap_update - replace a tuple
 *
 * NB: do not call this directly unless you are prepared to deal with
 * concurrent-update conditions.  Use simple_heap_update instead.
 *
 *	relation - table to be modified (caller must hold suitable lock)
 *	otid - TID of old tuple to be replaced
 *	newtup - newly constructed tuple data to store
 *	ctid - output parameter, used only for failure case (see below)
 *	update_xmax - output parameter, used only for failure case (see below)
 *	cid - update command ID (used for visibility test, and stored into
 *		cmax/cmin if successful)
 *	crosscheck - if not InvalidSnapshot, also check old tuple against this
 *	wait - true if should wait for any conflicting update to commit/abort
 *
 * Normal, successful return value is HeapTupleMayBeUpdated, which
 * actually means we *did* update it.  Failure return codes are
 * HeapTupleSelfUpdated, HeapTupleUpdated, or HeapTupleBeingUpdated
 * (the last only possible if wait == false).
 *
 * On success, the header fields of *newtup are updated to match the new
 * stored tuple; in particular, newtup->t_self is set to the TID where the
 * new tuple was inserted, and its HEAP_ONLY_TUPLE flag is set iff a HOT
 * update was done.  However, any TOAST changes in the new tuple's
 * data are not reflected into *newtup.
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as otid, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 */
HTSU_Result
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
			ItemPointer ctid, TransactionId *update_xmax,
			CommandId cid, Snapshot crosscheck, bool wait)
{
	HTSU_Result result;
	TransactionId xid = GetCurrentTransactionId();
	Bitmapset  *hot_attrs;
	ItemId		lp;
	HeapTupleData oldtup;
	HeapTuple	heaptup;
	PageHeader	dp;
	Buffer		buffer,
				newbuf;
	bool		need_toast,
				already_marked;
	Size		newtupsize,
				pagefree;
	bool		have_tuple_lock = false;
	bool		iscombo;
	bool		use_hot_update = false;

	Assert(ItemPointerIsValid(otid));

	/*
	 * Fetch the list of attributes to be checked for HOT update.  This is
	 * wasted effort if we fail to update or have to put the new tuple on a
	 * different page.  But we must compute the list before obtaining buffer
	 * lock --- in the worst case, if we are doing an update on one of the
	 * relevant system catalogs, we could deadlock if we try to fetch the list
	 * later.  In any case, the relcache caches the data so this is usually
	 * pretty cheap.
	 *
	 * Note that we get a copy here, so we need not worry about relcache flush
	 * happening midway through.
	 */
	hot_attrs = RelationGetIndexAttrBitmap(relation);

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	dp = (PageHeader) BufferGetPage(buffer);
	lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(otid));
	Assert(ItemIdIsNormal(lp));

	oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
	oldtup.t_len = ItemIdGetLength(lp);
	oldtup.t_self = *otid;

	/*
	 * Note: beyond this point, use oldtup not otid to refer to old tuple.
	 * otid may very well point at newtup->t_self, which we will overwrite
	 * with the new tuple's location, so there's great risk of confusion if we
	 * use otid anymore.
	 */

l2:
	result = HeapTupleSatisfiesUpdate(oldtup.t_data, cid, buffer);

	if (result == HeapTupleInvisible)
	{
		UnlockReleaseBuffer(buffer);
		elog(ERROR, "attempted to update invisible tuple");
	}
	else if (result == HeapTupleBeingUpdated && wait)
	{
		TransactionId xwait;
		uint16		infomask;

		/* must copy state data before unlocking buffer */
		xwait = HeapTupleHeaderGetXmax(oldtup.t_data);
		infomask = oldtup.t_data->t_infomask;

		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

		/*
		 * Acquire tuple lock to establish our priority for the tuple (see
		 * heap_lock_tuple).  LockTuple will release us when we are
		 * next-in-line for the tuple.
		 *
		 * If we are forced to "start over" below, we keep the tuple lock;
		 * this arranges that we stay at the head of the line while rechecking
		 * tuple state.
		 */
		if (!have_tuple_lock)
		{
			LockTuple(relation, &(oldtup.t_self), ExclusiveLock);
			have_tuple_lock = true;
		}

		/*
		 * Sleep until concurrent transaction ends.  Note that we don't care
		 * if the locker has an exclusive or shared lock, because we need
		 * exclusive.
		 */
		if (infomask & HEAP_XMAX_IS_MULTI)
		{
			/* wait for multixact */
			MultiXactIdWait((MultiXactId) xwait);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * If xwait had just locked the tuple then some other xact could
			 * update this tuple before we get to this point.  Check for xmax
			 * change, and start over if so.
			 */
			if (!(oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
				!TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
									 xwait))
				goto l2;

			/*
			 * You might think the multixact is necessarily done here, but not
			 * so: it could have surviving members, namely our own xact or
			 * other subxacts of this backend.  It is legal for us to update
			 * the tuple in either case, however (the latter case is
			 * essentially a situation of upgrading our former shared lock to
			 * exclusive).  We don't bother changing the on-disk hint bits
			 * since we are about to overwrite the xmax altogether.
			 */
		}
		else
		{
			/* wait for regular transaction to end */
			XactLockTableWait(xwait);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * xwait is done, but if xwait had just locked the tuple then some
			 * other xact could update this tuple before we get to this point.
			 * Check for xmax change, and start over if so.
			 */
			if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
				!TransactionIdEquals(HeapTupleHeaderGetXmax(oldtup.t_data),
									 xwait))
				goto l2;

			/* Otherwise check if it committed or aborted */
			UpdateXmaxHintBits(oldtup.t_data, buffer, xwait);
		}

		/*
		 * We may overwrite if previous xmax aborted, or if it committed but
		 * only locked the tuple without updating it.
		 */
		if (oldtup.t_data->t_infomask & (HEAP_XMAX_INVALID |
										 HEAP_IS_LOCKED))
			result = HeapTupleMayBeUpdated;
		else
			result = HeapTupleUpdated;
	}

	if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
	{
		/* Perform additional check for serializable RI updates */
		if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer))
			result = HeapTupleUpdated;
	}

	if (result != HeapTupleMayBeUpdated)
	{
		Assert(result == HeapTupleSelfUpdated ||
			   result == HeapTupleUpdated ||
			   result == HeapTupleBeingUpdated);
		Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID));
		*ctid = oldtup.t_data->t_ctid;
		*update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data);
		UnlockReleaseBuffer(buffer);
		if (have_tuple_lock)
			UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);
		bms_free(hot_attrs);
		return result;
	}
	/* Fill in OID and transaction status data for newtup */
	if (relation->rd_rel->relhasoids)
	{
#ifdef NOT_USED
		/* this is redundant with an Assert in HeapTupleSetOid */
		Assert(newtup->t_data->t_infomask & HEAP_HASOID);
#endif
		HeapTupleSetOid(newtup, HeapTupleGetOid(&oldtup));
	}
	else
	{
		/* check there is not space for an OID */
		Assert(!(newtup->t_data->t_infomask & HEAP_HASOID));
	}

	newtup->t_data->t_infomask &= ~(HEAP_XACT_MASK);
	newtup->t_data->t_infomask2 &= ~(HEAP2_XACT_MASK);
	newtup->t_data->t_infomask |= (HEAP_XMAX_INVALID | HEAP_UPDATED);
	HeapTupleHeaderSetXmin(newtup->t_data, xid);
	HeapTupleHeaderSetCmin(newtup->t_data, cid);
	HeapTupleHeaderSetXmax(newtup->t_data, 0);	/* for cleanliness */

	/*
	 * Replace cid with a combo cid if necessary.  Note that we already put
	 * the plain cid into the new tuple.
	 */
	HeapTupleHeaderAdjustCmax(oldtup.t_data, &cid, &iscombo);

	/*
	 * If the toaster needs to be activated, OR if the new tuple will not fit
	 * on the same page as the old, then we need to release the content lock
	 * (but not the pin!) on the old tuple's buffer while we are off doing
	 * TOAST and/or table-file-extension work.  We must mark the old tuple to
	 * show that it's already being updated, else other processes may try to
	 * update it themselves.
	 *
	 * We need to invoke the toaster if there are already any out-of-line
	 * toasted values present, or if the new tuple is over-threshold.
	 */
	if (relation->rd_rel->relkind != RELKIND_RELATION)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(&oldtup));
		Assert(!HeapTupleHasExternal(newtup));
		need_toast = false;
	}
	else
		need_toast = (HeapTupleHasExternal(&oldtup) ||
					  HeapTupleHasExternal(newtup) ||
					  newtup->t_len > TOAST_TUPLE_THRESHOLD);

	pagefree = PageGetHeapFreeSpace((Page) dp);

	newtupsize = MAXALIGN(newtup->t_len);

	if (need_toast || newtupsize > pagefree)
	{
		/* Clear obsolete visibility flags ... */
		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
									   HEAP_XMAX_INVALID |
									   HEAP_XMAX_IS_MULTI |
									   HEAP_IS_LOCKED |
									   HEAP_MOVED);
		HeapTupleClearHotUpdated(&oldtup);
		/* ... and store info about transaction updating this tuple */
		HeapTupleHeaderSetXmax(oldtup.t_data, xid);
		HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
		/* temporarily make it look not-updated */
		oldtup.t_data->t_ctid = oldtup.t_self;
		already_marked = true;
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

		/*
		 * Let the toaster do its thing, if needed.
		 *
		 * Note: below this point, heaptup is the data we actually intend to
		 * store into the relation; newtup is the caller's original untoasted
		 * data.
		 */
		if (need_toast)
		{
			/* Note we always use WAL and FSM during updates */
			heaptup = toast_insert_or_update(relation, newtup, &oldtup,
											 true, true);
			newtupsize = MAXALIGN(heaptup->t_len);
		}
		else
			heaptup = newtup;

		/*
		 * Now, do we need a new page for the tuple, or not?  This is a bit
		 * tricky since someone else could have added tuples to the page while
		 * we weren't looking.  We have to recheck the available space after
		 * reacquiring the buffer lock.  But don't bother to do that if the
		 * former amount of free space is still not enough; it's unlikely
		 * there's more free now than before.
		 *
		 * What's more, if we need to get a new page, we will need to acquire
		 * buffer locks on both old and new pages.  To avoid deadlock against
		 * some other backend trying to get the same two locks in the other
		 * order, we must be consistent about the order we get the locks in.
		 * We use the rule "lock the lower-numbered page of the relation
		 * first".  To implement this, we must do RelationGetBufferForTuple
		 * while not holding the lock on the old page, and we must rely on it
		 * to get the locks on both pages in the correct order.
		 */
		if (newtupsize > pagefree)
		{
			/* Assume there's no chance to put heaptup on same page. */
			newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
											   buffer, true);
		}
		else
		{
			/* Re-acquire the lock on the old tuple's page. */
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			/* Re-check using the up-to-date free space */
			pagefree = PageGetHeapFreeSpace((Page) dp);
			if (newtupsize > pagefree)
			{
				/*
				 * Rats, it doesn't fit anymore.  We must now unlock and
				 * relock to avoid deadlock.  Fortunately, this path should
				 * seldom be taken.
				 */
				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
				newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
												   buffer, true);
			}
			else
			{
				/* OK, it fits here, so we're done. */
				newbuf = buffer;
			}
		}
	}
	else
	{
		/* No TOAST work needed, and it'll fit on same page */
		already_marked = false;
		newbuf = buffer;
		heaptup = newtup;
	}

	/*
	 * At this point newbuf and buffer are both pinned and locked, and newbuf
	 * has enough space for the new tuple.  If they are the same buffer, only
	 * one pin is held.
	 */

	if (newbuf == buffer)
	{
		/*
		 * Since the new tuple is going into the same page, we might be able
		 * to do a HOT update.  Check if any of the index columns have been
		 * changed.  If not, then HOT update is possible.
		 */
		if (HeapSatisfiesHOTUpdate(relation, hot_attrs, &oldtup, heaptup))
			use_hot_update = true;
	}
	else
	{
		/* Set a hint that the old page could use prune/defrag */
		PageSetFull(dp);
	}

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	/*
	 * If this transaction commits, the old tuple will become DEAD sooner or
	 * later.  Set flag that this page is a candidate for pruning once our xid
	 * falls below the OldestXmin horizon.  If the transaction finally aborts,
	 * the subsequent page pruning will be a no-op and the hint will be
	 * cleared.
	 *
	 * XXX Should we set hint on newbuf as well?  If the transaction aborts,
	 * there would be a prunable tuple in the newbuf; but for now we choose
	 * not to optimize for aborts.  Note that heap_xlog_update must be kept in
	 * sync if this decision changes.
	 */
	PageSetPrunable(dp, xid);

	if (use_hot_update)
	{
		/* Mark the old tuple as HOT-updated */
		HeapTupleSetHotUpdated(&oldtup);
		/* And mark the new tuple as heap-only */
		HeapTupleSetHeapOnly(heaptup);
		/* Mark the caller's copy too, in case different from heaptup */
		HeapTupleSetHeapOnly(newtup);
	}
	else
	{
		/* Make sure tuples are correctly marked as not-HOT */
		HeapTupleClearHotUpdated(&oldtup);
		HeapTupleClearHeapOnly(heaptup);
		HeapTupleClearHeapOnly(newtup);
	}

	RelationPutHeapTuple(relation, newbuf, heaptup);	/* insert new tuple */

	if (!already_marked)
	{
		/* Clear obsolete visibility flags ... */
		oldtup.t_data->t_infomask &= ~(HEAP_XMAX_COMMITTED |
									   HEAP_XMAX_INVALID |
									   HEAP_XMAX_IS_MULTI |
									   HEAP_IS_LOCKED |
									   HEAP_MOVED);
		/* ... and store info about transaction updating this tuple */
		HeapTupleHeaderSetXmax(oldtup.t_data, xid);
		HeapTupleHeaderSetCmax(oldtup.t_data, cid, iscombo);
	}

	/* record address of new tuple in t_ctid of old one */
	oldtup.t_data->t_ctid = heaptup->t_self;

	if (newbuf != buffer)
		MarkBufferDirty(newbuf);
	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (!relation->rd_istemp)
	{
		XLogRecPtr	recptr = log_heap_update(relation, buffer, oldtup.t_self,
											 newbuf, heaptup, false);

		if (newbuf != buffer)
		{
			PageSetLSN(BufferGetPage(newbuf), recptr);
			PageSetTLI(BufferGetPage(newbuf), ThisTimeLineID);
		}
		PageSetLSN(BufferGetPage(buffer), recptr);
		PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
	}

	END_CRIT_SECTION();

	if (newbuf != buffer)
		LockBuffer(newbuf, BUFFER_LOCK_UNLOCK);
	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	/*
	 * Mark old tuple for invalidation from system caches at next command
	 * boundary. We have to do this before releasing the buffer because we
	 * need to look at the contents of the tuple.
	 */
	CacheInvalidateHeapTuple(relation, &oldtup);

	/* Now we can release the buffer(s) */
	if (newbuf != buffer)
		ReleaseBuffer(newbuf);
	ReleaseBuffer(buffer);

	/*
	 * If new tuple is cachable, mark it for invalidation from the caches in
	 * case we abort.  Note it is OK to do this after releasing the buffer,
	 * because the heaptup data structure is all in local memory, not in the
	 * shared buffer.
	 */
	CacheInvalidateHeapTuple(relation, heaptup);

	/*
	 * Release the lmgr tuple lock, if we had it.
	 */
	if (have_tuple_lock)
		UnlockTuple(relation, &(oldtup.t_self), ExclusiveLock);

	pgstat_count_heap_update(relation, use_hot_update);

	/*
	 * If heaptup is a private copy, release it.  Don't forget to copy t_self
	 * back to the caller's image, too.
	 */
	if (heaptup != newtup)
	{
		newtup->t_self = heaptup->t_self;
		heap_freetuple(heaptup);
	}

	bms_free(hot_attrs);

	return HeapTupleMayBeUpdated;
}
/*
 * Check if the specified attribute's value is same in both given tuples.
 * Subroutine for HeapSatisfiesHOTUpdate.
 */
static bool
heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
					   HeapTuple tup1, HeapTuple tup2)
{
	Datum		value1,
				value2;
	bool		isnull1,
				isnull2;
	Form_pg_attribute att;

	/*
	 * If it's a whole-tuple reference, say "not equal".  It's not really
	 * worth supporting this case, since it could only succeed after a no-op
	 * update, which is hardly a case worth optimizing for.
	 */
	if (attrnum == 0)
		return false;

	/*
	 * Likewise, automatically say "not equal" for any system attribute other
	 * than OID and tableOID; we cannot expect these to be consistent in a HOT
	 * chain, or even to be set correctly yet in the new tuple.
	 */
	if (attrnum < 0)
	{
		if (attrnum != ObjectIdAttributeNumber &&
			attrnum != TableOidAttributeNumber)
			return false;
	}

	/*
	 * Extract the corresponding values.  XXX this is pretty inefficient if
	 * there are many indexed columns.  Should HeapSatisfiesHOTUpdate do a
	 * single heap_deform_tuple call on each tuple, instead?  But that doesn't
	 * work for system columns ...
	 */
	value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
	value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);

	/*
	 * If one value is NULL and other is not, then they are certainly not
	 * equal
	 */
	if (isnull1 != isnull2)
		return false;

	/*
	 * If both are NULL, they can be considered equal.
	 */
	if (isnull1)
		return true;

	/*
	 * We do simple binary comparison of the two datums.  This may be overly
	 * strict because there can be multiple binary representations for the
	 * same logical value.  But we should be OK as long as there are no false
	 * positives.  Using a type-specific equality operator is messy because
	 * there could be multiple notions of equality in different operator
	 * classes; furthermore, we cannot safely invoke user-defined functions
	 * while holding exclusive buffer lock.
	 */
	if (attrnum <= 0)
	{
		/* The only allowed system columns are OIDs, so do this */
		return (DatumGetObjectId(value1) == DatumGetObjectId(value2));
	}
	else
	{
		Assert(attrnum <= tupdesc->natts);
		att = tupdesc->attrs[attrnum - 1];
		return datumIsEqual(value1, value2, att->attbyval, att->attlen);
	}
}
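
/*
 * Worked example of the "no false positives" property above (editorial
 * note, not in the original code): the numeric values 0 and 0.0 are
 * logically equal but have different binary representations (different
 * display scales), so datumIsEqual reports them as unequal.  That merely
 * forgoes a HOT update that would have been legal; it never permits an
 * illegal one.
 */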
/*
 * Check if the old and new tuples represent a HOT-safe update. To be able
 * to do a HOT update, we must not have changed any columns used in index
 * definitions.
 *
 * The set of attributes to be checked is passed in (we dare not try to
 * compute it while holding exclusive buffer lock...) NOTE that hot_attrs
 * is destructively modified!  That is OK since this is invoked at most once
 * by heap_update().
 *
 * Returns true if safe to do HOT update.
 */
static bool
HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
					   HeapTuple oldtup, HeapTuple newtup)
{
	int			attrnum;

	while ((attrnum = bms_first_member(hot_attrs)) >= 0)
	{
		/* Adjust for system attributes */
		attrnum += FirstLowInvalidHeapAttributeNumber;

		/* If the attribute value has changed, we can't do HOT update */
		if (!heap_tuple_attr_equals(RelationGetDescr(relation), attrnum,
									oldtup, newtup))
			return false;
	}

	return true;
}
/*
 *	simple_heap_update - replace a tuple
 *
 * This routine may be used to update a tuple when concurrent updates of
 * the target tuple are not expected (for example, because we have a lock
 * on the relation associated with the tuple).  Any failure is reported
 * via ereport().
 */
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
	HTSU_Result result;
	ItemPointerData update_ctid;
	TransactionId update_xmax;

	result = heap_update(relation, otid, tup,
						 &update_ctid, &update_xmax,
						 GetCurrentCommandId(true), InvalidSnapshot,
						 true /* wait for commit */ );
	switch (result)
	{
		case HeapTupleSelfUpdated:
			/* Tuple was already updated in current command? */
			elog(ERROR, "tuple already updated by self");
			break;

		case HeapTupleMayBeUpdated:
			/* done successfully */
			break;

		case HeapTupleUpdated:
			elog(ERROR, "tuple concurrently updated");
			break;

		default:
			elog(ERROR, "unrecognized heap_update status: %u", result);
			break;
	}
}
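
/*
 * Usage sketch (illustrative only, not part of the original code; "rel" and
 * "newtup" are hypothetical locals).  The common in-transaction pattern is
 * to build a modified copy of an existing tuple and then:
 *
 *		simple_heap_update(rel, &newtup->t_self, newtup);
 *		CatalogUpdateIndexes(rel, newtup);
 *		CommandCounterIncrement();
 */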
/*
 *	heap_lock_tuple - lock a tuple in shared or exclusive mode
 *
 * Note that this acquires a buffer pin, which the caller must release.
 *
 * Input parameters:
 *	relation: relation containing tuple (caller must hold suitable lock)
 *	tuple->t_self: TID of tuple to lock (rest of struct need not be valid)
 *	cid: current command ID (used for visibility test, and stored into
 *		tuple's cmax if lock is successful)
 *	mode: indicates if shared or exclusive tuple lock is desired
 *	nowait: if true, ereport rather than blocking if lock not available
 *
 * Output parameters:
 *	*tuple: all fields filled in
 *	*buffer: set to buffer holding tuple (pinned but not locked at exit)
 *	*ctid: set to tuple's t_ctid, but only in failure cases
 *	*update_xmax: set to tuple's xmax, but only in failure cases
 *
 * Function result may be:
 *	HeapTupleMayBeUpdated: lock was successfully acquired
 *	HeapTupleSelfUpdated: lock failed because tuple updated by self
 *	HeapTupleUpdated: lock failed because tuple updated by other xact
 *
 * In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
 * If t_ctid is the same as t_self, the tuple was deleted; if different, the
 * tuple was updated, and t_ctid is the location of the replacement tuple.
 * (t_xmax is needed to verify that the replacement tuple matches.)
 *
 *
 * NOTES: because the shared-memory lock table is of finite size, but users
 * could reasonably want to lock large numbers of tuples, we do not rely on
 * the standard lock manager to store tuple-level locks over the long term.
 * Instead, a tuple is marked as locked by setting the current transaction's
 * XID as its XMAX, and setting additional infomask bits to distinguish this
 * usage from the more normal case of having deleted the tuple.  When
 * multiple transactions concurrently share-lock a tuple, the first locker's
 * XID is replaced in XMAX with a MultiTransactionId representing the set of
 * XIDs currently holding share-locks.
 *
 * When it is necessary to wait for a tuple-level lock to be released, the
 * basic delay is provided by XactLockTableWait or MultiXactIdWait on the
 * contents of the tuple's XMAX.  However, that mechanism will release all
 * waiters concurrently, so there would be a race condition as to which
 * waiter gets the tuple, potentially leading to indefinite starvation of
 * some waiters.  The possibility of share-locking makes the problem much
 * worse --- a steady stream of share-lockers can easily block an exclusive
 * locker forever.  To provide more reliable semantics about who gets a
 * tuple-level lock first, we use the standard lock manager.  The protocol
 * for waiting for a tuple-level lock is really
 *		LockTuple()
 *		XactLockTableWait()
 *		mark tuple as locked by me
 *		UnlockTuple()
 * When there are multiple waiters, arbitration of who is to get the lock next
 * is provided by LockTuple().  However, at most one tuple-level lock will
 * be held or awaited per backend at any time, so we don't risk overflow
 * of the lock table.  Note that incoming share-lockers are required to
 * do LockTuple as well, if there is any conflict, to ensure that they don't
 * starve out waiting exclusive-lockers.  However, if there is not any active
 * conflict for a tuple, we don't incur any extra overhead.
 */
HTSU_Result
heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer,
				ItemPointer ctid, TransactionId *update_xmax,
				CommandId cid, LockTupleMode mode, bool nowait)
{
	HTSU_Result result;
	ItemPointer tid = &(tuple->t_self);
	ItemId		lp;
	PageHeader	dp;
	TransactionId xid;
	TransactionId xmax;
	uint16		old_infomask;
	uint16		new_infomask;
	LOCKMODE	tuple_lock_type;
	bool		have_tuple_lock = false;

	tuple_lock_type = (mode == LockTupleShared) ? ShareLock : ExclusiveLock;

	*buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
	LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

	dp = (PageHeader) BufferGetPage(*buffer);
	lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid));
	Assert(ItemIdIsNormal(lp));

	tuple->t_data = (HeapTupleHeader) PageGetItem((Page) dp, lp);
	tuple->t_len = ItemIdGetLength(lp);
	tuple->t_tableOid = RelationGetRelid(relation);

l3:
	result = HeapTupleSatisfiesUpdate(tuple->t_data, cid, *buffer);

	if (result == HeapTupleInvisible)
	{
		UnlockReleaseBuffer(*buffer);
		elog(ERROR, "attempted to lock invisible tuple");
	}
	else if (result == HeapTupleBeingUpdated)
	{
		TransactionId xwait;
		uint16		infomask;

		/* must copy state data before unlocking buffer */
		xwait = HeapTupleHeaderGetXmax(tuple->t_data);
		infomask = tuple->t_data->t_infomask;

		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);

		/*
		 * If we wish to acquire share lock, and the tuple is already
		 * share-locked by a multixact that includes any subtransaction of the
		 * current top transaction, then we effectively hold the desired lock
		 * already.  We *must* succeed without trying to take the tuple lock,
		 * else we will deadlock against anyone waiting to acquire exclusive
		 * lock.  We don't need to make any state changes in this case.
		 */
		if (mode == LockTupleShared &&
			(infomask & HEAP_XMAX_IS_MULTI) &&
			MultiXactIdIsCurrent((MultiXactId) xwait))
		{
			Assert(infomask & HEAP_XMAX_SHARED_LOCK);
			/* Probably can't hold tuple lock here, but may as well check */
			if (have_tuple_lock)
				UnlockTuple(relation, tid, tuple_lock_type);
			return HeapTupleMayBeUpdated;
		}

		/*
		 * Acquire tuple lock to establish our priority for the tuple.
		 * LockTuple will release us when we are next-in-line for the tuple.
		 * We must do this even if we are share-locking.
		 *
		 * If we are forced to "start over" below, we keep the tuple lock;
		 * this arranges that we stay at the head of the line while rechecking
		 * tuple state.
		 */
		if (!have_tuple_lock)
		{
			if (nowait)
			{
				if (!ConditionalLockTuple(relation, tid, tuple_lock_type))
					ereport(ERROR,
							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
					errmsg("could not obtain lock on row in relation \"%s\"",
						   RelationGetRelationName(relation))));
			}
			else
				LockTuple(relation, tid, tuple_lock_type);
			have_tuple_lock = true;
		}

		if (mode == LockTupleShared && (infomask & HEAP_XMAX_SHARED_LOCK))
		{
			/*
			 * Acquiring sharelock when there's at least one sharelocker
			 * already.  We need not wait for him/them to complete.
			 */
			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * Make sure it's still a shared lock, else start over.  (It's OK
			 * if the ownership of the shared lock has changed, though.)
			 */
			if (!(tuple->t_data->t_infomask & HEAP_XMAX_SHARED_LOCK))
				goto l3;
		}
		else if (infomask & HEAP_XMAX_IS_MULTI)
		{
			/* wait for multixact to end */
			if (nowait)
			{
				if (!ConditionalMultiXactIdWait((MultiXactId) xwait))
					ereport(ERROR,
							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
					errmsg("could not obtain lock on row in relation \"%s\"",
						   RelationGetRelationName(relation))));
			}
			else
				MultiXactIdWait((MultiXactId) xwait);

			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * If xwait had just locked the tuple then some other xact could
			 * update this tuple before we get to this point.  Check for xmax
			 * change, and start over if so.
			 */
			if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
				!TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
									 xwait))
				goto l3;

			/*
			 * You might think the multixact is necessarily done here, but not
			 * so: it could have surviving members, namely our own xact or
			 * other subxacts of this backend.  It is legal for us to lock the
			 * tuple in either case, however.  We don't bother changing the
			 * on-disk hint bits since we are about to overwrite the xmax
			 * altogether.
			 */
		}
		else
		{
			/* wait for regular transaction to end */
			if (nowait)
			{
				if (!ConditionalXactLockTableWait(xwait))
					ereport(ERROR,
							(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
					errmsg("could not obtain lock on row in relation \"%s\"",
						   RelationGetRelationName(relation))));
			}
			else
				XactLockTableWait(xwait);

			LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);

			/*
			 * xwait is done, but if xwait had just locked the tuple then some
			 * other xact could update this tuple before we get to this point.
			 * Check for xmax change, and start over if so.
			 */
			if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
				!TransactionIdEquals(HeapTupleHeaderGetXmax(tuple->t_data),
									 xwait))
				goto l3;

			/* Otherwise check if it committed or aborted */
			UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
		}

		/*
		 * We may lock if previous xmax aborted, or if it committed but only
		 * locked the tuple without updating it.  The case where we didn't
		 * wait because we are joining an existing shared lock is correctly
		 * handled, too.
		 */
		if (tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
										 HEAP_IS_LOCKED))
			result = HeapTupleMayBeUpdated;
		else
			result = HeapTupleUpdated;
	}

	if (result != HeapTupleMayBeUpdated)
	{
		Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated);
		Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
		*ctid = tuple->t_data->t_ctid;
		*update_xmax = HeapTupleHeaderGetXmax(tuple->t_data);
		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
		if (have_tuple_lock)
			UnlockTuple(relation, tid, tuple_lock_type);
		return result;
	}
	/*
	 * We might already hold the desired lock (or stronger), possibly under a
	 * different subtransaction of the current top transaction.  If so, there
	 * is no need to change state or issue a WAL record.  We already handled
	 * the case where this is true for xmax being a MultiXactId, so now check
	 * for cases where it is a plain TransactionId.
	 *
	 * Note in particular that this covers the case where we already hold
	 * exclusive lock on the tuple and the caller only wants shared lock. It
	 * would certainly not do to give up the exclusive lock.
	 */
	xmax = HeapTupleHeaderGetXmax(tuple->t_data);
	old_infomask = tuple->t_data->t_infomask;

	if (!(old_infomask & (HEAP_XMAX_INVALID |
						  HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_IS_MULTI)) &&
		(mode == LockTupleShared ?
		 (old_infomask & HEAP_IS_LOCKED) :
		 (old_infomask & HEAP_XMAX_EXCL_LOCK)) &&
		TransactionIdIsCurrentTransactionId(xmax))
	{
		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
		/* Probably can't hold tuple lock here, but may as well check */
		if (have_tuple_lock)
			UnlockTuple(relation, tid, tuple_lock_type);
		return HeapTupleMayBeUpdated;
	}

	/*
	 * Compute the new xmax and infomask to store into the tuple.  Note we do
	 * not modify the tuple just yet, because that would leave it in the wrong
	 * state if multixact.c elogs.
	 */
	xid = GetCurrentTransactionId();

	new_infomask = old_infomask & ~(HEAP_XMAX_COMMITTED |
									HEAP_XMAX_INVALID |
									HEAP_XMAX_IS_MULTI |
									HEAP_IS_LOCKED |
									HEAP_MOVED);

	if (mode == LockTupleShared)
	{
		/*
		 * If this is the first acquisition of a shared lock in the current
		 * transaction, set my per-backend OldestMemberMXactId setting. We can
		 * be certain that the transaction will never become a member of any
		 * older MultiXactIds than that.  (We have to do this even if we end
		 * up just using our own TransactionId below, since some other backend
		 * could incorporate our XID into a MultiXact immediately afterwards.)
		 */
		MultiXactIdSetOldestMember();

		new_infomask |= HEAP_XMAX_SHARED_LOCK;

		/*
		 * Check to see if we need a MultiXactId because there are multiple
		 * lockers.
		 *
		 * HeapTupleSatisfiesUpdate will have set the HEAP_XMAX_INVALID bit if
		 * the xmax was a MultiXactId but it was not running anymore. There is
		 * a race condition, which is that the MultiXactId may have finished
		 * since then, but that uncommon case is handled within
		 * MultiXactIdExpand.
		 *
		 * There is a similar race condition possible when the old xmax was a
		 * regular TransactionId.  We test TransactionIdIsInProgress again
		 * just to narrow the window, but it's still possible to end up
		 * creating an unnecessary MultiXactId.  Fortunately this is harmless.
		 */
		if (!(old_infomask & (HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED)))
		{
			if (old_infomask & HEAP_XMAX_IS_MULTI)
			{
				/*
				 * If the XMAX is already a MultiXactId, then we need to
				 * expand it to include our own TransactionId.
				 */
				xid = MultiXactIdExpand((MultiXactId) xmax, xid);
				new_infomask |= HEAP_XMAX_IS_MULTI;
			}
			else if (TransactionIdIsInProgress(xmax))
			{
				/*
				 * If the XMAX is a valid TransactionId, then we need to
				 * create a new MultiXactId that includes both the old locker
				 * and our own TransactionId.
				 */
				xid = MultiXactIdCreate(xmax, xid);
				new_infomask |= HEAP_XMAX_IS_MULTI;
			}
			else
			{
				/*
				 * Can get here iff HeapTupleSatisfiesUpdate saw the old xmax
				 * as running, but it finished before
				 * TransactionIdIsInProgress() got to run.  Treat it like
				 * there's no locker in the tuple.
				 */
			}
		}
		else
		{
			/*
			 * There was no previous locker, so just insert our own
			 * TransactionId.
			 */
		}
	}
	else
	{
		/* We want an exclusive lock on the tuple */
		new_infomask |= HEAP_XMAX_EXCL_LOCK;
	}

	START_CRIT_SECTION();

	/*
	 * Store transaction information of xact locking the tuple.
	 *
	 * Note: Cmax is meaningless in this context, so don't set it; this avoids
	 * possibly generating a useless combo CID.
	 */
	tuple->t_data->t_infomask = new_infomask;
	HeapTupleHeaderClearHotUpdated(tuple->t_data);
	HeapTupleHeaderSetXmax(tuple->t_data, xid);
	/* Make sure there is no forward chain link in t_ctid */
	tuple->t_data->t_ctid = *tid;

	MarkBufferDirty(*buffer);

	/*
	 * XLOG stuff.  You might think that we don't need an XLOG record because
	 * there is no state change worth restoring after a crash.  You would be
	 * wrong however: we have just written either a TransactionId or a
	 * MultiXactId that may never have been seen on disk before, and we need
	 * to make sure that there are XLOG entries covering those ID numbers.
	 * Else the same IDs might be re-used after a crash, which would be
	 * disastrous if this page made it to disk before the crash.  Essentially
	 * we have to enforce the WAL log-before-data rule even in this case.
	 * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
	 * entries for everything anyway.)
	 */
	if (!relation->rd_istemp)
	{
		xl_heap_lock xlrec;
		XLogRecPtr	recptr;
		XLogRecData rdata[2];

		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = tuple->t_self;
		xlrec.locking_xid = xid;
		xlrec.xid_is_mxact = ((new_infomask & HEAP_XMAX_IS_MULTI) != 0);
		xlrec.shared_lock = (mode == LockTupleShared);
		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapLock;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		rdata[1].data = NULL;
		rdata[1].len = 0;
		rdata[1].buffer = *buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = NULL;

		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_LOCK, rdata);

		PageSetLSN(dp, recptr);
		PageSetTLI(dp, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);

	/*
	 * Now that we have successfully marked the tuple as locked, we can
	 * release the lmgr tuple lock, if we had it.
	 */
	if (have_tuple_lock)
		UnlockTuple(relation, tid, tuple_lock_type);

	return HeapTupleMayBeUpdated;
}
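
/*
 * Usage sketch (illustrative only, not part of the original code; variable
 * names are hypothetical).  A SELECT FOR UPDATE-style caller typically locks
 * the tuple, drops the pin, and then acts on the result, following the
 * returned ctid when the row was concurrently updated:
 *
 *		test = heap_lock_tuple(rel, &tuple, &buffer,
 *							   &update_ctid, &update_xmax,
 *							   GetCurrentCommandId(false),
 *							   LockTupleExclusive, false);
 *		ReleaseBuffer(buffer);
 *		if (test == HeapTupleUpdated)
 *			(follow update_ctid / update_xmax to the replacement tuple)
 */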
/*
 *	heap_inplace_update - update a tuple "in place" (ie, overwrite it)
 *
 * Overwriting violates both MVCC and transactional safety, so the uses
 * of this function in Postgres are extremely limited.  Nonetheless we
 * find some places to use it.
 *
 * The tuple cannot change size, and therefore it's reasonable to assume
 * that its null bitmap (if any) doesn't change either.  So we just
 * overwrite the data portion of the tuple without touching the null
 * bitmap or any of the header fields.
 *
 * tuple is an in-memory tuple structure containing the data to be written
 * over the target tuple.  Also, tuple->t_self identifies the target tuple.
 */
void
heap_inplace_update(Relation relation, HeapTuple tuple)
{
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;
	uint32		oldlen;
	uint32		newlen;

	buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	page = (Page) BufferGetPage(buffer);

	offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(ERROR, "heap_inplace_update: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	oldlen = ItemIdGetLength(lp) - htup->t_hoff;
	newlen = tuple->t_len - tuple->t_data->t_hoff;
	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
		elog(ERROR, "heap_inplace_update: wrong tuple length");

	/* NO EREPORT(ERROR) from here till changes are logged */
	START_CRIT_SECTION();

	memcpy((char *) htup + htup->t_hoff,
		   (char *) tuple->t_data + tuple->t_data->t_hoff,
		   newlen);

	MarkBufferDirty(buffer);

	/* XLOG stuff */
	if (!relation->rd_istemp)
	{
		xl_heap_inplace xlrec;
		XLogRecPtr	recptr;
		XLogRecData rdata[2];

		xlrec.target.node = relation->rd_node;
		xlrec.target.tid = tuple->t_self;

		rdata[0].data = (char *) &xlrec;
		rdata[0].len = SizeOfHeapInplace;
		rdata[0].buffer = InvalidBuffer;
		rdata[0].next = &(rdata[1]);

		rdata[1].data = (char *) htup + htup->t_hoff;
		rdata[1].len = newlen;
		rdata[1].buffer = buffer;
		rdata[1].buffer_std = true;
		rdata[1].next = NULL;

		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE, rdata);

		PageSetLSN(page, recptr);
		PageSetTLI(page, ThisTimeLineID);
	}

	END_CRIT_SECTION();

	UnlockReleaseBuffer(buffer);

	/* Send out shared cache inval if necessary */
	if (!IsBootstrapProcessingMode())
		CacheInvalidateHeapTuple(relation, tuple);
}
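
/*
 * Usage sketch (illustrative only, not part of the original code; "rel",
 * "relid" and "num_pages" are hypothetical).  VACUUM-style callers fetch a
 * copy of a catalog tuple, overwrite same-width fields in that copy, and
 * write it back without changing xmin/xmax:
 *
 *		tuple = SearchSysCacheCopy(RELOID, ObjectIdGetDatum(relid), 0, 0, 0);
 *		((Form_pg_class) GETSTRUCT(tuple))->relpages = num_pages;
 *		heap_inplace_update(rel, tuple);
 */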
/*
 * heap_freeze_tuple
 *
 * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
 * are older than the specified cutoff XID.  If so, replace them with
 * FrozenTransactionId or InvalidTransactionId as appropriate, and return
 * TRUE.  Return FALSE if nothing was changed.
 *
 * It is assumed that the caller has checked the tuple with
 * HeapTupleSatisfiesVacuum() and determined that it is not HEAPTUPLE_DEAD
 * (else we should be removing the tuple, not freezing it).
 *
 * NB: cutoff_xid *must* be <= the current global xmin, to ensure that any
 * XID older than it could neither be running nor seen as running by any
 * open transaction.  This ensures that the replacement will not change
 * anyone's idea of the tuple state.  Also, since we assume the tuple is
 * not HEAPTUPLE_DEAD, the fact that an XID is not still running allows us
 * to assume that it is either committed good or aborted, as appropriate;
 * so we need no external state checks to decide what to do.  (This is good
 * because this function is applied during WAL recovery, when we don't have
 * access to any such state, and can't depend on the hint bits to be set.)
 *
 * In lazy VACUUM, we call this while initially holding only a shared lock
 * on the tuple's buffer.  If any change is needed, we trade that in for an
 * exclusive lock before making the change.  Caller should pass the buffer ID
 * if shared lock is held, InvalidBuffer if exclusive lock is already held.
 *
 * Note: it might seem we could make the changes without exclusive lock, since
 * TransactionId read/write is assumed atomic anyway.  However there is a race
 * condition: someone who just fetched an old XID that we overwrite here could
 * conceivably not finish checking the XID against pg_clog before we finish
 * the VACUUM and perhaps truncate off the part of pg_clog he needs.  Getting
 * exclusive lock ensures no other backend is in process of checking the
 * tuple status.  Also, getting exclusive lock makes it safe to adjust the
 * infomask bits.
 */
bool
heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
				  Buffer buf)
{
	bool		changed = false;
	TransactionId xid;

	xid = HeapTupleHeaderGetXmin(tuple);
	if (TransactionIdIsNormal(xid) &&
		TransactionIdPrecedes(xid, cutoff_xid))
	{
		if (buf != InvalidBuffer)
		{
			/* trade in share lock for exclusive lock */
			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
			buf = InvalidBuffer;
		}
		HeapTupleHeaderSetXmin(tuple, FrozenTransactionId);

		/*
		 * Might as well fix the hint bits too; usually XMIN_COMMITTED will
		 * already be set here, but there's a small chance not.
		 */
		Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
		tuple->t_infomask |= HEAP_XMIN_COMMITTED;

		changed = true;
	}

	/*
	 * When we release shared lock, it's possible for someone else to change
	 * xmax before we get the lock back, so repeat the check after acquiring
	 * exclusive lock.  (We don't need this pushup for xmin, because only
	 * VACUUM could be interested in changing an existing tuple's xmin, and
	 * there's only one VACUUM allowed on a table at a time.)
	 */
recheck_xmax:
	if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
	{
		xid = HeapTupleHeaderGetXmax(tuple);
		if (TransactionIdIsNormal(xid) &&
			TransactionIdPrecedes(xid, cutoff_xid))
		{
			if (buf != InvalidBuffer)
			{
				/* trade in share lock for exclusive lock */
				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
				LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
				buf = InvalidBuffer;
				goto recheck_xmax;		/* see comment above */
			}
			HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);

			/*
			 * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
			 * + LOCKED.  Normalize to INVALID just to be sure no one gets
			 * confused.
			 */
			tuple->t_infomask &= ~HEAP_XMAX_COMMITTED;
			tuple->t_infomask |= HEAP_XMAX_INVALID;
			HeapTupleHeaderClearHotUpdated(tuple);

			changed = true;
		}
	}
	else
	{
		/*----------
		 * XXX perhaps someday we should zero out very old MultiXactIds here?
		 *
		 * The only way a stale MultiXactId could pose a problem is if a
		 * tuple, having once been multiply-share-locked, is not touched by
		 * any vacuum or attempted lock or deletion for just over 4G MultiXact
		 * creations, and then in the probably-narrow window where its xmax
		 * is again a live MultiXactId, someone tries to lock or delete it.
		 * Even then, another share-lock attempt would work fine.  An
		 * exclusive-lock or delete attempt would face unexpected delay, or
		 * in the very worst case get a deadlock error.  This seems an
		 * extremely low-probability scenario with minimal downside even if
		 * it does happen, so for now we don't do the extra bookkeeping that
		 * would be needed to clean out MultiXactIds.
		 *----------
		 */
	}

	/*
	 * Although xvac per se could only be set by VACUUM, it shares physical
	 * storage space with cmax, and so could be wiped out by someone setting
	 * xmax.  Hence recheck after changing lock, same as for xmax itself.
	 */
recheck_xvac:
	if (tuple->t_infomask & HEAP_MOVED)
	{
		xid = HeapTupleHeaderGetXvac(tuple);
		if (TransactionIdIsNormal(xid) &&
			TransactionIdPrecedes(xid, cutoff_xid))
		{
			if (buf != InvalidBuffer)
			{
				/* trade in share lock for exclusive lock */
				LockBuffer(buf, BUFFER_LOCK_UNLOCK);
				LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
				buf = InvalidBuffer;
				goto recheck_xvac;		/* see comment above */
			}

			/*
			 * If a MOVED_OFF tuple is not dead, the xvac transaction must
			 * have failed; whereas a non-dead MOVED_IN tuple must mean the
			 * xvac transaction succeeded.
			 */
			if (tuple->t_infomask & HEAP_MOVED_OFF)
				HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
			else
				HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);

			/*
			 * Might as well fix the hint bits too; usually XMIN_COMMITTED
			 * will already be set here, but there's a small chance not.
			 */
			Assert(!(tuple->t_infomask & HEAP_XMIN_INVALID));
			tuple->t_infomask |= HEAP_XMIN_COMMITTED;

			changed = true;
		}
	}

	return changed;
}
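
/*
 * Usage sketch (illustrative only, not part of the original code; names are
 * hypothetical).  Lazy VACUUM calls this while holding a share lock on the
 * buffer, collects the offsets of tuples that actually changed, and WAL-logs
 * them afterwards:
 *
 *		if (heap_freeze_tuple(tuple.t_data, FreezeLimit, buf))
 *			frozen[nfrozen++] = offnum;
 *		...
 *		if (nfrozen > 0)
 *			recptr = log_heap_freeze(onerel, buf, FreezeLimit,
 *									 frozen, nfrozen);
 */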
/* ----------------
 *		heap_markpos	- mark scan position
 * ----------------
 */
void
heap_markpos(HeapScanDesc scan)
{
	/* Note: no locking manipulations needed */

	if (scan->rs_ctup.t_data != NULL)
	{
		scan->rs_mctid = scan->rs_ctup.t_self;
		if (scan->rs_pageatatime)
			scan->rs_mindex = scan->rs_cindex;
	}
	else
		ItemPointerSetInvalid(&scan->rs_mctid);
}
/* ----------------
 *		heap_restrpos	- restore position to marked location
 * ----------------
 */
void
heap_restrpos(HeapScanDesc scan)
{
	/* XXX no amrestrpos checking that ammarkpos called */

	if (!ItemPointerIsValid(&scan->rs_mctid))
	{
		scan->rs_ctup.t_data = NULL;

		/*
		 * unpin scan buffers
		 */
		if (BufferIsValid(scan->rs_cbuf))
			ReleaseBuffer(scan->rs_cbuf);
		scan->rs_cbuf = InvalidBuffer;
		scan->rs_cblock = InvalidBlockNumber;
		scan->rs_inited = false;
	}
	else
	{
		/*
		 * If we reached end of scan, rs_inited will now be false.  We must
		 * reset it to true to keep heapgettup from doing the wrong thing.
		 */
		scan->rs_inited = true;
		scan->rs_ctup.t_self = scan->rs_mctid;
		if (scan->rs_pageatatime)
		{
			scan->rs_cindex = scan->rs_mindex;
			heapgettup_pagemode(scan,
								NoMovementScanDirection,
								0,		/* needn't recheck scan keys */
								NULL);
		}
		else
			heapgettup(scan,
					   NoMovementScanDirection,
					   0,		/* needn't recheck scan keys */
					   NULL);
	}
}
/*
 * Perform XLogInsert for a heap-clean operation.  Caller must already
 * have modified the buffer and marked it dirty.
 *
 * Note: prior to Postgres 8.3, the entries in the nowunused[] array were
 * zero-based tuple indexes.  Now they are one-based like other uses
 * of OffsetNumber.
 */
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer,
			   OffsetNumber *redirected, int nredirected,
			   OffsetNumber *nowdead, int ndead,
			   OffsetNumber *nowunused, int nunused,
			   bool redirect_move)
{
	xl_heap_clean xlrec;
	uint8		info;
	XLogRecPtr	recptr;
	XLogRecData rdata[4];

	/* Caller should not call me on a temp relation */
	Assert(!reln->rd_istemp);

	xlrec.node = reln->rd_node;
	xlrec.block = BufferGetBlockNumber(buffer);
	xlrec.nredirected = nredirected;
	xlrec.ndead = ndead;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapClean;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	/*
	 * The OffsetNumber arrays are not actually in the buffer, but we pretend
	 * that they are.  When XLogInsert stores the whole buffer, the offset
	 * arrays need not be stored too.  Note that even if all three arrays are
	 * empty, we want to expose the buffer as a candidate for whole-page
	 * storage, since this record type implies a defragmentation operation
	 * even if no item pointers changed state.
	 */
	if (nredirected > 0)
	{
		rdata[1].data = (char *) redirected;
		rdata[1].len = nredirected * sizeof(OffsetNumber) * 2;
	}
	else
	{
		rdata[1].data = NULL;
		rdata[1].len = 0;
	}
	rdata[1].buffer = buffer;
	rdata[1].buffer_std = true;
	rdata[1].next = &(rdata[2]);

	if (ndead > 0)
	{
		rdata[2].data = (char *) nowdead;
		rdata[2].len = ndead * sizeof(OffsetNumber);
	}
	else
	{
		rdata[2].data = NULL;
		rdata[2].len = 0;
	}
	rdata[2].buffer = buffer;
	rdata[2].buffer_std = true;
	rdata[2].next = &(rdata[3]);

	if (nunused > 0)
	{
		rdata[3].data = (char *) nowunused;
		rdata[3].len = nunused * sizeof(OffsetNumber);
	}
	else
	{
		rdata[3].data = NULL;
		rdata[3].len = 0;
	}
	rdata[3].buffer = buffer;
	rdata[3].buffer_std = true;
	rdata[3].next = NULL;

	info = redirect_move ? XLOG_HEAP2_CLEAN_MOVE : XLOG_HEAP2_CLEAN;
	recptr = XLogInsert(RM_HEAP2_ID, info, rdata);

	return recptr;
}
/*
 * Perform XLogInsert for a heap-freeze operation.  Caller must already
 * have modified the buffer and marked it dirty.
 */
XLogRecPtr
log_heap_freeze(Relation reln, Buffer buffer,
				TransactionId cutoff_xid,
				OffsetNumber *offsets, int offcnt)
{
	xl_heap_freeze xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* Caller should not call me on a temp relation */
	Assert(!reln->rd_istemp);

	xlrec.node = reln->rd_node;
	xlrec.block = BufferGetBlockNumber(buffer);
	xlrec.cutoff_xid = cutoff_xid;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapFreeze;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	/*
	 * The tuple-offsets array is not actually in the buffer, but pretend that
	 * it is.  When XLogInsert stores the whole buffer, the offsets array need
	 * not be stored too.
	 */
	if (offcnt > 0)
	{
		rdata[1].data = (char *) offsets;
		rdata[1].len = offcnt * sizeof(OffsetNumber);
	}
	else
	{
		rdata[1].data = NULL;
		rdata[1].len = 0;
	}
	rdata[1].buffer = buffer;
	rdata[1].buffer_std = true;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE, rdata);

	return recptr;
}
/*
 * Perform XLogInsert for a heap-update operation.  Caller must already
 * have modified the buffer(s) and marked them dirty.
 */
static XLogRecPtr
log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
				Buffer newbuf, HeapTuple newtup, bool move)
{
	/*
	 * Note: xlhdr is declared to have adequate size and correct alignment for
	 * an xl_heap_header.  However the two tids, if present at all, will be
	 * packed in with no wasted space after the xl_heap_header; they aren't
	 * necessarily aligned as implied by this struct declaration.
	 */
	struct
	{
		xl_heap_header hdr;
		TransactionId tid1;
		TransactionId tid2;
	}			xlhdr;
	int			hsize = SizeOfHeapHeader;
	xl_heap_update xlrec;
	uint8		info;
	XLogRecPtr	recptr;
	XLogRecData rdata[4];
	Page		page = BufferGetPage(newbuf);

	/* Caller should not call me on a temp relation */
	Assert(!reln->rd_istemp);

	if (move)
	{
		Assert(!HeapTupleIsHeapOnly(newtup));
		info = XLOG_HEAP_MOVE;
	}
	else if (HeapTupleIsHeapOnly(newtup))
		info = XLOG_HEAP_HOT_UPDATE;
	else
		info = XLOG_HEAP_UPDATE;

	xlrec.target.node = reln->rd_node;
	xlrec.target.tid = from;
	xlrec.newtid = newtup->t_self;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapUpdate;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = NULL;
	rdata[1].len = 0;
	rdata[1].buffer = oldbuf;
	rdata[1].buffer_std = true;
	rdata[1].next = &(rdata[2]);

	xlhdr.hdr.t_infomask2 = newtup->t_data->t_infomask2;
	xlhdr.hdr.t_infomask = newtup->t_data->t_infomask;
	xlhdr.hdr.t_hoff = newtup->t_data->t_hoff;
	if (move)					/* remember xmax & xmin */
	{
		TransactionId xid[2];	/* xmax, xmin */

		if (newtup->t_data->t_infomask & (HEAP_XMAX_INVALID | HEAP_IS_LOCKED))
			xid[0] = InvalidTransactionId;
		else
			xid[0] = HeapTupleHeaderGetXmax(newtup->t_data);
		xid[1] = HeapTupleHeaderGetXmin(newtup->t_data);
		memcpy((char *) &xlhdr + hsize,
			   (char *) xid,
			   2 * sizeof(TransactionId));
		hsize += 2 * sizeof(TransactionId);
	}

	/*
	 * As with insert records, we need not store the rdata[2] segment if we
	 * decide to store the whole buffer instead.
	 */
	rdata[2].data = (char *) &xlhdr;
	rdata[2].len = hsize;
	rdata[2].buffer = newbuf;
	rdata[2].buffer_std = true;
	rdata[2].next = &(rdata[3]);

	/* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
	rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
	rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
	rdata[3].buffer = newbuf;
	rdata[3].buffer_std = true;
	rdata[3].next = NULL;

	/* If new tuple is the single and first tuple on page... */
	if (ItemPointerGetOffsetNumber(&(newtup->t_self)) == FirstOffsetNumber &&
		PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
	{
		info |= XLOG_HEAP_INIT_PAGE;
		rdata[2].buffer = rdata[3].buffer = InvalidBuffer;
	}

	recptr = XLogInsert(RM_HEAP_ID, info, rdata);

	return recptr;
}
/*
 * Perform XLogInsert for a heap-move operation.  Caller must already
 * have modified the buffers and marked them dirty.
 */
XLogRecPtr
log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
			  Buffer newbuf, HeapTuple newtup)
{
	return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
}
/*
 * Perform XLogInsert of a HEAP_NEWPAGE record to WAL.  Caller is responsible
 * for writing the page to disk after calling this routine.
 *
 * Note: all current callers build pages in private memory and write them
 * directly to smgr, rather than using bufmgr.  Therefore there is no need
 * to pass a buffer ID to XLogInsert, nor to perform MarkBufferDirty within
 * the critical section.
 *
 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
 * not do anything that assumes we are touching a heap.
 */
XLogRecPtr
log_newpage(RelFileNode *rnode, BlockNumber blkno, Page page)
{
	xl_heap_newpage xlrec;
	XLogRecPtr	recptr;
	XLogRecData rdata[2];

	/* NO ELOG(ERROR) from here till newpage op is logged */
	START_CRIT_SECTION();

	xlrec.node = *rnode;
	xlrec.blkno = blkno;

	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapNewpage;
	rdata[0].buffer = InvalidBuffer;
	rdata[0].next = &(rdata[1]);

	rdata[1].data = (char *) page;
	rdata[1].len = BLCKSZ;
	rdata[1].buffer = InvalidBuffer;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);

	PageSetLSN(page, recptr);
	PageSetTLI(page, ThisTimeLineID);

	END_CRIT_SECTION();

	return recptr;
}
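
/*
 * Usage sketch (illustrative only, not part of the original code; "rel",
 * "blkno" and "page" are hypothetical).  A caller that builds a page in
 * private memory, as described above, logs it and then writes it out itself
 * via smgr; the LSN/TLI are already applied to the page by log_newpage, so
 * the WAL-before-data rule holds:
 *
 *		recptr = log_newpage(&rel->rd_node, blkno, page);
 *		smgrextend(rel->rd_smgr, blkno, (char *) page, rel->rd_istemp);
 */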
/*
 * Handles CLEAN and CLEAN_MOVE record types
 */
static void
heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
{
	xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
	Relation	reln;
	Buffer		buffer;
	Page		page;
	OffsetNumber *end;
	OffsetNumber *redirected;
	OffsetNumber *nowdead;
	OffsetNumber *nowunused;
	int			nredirected;
	int			ndead;
	int			nunused;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	reln = XLogOpenRelation(xlrec->node);
	buffer = XLogReadBuffer(reln, xlrec->block, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	nredirected = xlrec->nredirected;
	ndead = xlrec->ndead;
	end = (OffsetNumber *) ((char *) xlrec + record->xl_len);
	redirected = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
	nowdead = redirected + (nredirected * 2);
	nowunused = nowdead + ndead;
	nunused = (end - nowunused);
	Assert(nunused >= 0);

	/* Update all item pointers per the record, and repair fragmentation */
	heap_page_prune_execute(reln, buffer,
							redirected, nredirected,
							nowdead, ndead,
							nowunused, nunused,
							clean_move);

	/*
	 * Note: we don't worry about updating the page's prunability hints.
	 * At worst this will cause an extra prune cycle to occur soon.
	 */

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
	TransactionId cutoff_xid = xlrec->cutoff_xid;
	Relation	reln;
	Buffer		buffer;
	Page		page;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	reln = XLogOpenRelation(xlrec->node);
	buffer = XLogReadBuffer(reln, xlrec->block, false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	if (record->xl_len > SizeOfHeapFreeze)
	{
		OffsetNumber *offsets;
		OffsetNumber *offsets_end;

		offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapFreeze);
		offsets_end = (OffsetNumber *) ((char *) xlrec + record->xl_len);

		while (offsets < offsets_end)
		{
			/* offsets[] entries are one-based */
			ItemId		lp = PageGetItemId(page, *offsets);
			HeapTupleHeader tuple = (HeapTupleHeader) PageGetItem(page, lp);

			(void) heap_freeze_tuple(tuple, cutoff_xid, InvalidBuffer);
			offsets++;
		}
	}

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_newpage *xlrec = (xl_heap_newpage *) XLogRecGetData(record);
	Relation	reln;
	Buffer		buffer;
	Page		page;

	/*
	 * Note: the NEWPAGE log record is used for both heaps and indexes, so do
	 * not do anything that assumes we are touching a heap.
	 */
	reln = XLogOpenRelation(xlrec->node);
	buffer = XLogReadBuffer(reln, xlrec->blkno, true);
	Assert(BufferIsValid(buffer));
	page = (Page) BufferGetPage(buffer);

	Assert(record->xl_len == SizeOfHeapNewpage + BLCKSZ);
	memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);

	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
{
	xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
	Relation	reln;
	Buffer		buffer;
	Page		page;
	OffsetNumber offnum;
	ItemId		lp = NULL;
	HeapTupleHeader htup;

	if (record->xl_info & XLR_BKP_BLOCK_1)
		return;

	reln = XLogOpenRelation(xlrec->target.node);
	buffer = XLogReadBuffer(reln,
							ItemPointerGetBlockNumber(&(xlrec->target.tid)),
							false);
	if (!BufferIsValid(buffer))
		return;
	page = (Page) BufferGetPage(buffer);

	if (XLByteLE(lsn, PageGetLSN(page)))	/* changes are applied */
	{
		UnlockReleaseBuffer(buffer);
		return;
	}

	offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
	if (PageGetMaxOffsetNumber(page) >= offnum)
		lp = PageGetItemId(page, offnum);

	if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
		elog(PANIC, "heap_delete_redo: invalid lp");

	htup = (HeapTupleHeader) PageGetItem(page, lp);

	htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
						  HEAP_XMAX_INVALID |
						  HEAP_XMAX_IS_MULTI |
						  HEAP_IS_LOCKED |
						  HEAP_MOVED);
	HeapTupleHeaderClearHotUpdated(htup);
	HeapTupleHeaderSetXmax(htup, record->xl_xid);
	HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

	/* Mark the page as a candidate for pruning */
	PageSetPrunable(page, record->xl_xid);

	/* Make sure there is no forward chain link in t_ctid */
	htup->t_ctid = xlrec->target.tid;
	PageSetLSN(page, lsn);
	PageSetTLI(page, ThisTimeLineID);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    Relation    reln;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    xl_heap_header xlhdr;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    reln = XLogOpenRelation(xlrec->target.node);

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(reln,
                                ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                                true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(reln,
                                ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                                false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_insert_redo: invalid max offset number");

    newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapInsert,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;
    HeapTupleHeaderSetXmin(htup, record->xl_xid);
    HeapTupleHeaderSetCmin(htup, FirstCommandId);
    htup->t_ctid = xlrec->target.tid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_insert_redo: failed to add tuple");
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
/*
 * Handles UPDATE, HOT_UPDATE & MOVE
 */
static void
heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
{
    xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    Relation    reln = XLogOpenRelation(xlrec->target.node);
    Buffer      buffer;
    bool        samepage = (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)));
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    struct
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    xl_heap_header xlhdr;
    int         hsize;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
    {
        if (samepage)
            return;             /* backup block covered both changes */
        goto newt;
    }

    /* Deal with old tuple version */

    buffer = XLogReadBuffer(reln,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        goto newt;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        if (samepage)
            return;
        goto newt;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_update_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    if (move)
    {
        htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
                              HEAP_XMIN_INVALID |
                              HEAP_MOVED_IN);
        htup->t_infomask |= HEAP_MOVED_OFF;
        HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
        /* Make sure there is no forward chain link in t_ctid */
        htup->t_ctid = xlrec->target.tid;
    }
    else
    {
        htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                              HEAP_XMAX_INVALID |
                              HEAP_XMAX_IS_MULTI |
                              HEAP_IS_LOCKED |
                              HEAP_MOVED);
        if (hot_update)
            HeapTupleHeaderSetHotUpdated(htup);
        else
            HeapTupleHeaderClearHotUpdated(htup);
        HeapTupleHeaderSetXmax(htup, record->xl_xid);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        /* Set forward chain link in t_ctid */
        htup->t_ctid = xlrec->newtid;
    }

    /* Mark the page as a candidate for pruning */
    PageSetPrunable(page, record->xl_xid);

    /*
     * this test is ugly, but necessary to avoid thinking that insert change
     * is already applied
     */
    if (samepage)
        goto newsame;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    /* Deal with new tuple */

newt:;

    if (record->xl_info & XLR_BKP_BLOCK_2)
        return;

    if (record->xl_info & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogReadBuffer(reln,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                true);
        Assert(BufferIsValid(buffer));
        page = (Page) BufferGetPage(buffer);

        PageInit(page, BufferGetPageSize(buffer), 0);
    }
    else
    {
        buffer = XLogReadBuffer(reln,
                                ItemPointerGetBlockNumber(&(xlrec->newtid)),
                                false);
        if (!BufferIsValid(buffer))
            return;
        page = (Page) BufferGetPage(buffer);

        if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
        {
            UnlockReleaseBuffer(buffer);
            return;
        }
    }

newsame:;

    offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
    if (PageGetMaxOffsetNumber(page) + 1 < offnum)
        elog(PANIC, "heap_update_redo: invalid max offset number");

    hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
    if (move)
        hsize += (2 * sizeof(TransactionId));

    newlen = record->xl_len - hsize;
    Assert(newlen <= MaxHeapTupleSize);
    memcpy((char *) &xlhdr,
           (char *) xlrec + SizeOfHeapUpdate,
           SizeOfHeapHeader);
    htup = &tbuf.hdr;
    MemSet((char *) htup, 0, sizeof(HeapTupleHeaderData));
    /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
    memcpy((char *) htup + offsetof(HeapTupleHeaderData, t_bits),
           (char *) xlrec + hsize,
           newlen);
    newlen += offsetof(HeapTupleHeaderData, t_bits);
    htup->t_infomask2 = xlhdr.t_infomask2;
    htup->t_infomask = xlhdr.t_infomask;
    htup->t_hoff = xlhdr.t_hoff;

    if (move)
    {
        TransactionId xid[2];   /* xmax, xmin */

        memcpy((char *) xid,
               (char *) xlrec + SizeOfHeapUpdate + SizeOfHeapHeader,
               2 * sizeof(TransactionId));
        HeapTupleHeaderSetXmin(htup, xid[1]);
        HeapTupleHeaderSetXmax(htup, xid[0]);
        HeapTupleHeaderSetXvac(htup, record->xl_xid);
    }
    else
    {
        HeapTupleHeaderSetXmin(htup, record->xl_xid);
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
    }
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->newtid;

    offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
    if (offnum == InvalidOffsetNumber)
        elog(PANIC, "heap_update_redo: failed to add tuple");
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    Relation    reln;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    reln = XLogOpenRelation(xlrec->target.node);
    buffer = XLogReadBuffer(reln,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_lock_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
                          HEAP_XMAX_INVALID |
                          HEAP_XMAX_IS_MULTI |
                          HEAP_IS_LOCKED |
                          HEAP_MOVED);
    if (xlrec->xid_is_mxact)
        htup->t_infomask |= HEAP_XMAX_IS_MULTI;
    if (xlrec->shared_lock)
        htup->t_infomask |= HEAP_XMAX_SHARED_LOCK;
    else
        htup->t_infomask |= HEAP_XMAX_EXCL_LOCK;
    HeapTupleHeaderClearHotUpdated(htup);
    HeapTupleHeaderSetXmax(htup, xlrec->locking_xid);
    HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
    /* Make sure there is no forward chain link in t_ctid */
    htup->t_ctid = xlrec->target.tid;
    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
static void
heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
{
    xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    Relation    reln = XLogOpenRelation(xlrec->target.node);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    uint32      newlen;

    if (record->xl_info & XLR_BKP_BLOCK_1)
        return;

    buffer = XLogReadBuffer(reln,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)),
                            false);
    if (!BufferIsValid(buffer))
        return;
    page = (Page) BufferGetPage(buffer);

    if (XLByteLE(lsn, PageGetLSN(page)))    /* changes are applied */
    {
        UnlockReleaseBuffer(buffer);
        return;
    }

    offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
    if (PageGetMaxOffsetNumber(page) >= offnum)
        lp = PageGetItemId(page, offnum);

    if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
        elog(PANIC, "heap_inplace_redo: invalid lp");

    htup = (HeapTupleHeader) PageGetItem(page, lp);

    oldlen = ItemIdGetLength(lp) - htup->t_hoff;
    newlen = record->xl_len - SizeOfHeapInplace;
    if (oldlen != newlen)
        elog(PANIC, "heap_inplace_redo: wrong tuple length");

    memcpy((char *) htup + htup->t_hoff,
           (char *) xlrec + SizeOfHeapInplace,
           newlen);

    PageSetLSN(page, lsn);
    PageSetTLI(page, ThisTimeLineID);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}
void
heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP_INSERT:
            heap_xlog_insert(lsn, record);
            break;
        case XLOG_HEAP_DELETE:
            heap_xlog_delete(lsn, record);
            break;
        case XLOG_HEAP_UPDATE:
            heap_xlog_update(lsn, record, false, false);
            break;
        case XLOG_HEAP_MOVE:
            heap_xlog_update(lsn, record, true, false);
            break;
        case XLOG_HEAP_HOT_UPDATE:
            heap_xlog_update(lsn, record, false, true);
            break;
        case XLOG_HEAP_NEWPAGE:
            heap_xlog_newpage(lsn, record);
            break;
        case XLOG_HEAP_LOCK:
            heap_xlog_lock(lsn, record);
            break;
        case XLOG_HEAP_INPLACE:
            heap_xlog_inplace(lsn, record);
            break;
        default:
            elog(PANIC, "heap_redo: unknown op code %u", info);
    }
}
void
heap2_redo(XLogRecPtr lsn, XLogRecord *record)
{
    uint8       info = record->xl_info & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP2_FREEZE:
            heap_xlog_freeze(lsn, record);
            break;
        case XLOG_HEAP2_CLEAN:
            heap_xlog_clean(lsn, record, false);
            break;
        case XLOG_HEAP2_CLEAN_MOVE:
            heap_xlog_clean(lsn, record, true);
            break;
        default:
            elog(PANIC, "heap2_redo: unknown op code %u", info);
    }
}
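/*
 * Note: heap_redo/heap2_redo and the matching heap_desc/heap2_desc routines
 * are not called directly; recovery and WAL debugging reach them through the
 * resource-manager dispatch table in src/backend/access/transam/rmgr.c.  The
 * RM_HEAP_ID and RM_HEAP2_ID entries there look roughly like the following
 * (field order recalled from memory; see rmgr.c for the authoritative
 * definition):
 *
 *      {"Heap2", heap2_redo, heap2_desc, NULL, NULL, NULL},
 *      {"Heap", heap_redo, heap_desc, NULL, NULL, NULL},
 */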
static void
out_target(StringInfo buf, xl_heaptid *target)
{
    appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
                     target->node.spcNode, target->node.dbNode, target->node.relNode,
                     ItemPointerGetBlockNumber(&(target->tid)),
                     ItemPointerGetOffsetNumber(&(target->tid)));
}
void
heap_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP_INSERT)
    {
        xl_heap_insert *xlrec = (xl_heap_insert *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "insert(init): ");
        else
            appendStringInfo(buf, "insert: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_DELETE)
    {
        xl_heap_delete *xlrec = (xl_heap_delete *) rec;

        appendStringInfo(buf, "delete: ");
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "update(init): ");
        else
            appendStringInfo(buf, "update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_MOVE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)
            appendStringInfo(buf, "move(init): ");
        else
            appendStringInfo(buf, "move: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_HOT_UPDATE)
    {
        xl_heap_update *xlrec = (xl_heap_update *) rec;

        if (xl_info & XLOG_HEAP_INIT_PAGE)      /* can this case happen? */
            appendStringInfo(buf, "hot_update(init): ");
        else
            appendStringInfo(buf, "hot_update: ");
        out_target(buf, &(xlrec->target));
        appendStringInfo(buf, "; new %u/%u",
                         ItemPointerGetBlockNumber(&(xlrec->newtid)),
                         ItemPointerGetOffsetNumber(&(xlrec->newtid)));
    }
    else if (info == XLOG_HEAP_NEWPAGE)
    {
        xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;

        appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->blkno);
    }
    else if (info == XLOG_HEAP_LOCK)
    {
        xl_heap_lock *xlrec = (xl_heap_lock *) rec;

        if (xlrec->shared_lock)
            appendStringInfo(buf, "shared_lock: ");
        else
            appendStringInfo(buf, "exclusive_lock: ");
        if (xlrec->xid_is_mxact)
            appendStringInfo(buf, "mxid ");
        else
            appendStringInfo(buf, "xid ");
        appendStringInfo(buf, "%u ", xlrec->locking_xid);
        out_target(buf, &(xlrec->target));
    }
    else if (info == XLOG_HEAP_INPLACE)
    {
        xl_heap_inplace *xlrec = (xl_heap_inplace *) rec;

        appendStringInfo(buf, "inplace: ");
        out_target(buf, &(xlrec->target));
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}
void
heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
{
    uint8       info = xl_info & ~XLR_INFO_MASK;

    info &= XLOG_HEAP_OPMASK;
    if (info == XLOG_HEAP2_FREEZE)
    {
        xl_heap_freeze *xlrec = (xl_heap_freeze *) rec;

        appendStringInfo(buf, "freeze: rel %u/%u/%u; blk %u; cutoff %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block,
                         xlrec->cutoff_xid);
    }
    else if (info == XLOG_HEAP2_CLEAN)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else if (info == XLOG_HEAP2_CLEAN_MOVE)
    {
        xl_heap_clean *xlrec = (xl_heap_clean *) rec;

        appendStringInfo(buf, "clean_move: rel %u/%u/%u; blk %u",
                         xlrec->node.spcNode, xlrec->node.dbNode,
                         xlrec->node.relNode, xlrec->block);
    }
    else
        appendStringInfo(buf, "UNKNOWN");
}
/*
 *  heap_sync       - sync a heap, for use when no WAL has been written
 *
 * This forces the heap contents (including TOAST heap if any) down to disk.
 * If we skipped using WAL, and it's not a temp relation, we must force the
 * relation down to disk before it's safe to commit the transaction.  This
 * requires writing out any dirty buffers and then doing a forced fsync.
 *
 * Indexes are not touched.  (Currently, index operations associated with
 * the commands that use this are WAL-logged and so do not need fsync.
 * That behavior might change someday, but in any case it's likely that
 * any fsync decisions required would be per-index and hence not appropriate
 * to be done here.)
 */
void
heap_sync(Relation rel)
{
    /* temp tables never need fsync */
    if (rel->rd_istemp)
        return;

    /* main heap */
    FlushRelationBuffers(rel);
    /* FlushRelationBuffers will have opened rd_smgr */
    smgrimmedsync(rel->rd_smgr);

    /* toast heap, if any */
    if (OidIsValid(rel->rd_rel->reltoastrelid))
    {
        Relation    toastrel;

        toastrel = heap_open(rel->rd_rel->reltoastrelid, AccessShareLock);
        FlushRelationBuffers(toastrel);
        smgrimmedsync(toastrel->rd_smgr);
        heap_close(toastrel, AccessShareLock);
    }
}