/*-------------------------------------------------------------------------
 *
 * htup.h
 *	  POSTGRES heap tuple definitions.
 *
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * $PostgreSQL$
 *
 *-------------------------------------------------------------------------
 */
#ifndef HTUP_H
#define HTUP_H

#include "access/tupdesc.h"
#include "access/tupmacs.h"
#include "storage/itemptr.h"
#include "storage/relfilenode.h"
/*
 * MaxTupleAttributeNumber limits the number of (user) columns in a tuple.
 * The key limit on this value is that the size of the fixed overhead for
 * a tuple, plus the size of the null-values bitmap (at 1 bit per column),
 * plus MAXALIGN alignment, must fit into t_hoff which is uint8.  On most
 * machines the upper limit without making t_hoff wider would be a little
 * over 1700.  We use round numbers here and for MaxHeapAttributeNumber
 * so that alterations in HeapTupleHeaderData layout won't change the
 * supported max number of columns.
 */
#define MaxTupleAttributeNumber 1664	/* 8 * 208 */

/*
 * MaxHeapAttributeNumber limits the number of (user) columns in a table.
 * This should be somewhat less than MaxTupleAttributeNumber.  It must be
 * at least one less, else we will fail to do UPDATEs on a maximal-width
 * table (because UPDATE has to form working tuples that include CTID).
 * In practice we want some additional daylight so that we can gracefully
 * support operations that add hidden "resjunk" columns, for example
 * SELECT * FROM wide_table ORDER BY foo, bar, baz.
 * In any case, depending on column data types you will likely be running
 * into the disk-block-based limit on overall tuple size if you have more
 * than a thousand or so columns.  TOAST won't help.
 */
#define MaxHeapAttributeNumber	1600	/* 8 * 200 */
/*
 * Heap tuple header.  To avoid wasting space, the fields should be
 * laid out in such a way as to avoid structure padding.
 *
 * Datums of composite types (row types) share the same general structure
 * as on-disk tuples, so that the same routines can be used to build and
 * examine them.  However the requirements are slightly different: a Datum
 * does not need any transaction visibility information, and it does need
 * a length word and some embedded type information.  We can achieve this
 * by overlaying the xmin/cmin/xmax/cmax/xvac fields of a heap tuple
 * with the fields needed in the Datum case.  Typically, all tuples built
 * in-memory will be initialized with the Datum fields; but when a tuple is
 * about to be inserted in a table, the transaction fields will be filled,
 * overwriting the datum fields.
 *
 * The overall structure of a heap tuple looks like:
 *			fixed fields (HeapTupleHeaderData struct)
 *			nulls bitmap (if HEAP_HASNULL is set in t_infomask)
 *			alignment padding (as needed to make user data MAXALIGN'd)
 *			object ID (if HEAP_HASOID is set in t_infomask)
 *			user data fields
 *
 * We store five "virtual" fields Xmin, Cmin, Xmax, Cmax, and Xvac in three
 * physical fields.  Xmin and Xmax are always really stored, but Cmin, Cmax
 * and Xvac share a field.  This works because we know that Cmin and Cmax
 * are only interesting for the lifetime of the inserting and deleting
 * transaction respectively.  If a tuple is inserted and deleted in the same
 * transaction, we store a "combo" command id that can be mapped to the real
 * cmin and cmax, but only by use of local state within the originating
 * backend.  See combocid.c for more details.  Meanwhile, Xvac is only set
 * by VACUUM FULL, which does not have any command sub-structure and so does
 * not need either Cmin or Cmax.  (This requires that VACUUM FULL never try
 * to move a tuple whose Cmin or Cmax is still interesting, ie, an insert-
 * in-progress or delete-in-progress tuple.)
 *
 * A word about t_ctid: whenever a new tuple is stored on disk, its t_ctid
 * is initialized with its own TID (location).  If the tuple is ever updated,
 * its t_ctid is changed to point to the replacement version of the tuple.
 * Thus, a tuple is the latest version of its row iff XMAX is invalid or
 * t_ctid points to itself (in which case, if XMAX is valid, the tuple is
 * either locked or deleted).  One can follow the chain of t_ctid links
 * to find the newest version of the row.  Beware however that VACUUM might
 * erase the pointed-to (newer) tuple before erasing the pointing (older)
 * tuple.  Hence, when following a t_ctid link, it is necessary to check
 * to see if the referenced slot is empty or contains an unrelated tuple.
 * Check that the referenced tuple has XMIN equal to the referencing tuple's
 * XMAX to verify that it is actually the descendant version and not an
 * unrelated tuple stored into a slot recently freed by VACUUM.  If either
 * check fails, one may assume that there is no live descendant version.
 *
 * Following the fixed header fields, the nulls bitmap is stored (beginning
 * at t_bits).  The bitmap is *not* stored if t_infomask shows that there
 * are no nulls in the tuple.  If an OID field is present (as indicated by
 * t_infomask), then it is stored just before the user data, which begins at
 * the offset shown by t_hoff.  Note that t_hoff must be a multiple of
 * MAXALIGN.
 */
typedef struct HeapTupleFields
{
	TransactionId t_xmin;		/* inserting xact ID */
	TransactionId t_xmax;		/* deleting or locking xact ID */

	union
	{
		CommandId	t_cid;		/* inserting or deleting command ID, or both */
		TransactionId t_xvac;	/* VACUUM FULL xact ID */
	}			t_field3;
} HeapTupleFields;

typedef struct DatumTupleFields
{
	int32		datum_len_;		/* varlena header (do not touch directly!) */

	int32		datum_typmod;	/* -1, or identifier of a record type */

	Oid			datum_typeid;	/* composite type OID, or RECORDOID */

	/*
	 * Note: field ordering is chosen with thought that Oid might someday
	 * widen to 64 bits.
	 */
} DatumTupleFields;

typedef struct HeapTupleHeaderData
{
	union
	{
		HeapTupleFields t_heap;
		DatumTupleFields t_datum;
	}			t_choice;

	ItemPointerData t_ctid;		/* current TID of this or newer tuple */

	/* Fields below here must match MinimalTupleData! */

	uint16		t_infomask2;	/* number of attributes + various flags */

	uint16		t_infomask;		/* various flag bits, see below */

	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */

	/* ^ - 23 bytes - ^ */

	bits8		t_bits[1];		/* bitmap of NULLs -- VARIABLE LENGTH */

	/* MORE DATA FOLLOWS AT END OF STRUCT */
} HeapTupleHeaderData;

typedef HeapTupleHeaderData *HeapTupleHeader;
/*
 * information stored in t_infomask:
 */
#define HEAP_HASNULL			0x0001	/* has null attribute(s) */
#define HEAP_HASVARWIDTH		0x0002	/* has variable-width attribute(s) */
#define HEAP_HASEXTERNAL		0x0004	/* has external stored attribute(s) */
#define HEAP_HASOID				0x0008	/* has an object-id field */
/* bit 0x0010 is available */
#define HEAP_COMBOCID			0x0020	/* t_cid is a combo cid */
#define HEAP_XMAX_EXCL_LOCK		0x0040	/* xmax is exclusive locker */
#define HEAP_XMAX_SHARED_LOCK	0x0080	/* xmax is shared locker */
/* if either LOCK bit is set, xmax hasn't deleted the tuple, only locked it */
#define HEAP_IS_LOCKED	(HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_SHARED_LOCK)
#define HEAP_XMIN_COMMITTED		0x0100	/* t_xmin committed */
#define HEAP_XMIN_INVALID		0x0200	/* t_xmin invalid/aborted */
#define HEAP_XMAX_COMMITTED		0x0400	/* t_xmax committed */
#define HEAP_XMAX_INVALID		0x0800	/* t_xmax invalid/aborted */
#define HEAP_XMAX_IS_MULTI		0x1000	/* t_xmax is a MultiXactId */
#define HEAP_UPDATED			0x2000	/* this is UPDATEd version of row */
#define HEAP_MOVED_OFF			0x4000	/* moved to another place by VACUUM FULL */
#define HEAP_MOVED_IN			0x8000	/* moved from another place by VACUUM FULL */
#define HEAP_MOVED (HEAP_MOVED_OFF | HEAP_MOVED_IN)

#define HEAP_XACT_MASK			0xFFE0	/* visibility-related bits */

/*
 * information stored in t_infomask2:
 */
#define HEAP_NATTS_MASK			0x07FF	/* 11 bits for number of attributes */
/* bits 0x3800 are available */
#define HEAP_HOT_UPDATED		0x4000	/* tuple was HOT-updated */
#define HEAP_ONLY_TUPLE			0x8000	/* this is heap-only tuple */

#define HEAP2_XACT_MASK			0xC000	/* visibility-related bits */
/*
 * HeapTupleHeader accessor macros
 *
 * Note: beware of multiple evaluations of "tup" argument.  But the Set
 * macros evaluate their other argument only once.
 */

#define HeapTupleHeaderGetXmin(tup) \
( \
	(tup)->t_choice.t_heap.t_xmin \
)

#define HeapTupleHeaderSetXmin(tup, xid) \
( \
	(tup)->t_choice.t_heap.t_xmin = (xid) \
)

#define HeapTupleHeaderGetXmax(tup) \
( \
	(tup)->t_choice.t_heap.t_xmax \
)

#define HeapTupleHeaderSetXmax(tup, xid) \
( \
	(tup)->t_choice.t_heap.t_xmax = (xid) \
)

/*
 * HeapTupleHeaderGetRawCommandId will give you what's in the header whether
 * it is useful or not.  Most code should use HeapTupleHeaderGetCmin or
 * HeapTupleHeaderGetCmax instead, but note that those Assert that you can
 * get a legitimate result, ie you are in the originating transaction!
 */
#define HeapTupleHeaderGetRawCommandId(tup) \
( \
	(tup)->t_choice.t_heap.t_field3.t_cid \
)
/* SetCmin is reasonably simple since we never need a combo CID */
#define HeapTupleHeaderSetCmin(tup, cid) \
do { \
	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
	(tup)->t_infomask &= ~HEAP_COMBOCID; \
} while (0)

/* SetCmax must be used after HeapTupleHeaderAdjustCmax; see combocid.c */
#define HeapTupleHeaderSetCmax(tup, cid, iscombo) \
do { \
	Assert(!((tup)->t_infomask & HEAP_MOVED)); \
	(tup)->t_choice.t_heap.t_field3.t_cid = (cid); \
	if (iscombo) \
		(tup)->t_infomask |= HEAP_COMBOCID; \
	else \
		(tup)->t_infomask &= ~HEAP_COMBOCID; \
} while (0)

#define HeapTupleHeaderGetXvac(tup) \
( \
	((tup)->t_infomask & HEAP_MOVED) ? \
		(tup)->t_choice.t_heap.t_field3.t_xvac \
	: \
		InvalidTransactionId \
)

#define HeapTupleHeaderSetXvac(tup, xid) \
do { \
	Assert((tup)->t_infomask & HEAP_MOVED); \
	(tup)->t_choice.t_heap.t_field3.t_xvac = (xid); \
} while (0)
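/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the sanity check described in the t_ctid discussion near the top
 * of this file.  When following an update chain, the tuple found at the old
 * tuple's t_ctid is the genuine next version only if its XMIN equals the old
 * tuple's XMAX.  Backend code uses TransactionIdEquals from access/transam.h
 * for this; a plain comparison is shown here only to keep the sketch
 * self-contained, and the helper name is hypothetical.
 */
static inline bool
illustrative_is_next_version(HeapTupleHeader oldtup, HeapTupleHeader newtup)
{
	/* descendant iff the new tuple was inserted by the old tuple's deleter */
	return HeapTupleHeaderGetXmin(newtup) == HeapTupleHeaderGetXmax(oldtup);
}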
#define HeapTupleHeaderGetDatumLength(tup) \
	VARSIZE(tup)

#define HeapTupleHeaderSetDatumLength(tup, len) \
	SET_VARSIZE(tup, len)

#define HeapTupleHeaderGetTypeId(tup) \
( \
	(tup)->t_choice.t_datum.datum_typeid \
)

#define HeapTupleHeaderSetTypeId(tup, typeid) \
( \
	(tup)->t_choice.t_datum.datum_typeid = (typeid) \
)

#define HeapTupleHeaderGetTypMod(tup) \
( \
	(tup)->t_choice.t_datum.datum_typmod \
)

#define HeapTupleHeaderSetTypMod(tup, typmod) \
( \
	(tup)->t_choice.t_datum.datum_typmod = (typmod) \
)

#define HeapTupleHeaderGetOid(tup) \
( \
	((tup)->t_infomask & HEAP_HASOID) ? \
		*((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) \
	: \
		InvalidOid \
)

#define HeapTupleHeaderSetOid(tup, oid) \
do { \
	Assert((tup)->t_infomask & HEAP_HASOID); \
	*((Oid *) ((char *)(tup) + (tup)->t_hoff - sizeof(Oid))) = (oid); \
} while (0)
/*
 * Note that we stop considering a tuple HOT-updated as soon as it is known
 * aborted or the would-be updating transaction is known aborted.  For best
 * efficiency, check tuple visibility before using this macro, so that the
 * INVALID bits will be as up to date as possible.
 */
#define HeapTupleHeaderIsHotUpdated(tup) \
( \
	((tup)->t_infomask2 & HEAP_HOT_UPDATED) != 0 && \
	((tup)->t_infomask & (HEAP_XMIN_INVALID | HEAP_XMAX_INVALID)) == 0 \
)

#define HeapTupleHeaderSetHotUpdated(tup) \
( \
	(tup)->t_infomask2 |= HEAP_HOT_UPDATED \
)

#define HeapTupleHeaderClearHotUpdated(tup) \
( \
	(tup)->t_infomask2 &= ~HEAP_HOT_UPDATED \
)

#define HeapTupleHeaderIsHeapOnly(tup) \
( \
	(tup)->t_infomask2 & HEAP_ONLY_TUPLE \
)

#define HeapTupleHeaderSetHeapOnly(tup) \
( \
	(tup)->t_infomask2 |= HEAP_ONLY_TUPLE \
)

#define HeapTupleHeaderClearHeapOnly(tup) \
( \
	(tup)->t_infomask2 &= ~HEAP_ONLY_TUPLE \
)

#define HeapTupleHeaderGetNatts(tup) \
	((tup)->t_infomask2 & HEAP_NATTS_MASK)

#define HeapTupleHeaderSetNatts(tup, natts) \
( \
	(tup)->t_infomask2 = ((tup)->t_infomask2 & ~HEAP_NATTS_MASK) | (natts) \
)
/*
 * BITMAPLEN(NATTS) -
 *		Computes size of null bitmap given number of data columns.
 */
#define BITMAPLEN(NATTS)	(((int)(NATTS) + 7) / 8)
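/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): how t_hoff follows from the tuple layout described at the top of
 * this file.  The helper name is hypothetical; the real computation is done
 * in heap_form_tuple() and friends in heaptuple.c.  MAXALIGN comes from c.h.
 */
static inline Size
illustrative_tuple_header_size(int natts, bool hasnull, bool hasoid)
{
	Size		len = offsetof(HeapTupleHeaderData, t_bits);

	if (hasnull)
		len += BITMAPLEN(natts);	/* nulls bitmap, one bit per column */
	if (hasoid)
		len += sizeof(Oid);			/* OID sits just before the user data */

	return MAXALIGN(len);			/* t_hoff must be a multiple of MAXALIGN */
}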
/*
 * MaxHeapTupleSize is the maximum allowed size of a heap tuple, including
 * header and MAXALIGN alignment padding.  Basically it's BLCKSZ minus the
 * other stuff that has to be on a disk page.  Since heap pages use no
 * "special space", there's no deduction for that.
 *
 * NOTE: we allow for the ItemId that must point to the tuple, ensuring that
 * an otherwise-empty page can indeed hold a tuple of this size.  Because
 * ItemIds and tuples have different alignment requirements, don't assume that
 * you can, say, fit 2 tuples of size MaxHeapTupleSize/2 on the same page.
 */
#define MaxHeapTupleSize  (BLCKSZ - MAXALIGN(SizeOfPageHeaderData + sizeof(ItemIdData)))
/*
 * MaxHeapTuplesPerPage is an upper bound on the number of tuples that can
 * fit on one heap page.  (Note that indexes could have more, because they
 * use a smaller tuple header.)  We arrive at the divisor because each tuple
 * must be maxaligned, and it must have an associated item pointer.
 *
 * Note: with HOT, there could theoretically be more line pointers (not actual
 * tuples) than this on a heap page.  However we constrain the number of line
 * pointers to this anyway, to avoid excessive line-pointer bloat and not
 * require increases in the size of work arrays.
 */
#define MaxHeapTuplesPerPage	\
	((int) ((BLCKSZ - SizeOfPageHeaderData) / \
			(MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)) + sizeof(ItemIdData))))
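/*
 * Worked example (an editorial addition, not part of the original header):
 * on a typical build with the default 8 kB BLCKSZ, SizeOfPageHeaderData is
 * 24 bytes, the MAXALIGN'd fixed tuple header is 24 bytes, and an ItemIdData
 * is 4 bytes, so the expression above evaluates to
 * (8192 - 24) / (24 + 4) = 291 tuples per page.
 */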
/*
 * MaxAttrSize is a somewhat arbitrary upper limit on the declared size of
 * data fields of char(n) and similar types.  It need not have anything
 * directly to do with the *actual* upper limit of varlena values, which
 * is currently 1Gb (see TOAST structures in postgres.h).  I've set it
 * at 10Mb which seems like a reasonable number --- tgl 8/6/00.
 */
#define MaxAttrSize		(10 * 1024 * 1024)
/*
 * MinimalTuple is an alternative representation that is used for transient
 * tuples inside the executor, in places where transaction status information
 * is not required, the tuple rowtype is known, and shaving off a few bytes
 * is worthwhile because we need to store many tuples.  The representation
 * is chosen so that tuple access routines can work with either full or
 * minimal tuples via a HeapTupleData pointer structure.  The access routines
 * see no difference, except that they must not access the transaction status
 * or t_ctid fields because those aren't there.
 *
 * For the most part, MinimalTuples should be accessed via TupleTableSlot
 * routines.  These routines will prevent access to the "system columns"
 * and thereby prevent accidental use of the nonexistent fields.
 *
 * MinimalTupleData contains a length word, some padding, and fields matching
 * HeapTupleHeaderData beginning with t_infomask2.  The padding is chosen so
 * that offsetof(t_infomask2) is the same modulo MAXIMUM_ALIGNOF in both
 * structs.  This makes data alignment rules equivalent in both cases.
 *
 * When a minimal tuple is accessed via a HeapTupleData pointer, t_data is
 * set to point MINIMAL_TUPLE_OFFSET bytes before the actual start of the
 * minimal tuple --- that is, where a full tuple matching the minimal tuple's
 * data would start.  This trick is what makes the structs seem equivalent.
 *
 * Note that t_hoff is computed the same as in a full tuple, hence it includes
 * the MINIMAL_TUPLE_OFFSET distance.  t_len does not include that, however.
 */
#define MINIMAL_TUPLE_OFFSET \
	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) / MAXIMUM_ALIGNOF * MAXIMUM_ALIGNOF)
#define MINIMAL_TUPLE_PADDING \
	((offsetof(HeapTupleHeaderData, t_infomask2) - sizeof(uint32)) % MAXIMUM_ALIGNOF)
typedef struct MinimalTupleData
{
	uint32		t_len;			/* actual length of minimal tuple */

	char		mt_padding[MINIMAL_TUPLE_PADDING];

	/* Fields below here must match HeapTupleHeaderData! */

	uint16		t_infomask2;	/* number of attributes + various flags */

	uint16		t_infomask;		/* various flag bits, see below */

	uint8		t_hoff;			/* sizeof header incl. bitmap, padding */

	/* ^ - 23 bytes - ^ */

	bits8		t_bits[1];		/* bitmap of NULLs -- VARIABLE LENGTH */

	/* MORE DATA FOLLOWS AT END OF STRUCT */
} MinimalTupleData;

typedef MinimalTupleData *MinimalTuple;
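/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the pointer adjustment described above.  When a minimal tuple is
 * to be accessed through a HeapTupleData pointer (defined further down),
 * t_data is aimed MINIMAL_TUPLE_OFFSET bytes *before* the minimal tuple, so
 * that t_infomask2 lands at the same offset as in a full header.  The helper
 * name is hypothetical; the TupleTableSlot routines in the executor do this
 * for real.
 */
static inline char *
illustrative_minimal_tuple_as_full(MinimalTuple mtup)
{
	/* address where a full tuple matching this minimal tuple would start */
	return ((char *) mtup) - MINIMAL_TUPLE_OFFSET;
}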
/*
 * HeapTupleData is an in-memory data structure that points to a tuple.
 *
 * There are several ways in which this data structure is used:
 *
 * * Pointer to a tuple in a disk buffer: t_data points directly into the
 *	 buffer (which the code had better be holding a pin on, but this is not
 *	 reflected in HeapTupleData itself).
 *
 * * Pointer to nothing: t_data is NULL.  This is used as a failure indication
 *	 in some functions.
 *
 * * Part of a palloc'd tuple: the HeapTupleData itself and the tuple
 *	 form a single palloc'd chunk.  t_data points to the memory location
 *	 immediately following the HeapTupleData struct (at offset HEAPTUPLESIZE).
 *	 This is the output format of heap_form_tuple and related routines.
 *
 * * Separately allocated tuple: t_data points to a palloc'd chunk that
 *	 is not adjacent to the HeapTupleData.  (This case is deprecated since
 *	 it's difficult to tell apart from case #1.  It should be used only in
 *	 limited contexts where the code knows that case #1 will never apply.)
 *
 * * Separately allocated minimal tuple: t_data points MINIMAL_TUPLE_OFFSET
 *	 bytes before the start of a MinimalTuple.  As with the previous case,
 *	 this can't be told apart from case #1 by inspection; code setting up
 *	 or destroying this representation has to know what it's doing.
 *
 * t_len should always be valid, except in the pointer-to-nothing case.
 * t_self and t_tableOid should be valid if the HeapTupleData points to
 * a disk buffer, or if it represents a copy of a tuple on disk.  They
 * should be explicitly set invalid in manufactured tuples.
 */
typedef struct HeapTupleData
{
	uint32		t_len;			/* length of *t_data */
	ItemPointerData t_self;		/* SelfItemPointer */
	Oid			t_tableOid;		/* table the tuple came from */
	HeapTupleHeader t_data;		/* -> tuple header and data */
} HeapTupleData;

typedef HeapTupleData *HeapTuple;

#define HEAPTUPLESIZE	MAXALIGN(sizeof(HeapTupleData))
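/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the single-chunk layout produced by heap_form_tuple(), as
 * described in the "Part of a palloc'd tuple" case above.  The helper name
 * is hypothetical; palloc() and InvalidOid come from postgres.h and its
 * includes, and tuple_len is the total tuple length (header plus data).
 */
static inline HeapTuple
illustrative_alloc_tuple(uint32 tuple_len)
{
	HeapTuple	tuple = (HeapTuple) palloc(HEAPTUPLESIZE + tuple_len);

	tuple->t_len = tuple_len;
	/* header and data live immediately after the HeapTupleData struct */
	tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
	/* manufactured tuples should carry invalid t_self and t_tableOid */
	ItemPointerSetInvalid(&tuple->t_self);
	tuple->t_tableOid = InvalidOid;
	return tuple;
}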
/*
 * GETSTRUCT - given a HeapTuple pointer, return address of the user data
 */
#define GETSTRUCT(TUP) ((char *) ((TUP)->t_data) + (TUP)->t_data->t_hoff)
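/*
 * Usage example (an editorial addition, not part of the original header):
 * GETSTRUCT is normally cast to a catalog's "Form_" struct, e.g.
 *
 *		Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
 *
 * where Form_pg_class comes from catalog/pg_class.h (not included here and
 * shown only as an assumed example); GETSTRUCT simply skips the tuple header.
 */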
/*
 * Accessor macros to be used with HeapTuple pointers.
 */
#define HeapTupleIsValid(tuple) PointerIsValid(tuple)

#define HeapTupleHasNulls(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASNULL) != 0)

#define HeapTupleNoNulls(tuple) \
		(!((tuple)->t_data->t_infomask & HEAP_HASNULL))

#define HeapTupleHasVarWidth(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH) != 0)

#define HeapTupleAllFixed(tuple) \
		(!((tuple)->t_data->t_infomask & HEAP_HASVARWIDTH))

#define HeapTupleHasExternal(tuple) \
		(((tuple)->t_data->t_infomask & HEAP_HASEXTERNAL) != 0)

#define HeapTupleIsHotUpdated(tuple) \
		HeapTupleHeaderIsHotUpdated((tuple)->t_data)

#define HeapTupleSetHotUpdated(tuple) \
		HeapTupleHeaderSetHotUpdated((tuple)->t_data)

#define HeapTupleClearHotUpdated(tuple) \
		HeapTupleHeaderClearHotUpdated((tuple)->t_data)

#define HeapTupleIsHeapOnly(tuple) \
		HeapTupleHeaderIsHeapOnly((tuple)->t_data)

#define HeapTupleSetHeapOnly(tuple) \
		HeapTupleHeaderSetHeapOnly((tuple)->t_data)

#define HeapTupleClearHeapOnly(tuple) \
		HeapTupleHeaderClearHeapOnly((tuple)->t_data)

#define HeapTupleGetOid(tuple) \
		HeapTupleHeaderGetOid((tuple)->t_data)

#define HeapTupleSetOid(tuple, oid) \
		HeapTupleHeaderSetOid((tuple)->t_data, (oid))
/*
 * WAL record definitions for heapam.c's WAL operations
 *
 * XLOG allows some information to be stored in the high 4 bits of a log
 * record's xl_info field.  We use 3 of those bits for the opcode and one
 * for an init bit.
 */
#define XLOG_HEAP_INSERT		0x00
#define XLOG_HEAP_DELETE		0x10
#define XLOG_HEAP_UPDATE		0x20
#define XLOG_HEAP_MOVE			0x30
#define XLOG_HEAP_HOT_UPDATE	0x40
#define XLOG_HEAP_NEWPAGE		0x50
#define XLOG_HEAP_LOCK			0x60
#define XLOG_HEAP_INPLACE		0x70

#define XLOG_HEAP_OPMASK		0x70
/*
 * When we insert the first item on a new page in INSERT/UPDATE, we can
 * (and do) restore the entire page in redo.
 */
#define XLOG_HEAP_INIT_PAGE		0x80
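/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): splitting the rmgr-specific bits of xl_info into the heap opcode
 * and the init-page bit, as described above.  Real decoding happens in
 * heap_redo()/heap_desc() in heapam.c; this helper name is hypothetical.
 */
static inline void
illustrative_decode_heap_info(uint8 info, uint8 *opcode, bool *init_page)
{
	*opcode = info & XLOG_HEAP_OPMASK;				/* one of XLOG_HEAP_xxx */
	*init_page = (info & XLOG_HEAP_INIT_PAGE) != 0;	/* page rebuilt from scratch */
}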
/*
 * We ran out of opcodes, so heapam.c now has a second RmgrId.  These opcodes
 * are associated with RM_HEAP2_ID, but are not logically different from
 * the ones above associated with RM_HEAP_ID.  We apply XLOG_HEAP_OPMASK,
 * although currently XLOG_HEAP_INIT_PAGE is not used for any of these.
 */
#define XLOG_HEAP2_FREEZE		0x00
#define XLOG_HEAP2_CLEAN		0x10
#define XLOG_HEAP2_CLEAN_MOVE	0x20
/*
 * All we need to find the changed tuple.
 *
 * NB: on most machines, sizeof(xl_heaptid) will include some trailing pad
 * bytes for alignment.  We don't want to store the pad space in the XLOG,
 * so use SizeOfHeapTid for space calculations.  Similar comments apply for
 * the other xl_FOO structs.
 */
typedef struct xl_heaptid
{
	RelFileNode node;
	ItemPointerData tid;		/* changed tuple id */
} xl_heaptid;

#define SizeOfHeapTid		(offsetof(xl_heaptid, tid) + SizeOfIptrData)

/* This is what we need to know about delete */
typedef struct xl_heap_delete
{
	xl_heaptid	target;			/* deleted tuple id */
} xl_heap_delete;

#define SizeOfHeapDelete	(offsetof(xl_heap_delete, target) + SizeOfHeapTid)
/*
 * We don't store the whole fixed part (HeapTupleHeaderData) of an inserted
 * or updated tuple in WAL; we can save a few bytes by reconstructing the
 * fields that are available elsewhere in the WAL record, or perhaps just
 * plain needn't be reconstructed.  These are the fields we must store.
 * NOTE: t_hoff could be recomputed, but we may as well store it because
 * it will come for free due to alignment considerations.
 */
typedef struct xl_heap_header
{
	uint16		t_infomask2;
	uint16		t_infomask;
	uint8		t_hoff;
} xl_heap_header;

#define SizeOfHeapHeader	(offsetof(xl_heap_header, t_hoff) + sizeof(uint8))

/* This is what we need to know about insert */
typedef struct xl_heap_insert
{
	xl_heaptid	target;			/* inserted tuple id */
	/* xl_heap_header & TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_heap_insert;

#define SizeOfHeapInsert	(offsetof(xl_heap_insert, target) + SizeOfHeapTid)

/* This is what we need to know about update|move|hot_update */
typedef struct xl_heap_update
{
	xl_heaptid	target;			/* deleted tuple id */
	ItemPointerData newtid;		/* new inserted tuple id */
	/* NEW TUPLE xl_heap_header (PLUS xmax & xmin IF MOVE OP) */
	/* and TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_heap_update;

#define SizeOfHeapUpdate	(offsetof(xl_heap_update, newtid) + SizeOfIptrData)
/*
 * This is what we need to know about vacuum page cleanup/redirect
 *
 * The array of OffsetNumbers following the fixed part of the record contains:
 *	* for each redirected item: the item offset, then the offset redirected to
 *	* for each now-dead item: the item offset
 *	* for each now-unused item: the item offset
 * The total number of OffsetNumbers is therefore 2*nredirected+ndead+nunused.
 * Note that nunused is not explicitly stored, but may be found by reference
 * to the total record length.
 *
 * If the opcode is CLEAN_MOVE instead of CLEAN, then each redirection pair
 * should be interpreted as physically moving the "to" item pointer to the
 * "from" slot, rather than placing a redirection item in the "from" slot.
 * The moved pointers should be replaced by LP_UNUSED items (there will not
 * be explicit entries in the "now-unused" list for this).  Also, the
 * HEAP_ONLY bit in the moved tuples must be turned off.
 */
typedef struct xl_heap_clean
{
	RelFileNode node;
	BlockNumber block;
	uint16		nredirected;
	uint16		ndead;
	/* OFFSET NUMBERS FOLLOW */
} xl_heap_clean;

#define SizeOfHeapClean (offsetof(xl_heap_clean, ndead) + sizeof(uint16))
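/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): carving up the offset-number array attached to an xl_heap_clean
 * record, following the layout described above.  "offsets_len" is the length
 * in bytes of the offset data after the fixed part, from which the number of
 * now-unused items is inferred.  The names are hypothetical; the real
 * decoding lives in heap_xlog_clean() in heapam.c.
 */
static inline void
illustrative_decode_clean_offsets(xl_heap_clean *xlrec, Size offsets_len,
								  OffsetNumber **redirected,
								  OffsetNumber **nowdead,
								  OffsetNumber **nowunused,
								  int *nunused)
{
	OffsetNumber *offsets = (OffsetNumber *) ((char *) xlrec + SizeOfHeapClean);
	int			nredirected = xlrec->nredirected;
	int			ndead = xlrec->ndead;

	*redirected = offsets;						/* 2 * nredirected entries */
	*nowdead = offsets + 2 * nredirected;		/* ndead entries */
	*nowunused = *nowdead + ndead;				/* whatever entries remain */
	*nunused = (int) (offsets_len / sizeof(OffsetNumber))
		- 2 * nredirected - ndead;
}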
/* This is for replacing a page's contents in toto */
/* NB: this is used for indexes as well as heaps */
typedef struct xl_heap_newpage
{
	RelFileNode node;
	ForkNumber	forknum;
	BlockNumber blkno;			/* location of new page */
	/* entire page contents follow at end of record */
} xl_heap_newpage;

#define SizeOfHeapNewpage	(offsetof(xl_heap_newpage, blkno) + sizeof(BlockNumber))

/* This is what we need to know about lock */
typedef struct xl_heap_lock
{
	xl_heaptid	target;			/* locked tuple id */
	TransactionId locking_xid;	/* might be a MultiXactId not xid */
	bool		xid_is_mxact;	/* is it? */
	bool		shared_lock;	/* shared or exclusive row lock? */
} xl_heap_lock;

#define SizeOfHeapLock	(offsetof(xl_heap_lock, shared_lock) + sizeof(bool))
/* This is what we need to know about in-place update */
typedef struct xl_heap_inplace
{
	xl_heaptid	target;			/* updated tuple id */
	/* TUPLE DATA FOLLOWS AT END OF STRUCT */
} xl_heap_inplace;

#define SizeOfHeapInplace	(offsetof(xl_heap_inplace, target) + SizeOfHeapTid)

/* This is what we need to know about tuple freezing during vacuum */
typedef struct xl_heap_freeze
{
	RelFileNode node;
	BlockNumber block;
	TransactionId cutoff_xid;
	/* TUPLE OFFSET NUMBERS FOLLOW AT THE END */
} xl_heap_freeze;

#define SizeOfHeapFreeze (offsetof(xl_heap_freeze, cutoff_xid) + sizeof(TransactionId))
/* HeapTupleHeader functions implemented in utils/time/combocid.c */
extern CommandId HeapTupleHeaderGetCmin(HeapTupleHeader tup);
extern CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup);
extern void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup,
						  CommandId *cmax,
						  bool *iscombo);
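/*
 * Illustrative sketch (an editorial addition, not part of the original
 * header): the call sequence implied by the SetCmax comment above, combining
 * HeapTupleHeaderAdjustCmax with HeapTupleHeaderSetCmax when marking a tuple
 * deleted or updated.  heap_delete()/heap_update() in heapam.c do this for
 * real; the helper name is hypothetical.
 */
static inline void
illustrative_set_cmax(HeapTupleHeader tup, CommandId cmax)
{
	bool		iscombo;

	/* may replace cmax with a combo CID if cmin is still interesting */
	HeapTupleHeaderAdjustCmax(tup, &cmax, &iscombo);
	HeapTupleHeaderSetCmax(tup, cmax, iscombo);
}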
/* ----------------
 *		fastgetattr
 *
 *		Fetch a user attribute's value as a Datum (might be either a
 *		value, or a pointer into the data area of the tuple).
 *
 *		This must not be used when a system attribute might be requested.
 *		Furthermore, the passed attnum MUST be valid.  Use heap_getattr()
 *		instead, if in doubt.
 *
 *		This gets called many times, so we macro the cacheable and NULL
 *		lookups, and call nocachegetattr() for the rest.
 * ----------------
 */

#if !defined(DISABLE_COMPLEX_MACRO)

#define fastgetattr(tup, attnum, tupleDesc, isnull) \
( \
	AssertMacro((attnum) > 0), \
	(((isnull) != NULL) ? (*(isnull) = false) : (dummyret)NULL), \
	HeapTupleNoNulls(tup) ? \
	( \
		(tupleDesc)->attrs[(attnum)-1]->attcacheoff >= 0 ? \
		( \
			fetchatt((tupleDesc)->attrs[(attnum)-1], \
				(char *) (tup)->t_data + (tup)->t_data->t_hoff + \
					(tupleDesc)->attrs[(attnum)-1]->attcacheoff) \
		) \
		: \
			nocachegetattr((tup), (attnum), (tupleDesc), (isnull)) \
	) \
	: \
	( \
		att_isnull((attnum)-1, (tup)->t_data->t_bits) ? \
		( \
			(((isnull) != NULL) ? (*(isnull) = true) : (dummyret)NULL), \
			(Datum)NULL \
		) \
		: \
		( \
			nocachegetattr((tup), (attnum), (tupleDesc), (isnull)) \
		) \
	) \
)

#else							/* defined(DISABLE_COMPLEX_MACRO) */

extern Datum fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
			bool *isnull);
#endif   /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------
 *		heap_getattr
 *
 *		Extract an attribute of a heap tuple and return it as a Datum.
 *		This works for either system or user attributes.  The given attnum
 *		is properly range-checked.
 *
 *		If the field in question has a NULL value, we return a zero Datum
 *		and set *isnull == true.  Otherwise, we set *isnull == false.
 *
 *		<tup> is the pointer to the heap tuple.  <attnum> is the attribute
 *		number of the column (field) caller wants.  <tupleDesc> is a
 *		pointer to the structure describing the row and all its fields.
 * ----------------
 */
#define heap_getattr(tup, attnum, tupleDesc, isnull) \
( \
	AssertMacro((tup) != NULL), \
	( \
		((attnum) > 0) ? \
		( \
			((attnum) > (int) HeapTupleHeaderGetNatts((tup)->t_data)) ? \
			( \
				(((isnull) != NULL) ? (*(isnull) = true) : (dummyret)NULL), \
				(Datum)NULL \
			) \
			: \
				fastgetattr((tup), (attnum), (tupleDesc), (isnull)) \
		) \
		: \
			heap_getsysattr((tup), (attnum), (tupleDesc), (isnull)) \
	) \
)
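/*
 * Usage sketch (an editorial addition, not part of the original header):
 * typical use of heap_getattr().  The tuple, its TupleDesc, and the attribute
 * number come from the caller; DatumGetInt32 is from postgres.h and is
 * assumed here to match the column's declared type.  The helper name is
 * hypothetical.
 */
static inline int32
illustrative_get_int32_attr(HeapTuple tup, int attnum, TupleDesc tupdesc)
{
	bool		isnull;
	Datum		d = heap_getattr(tup, attnum, tupdesc, &isnull);

	/* a NULL column comes back as a zero Datum with *isnull set */
	return isnull ? 0 : DatumGetInt32(d);
}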
/* prototypes for functions in common/heaptuple.c */
extern Size heap_compute_data_size(TupleDesc tupleDesc,
					   Datum *values, bool *isnull);
extern void heap_fill_tuple(TupleDesc tupleDesc,
				Datum *values, bool *isnull,
				char *data, Size data_size,
				uint16 *infomask, bits8 *bit);
extern bool heap_attisnull(HeapTuple tup, int attnum);
extern Datum nocachegetattr(HeapTuple tup, int attnum,
			   TupleDesc att, bool *isnull);
extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
				bool *isnull);
extern HeapTuple heap_copytuple(HeapTuple tuple);
extern void heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest);
extern HeapTuple heap_form_tuple(TupleDesc tupleDescriptor,
				Datum *values, bool *isnull);
extern HeapTuple heap_formtuple(TupleDesc tupleDescriptor,
			   Datum *values, char *nulls);
extern HeapTuple heap_modify_tuple(HeapTuple tuple,
				  TupleDesc tupleDesc,
				  Datum *replValues,
				  bool *replIsnull,
				  bool *doReplace);
extern HeapTuple heap_modifytuple(HeapTuple tuple,
				 TupleDesc tupleDesc,
				 Datum *replValues,
				 char *replNulls,
				 char *replActions);
extern void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc,
				  Datum *values, bool *isnull);
extern void heap_deformtuple(HeapTuple tuple, TupleDesc tupleDesc,
				 Datum *values, char *nulls);
extern void heap_freetuple(HeapTuple htup);
extern MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor,
						Datum *values, bool *isnull);
extern void heap_free_minimal_tuple(MinimalTuple mtup);
extern MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup);
extern HeapTuple heap_tuple_from_minimal_tuple(MinimalTuple mtup);
extern MinimalTuple minimal_tuple_from_heap_tuple(HeapTuple htup);
extern HeapTuple heap_addheader(int natts, bool withoid,
			   Size structlen, void *structure);

#endif   /* HTUP_H */