/*-------------------------------------------------------------------------
 *
 * catcache.c
 *	  System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heaptoast.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/pg_prng.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"		/* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"
 /* #define CACHEDEBUG */	/* turns DEBUG elogs on */
/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs.  Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
/*
 *		variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE_elog(...)				elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
											   int nkeys,
											   Datum v1, Datum v2,
											   Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
												int nkeys,
												uint32 hashValue,
												Index hashIndex,
												Datum v1, Datum v2,
												Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
										   Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
												HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
											const Datum *cachekeys,
											const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
										HeapTuple ntp, SysScanDesc scandesc,
										Datum *arguments,
										uint32 hashValue, Index hashIndex);

static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *srckeys, Datum *dstkeys);
/*
 *		internal support functions
 */
/*
 * Hash and equality functions for system types that are used as cache key
 * fields.  In some cases, we just call the regular SQL-callable functions for
 * the appropriate data type, but that tends to be a little slow, and the
 * speed of these functions is performance-critical.  Therefore, for data
 * types that frequently occur as catcache keys, we hard-code the logic here.
 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
 */
static bool
chareqfast(Datum a, Datum b)
{
	return DatumGetChar(a) == DatumGetChar(b);
}

static uint32
charhashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetChar(datum));
}

static bool
nameeqfast(Datum a, Datum b)
{
	char	   *ca = NameStr(*DatumGetName(a));
	char	   *cb = NameStr(*DatumGetName(b));

	return strncmp(ca, cb, NAMEDATALEN) == 0;
}

static uint32
namehashfast(Datum datum)
{
	char	   *key = NameStr(*DatumGetName(datum));

	return hash_any((unsigned char *) key, strlen(key));
}

static bool
int2eqfast(Datum a, Datum b)
{
	return DatumGetInt16(a) == DatumGetInt16(b);
}

static uint32
int2hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt16(datum));
}

static bool
int4eqfast(Datum a, Datum b)
{
	return DatumGetInt32(a) == DatumGetInt32(b);
}

static uint32
int4hashfast(Datum datum)
{
	return murmurhash32((int32) DatumGetInt32(datum));
}

static bool
texteqfast(Datum a, Datum b)
{
	/*
	 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here.  We just
	 * want to take the fast "deterministic" path in texteq().
	 */
	return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
}

static uint32
texthashfast(Datum datum)
{
	/* analogously here as in texteqfast() */
	return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
}

static bool
oidvectoreqfast(Datum a, Datum b)
{
	return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
}

static uint32
oidvectorhashfast(Datum datum)
{
	return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
}
/* Lookup support functions for a type. */
static void
GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc,
				 CCFastEqualFN *fasteqfunc)
{
	switch (keytype)
	{
		case BOOLOID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_BOOLEQ;
			break;
		case CHAROID:
			*hashfunc = charhashfast;
			*fasteqfunc = chareqfast;
			*eqfunc = F_CHAREQ;
			break;
		case NAMEOID:
			*hashfunc = namehashfast;
			*fasteqfunc = nameeqfast;
			*eqfunc = F_NAMEEQ;
			break;
		case INT2OID:
			*hashfunc = int2hashfast;
			*fasteqfunc = int2eqfast;
			*eqfunc = F_INT2EQ;
			break;
		case INT4OID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_INT4EQ;
			break;
		case TEXTOID:
			*hashfunc = texthashfast;
			*fasteqfunc = texteqfast;
			*eqfunc = F_TEXTEQ;
			break;
		case OIDOID:
		case REGPROCOID:
		case REGPROCEDUREOID:
		case REGOPEROID:
		case REGOPERATOROID:
		case REGCLASSOID:
		case REGTYPEOID:
		case REGCOLLATIONOID:
		case REGCONFIGOID:
		case REGDICTIONARYOID:
		case REGROLEOID:
		case REGNAMESPACEOID:
			*hashfunc = int4hashfast;
			*fasteqfunc = int4eqfast;
			*eqfunc = F_OIDEQ;
			break;
		case OIDVECTOROID:
			*hashfunc = oidvectorhashfast;
			*fasteqfunc = oidvectoreqfast;
			*eqfunc = F_OIDVECTOREQ;
			break;
		default:
			elog(FATAL, "type %u not supported as catcache key", keytype);
			*hashfunc = NULL;	/* keep compiler quiet */
			*eqfunc = InvalidOid;
			break;
	}
}
/*
 *		CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
							 Datum v1, Datum v2, Datum v3, Datum v4)
{
	uint32		hashValue = 0;
	uint32		oneHash;
	CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

	CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
			   cache->cc_relname, nkeys, cache);

	switch (nkeys)
	{
		case 4:
			oneHash = (cc_hashfunc[3]) (v4);
			hashValue ^= pg_rotate_left32(oneHash, 24);
			/* FALLTHROUGH */
		case 3:
			oneHash = (cc_hashfunc[2]) (v3);
			hashValue ^= pg_rotate_left32(oneHash, 16);
			/* FALLTHROUGH */
		case 2:
			oneHash = (cc_hashfunc[1]) (v2);
			hashValue ^= pg_rotate_left32(oneHash, 8);
			/* FALLTHROUGH */
		case 1:
			oneHash = (cc_hashfunc[0]) (v1);
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
/*
 *		CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
	Datum		v1 = 0,
				v2 = 0,
				v3 = 0,
				v4 = 0;
	bool		isNull = false;
	int		   *cc_keyno = cache->cc_keyno;
	TupleDesc	cc_tupdesc = cache->cc_tupdesc;

	/* Now extract key fields from tuple, insert into scankey */
	switch (nkeys)
	{
		case 4:
			v4 = fastgetattr(tuple, cc_keyno[3], cc_tupdesc, &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			v3 = fastgetattr(tuple, cc_keyno[2], cc_tupdesc, &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			v2 = fastgetattr(tuple, cc_keyno[1], cc_tupdesc, &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			v1 = fastgetattr(tuple, cc_keyno[0], cc_tupdesc, &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}
/*
 *		CatalogCacheCompareTuple
 *
 * Compare a tuple to the passed arguments.
 */
static inline bool
CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
						 const Datum *cachekeys,
						 const Datum *searchkeys)
{
	const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
			return false;
	}
	return true;
}
#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
/*
 *		CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/*
	 * Free keys when we're dealing with a negative entry, normal entries just
	 * point into tuple, allocated together with the CatCTup.
	 */
	if (ct->negative)
		CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
						 cache->cc_keyno, ct->keys);

	pfree(ct);

	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}
/*
 *		CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated column data */
	CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
					 cache->cc_keyno, cl->keys);

	pfree(cl);
}
/*
 *	CatCacheInvalidate
 *
 *	Invalidate entries in the specified cache, given a hash value.
 *
 *	We delete cache entries that match the hash value, whether positive
 *	or negative.  We don't care whether the invalidation is the result
 *	of a tuple insertion or a deletion.
 *
 *	We used to try to match positive cache entries by TID, but that is
 *	unsafe after a VACUUM FULL on a system catalog: an inval event could
 *	be queued before VACUUM FULL, and then processed afterwards, when the
 *	target tuple that has to be invalidated has a different TID than it
 *	did when the event was created.  So now we just compare hash values and
 *	accept the small risk of unnecessary invalidations due to false matches.
 *
 *	This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}
}
/* ----------------------------------------------------------------
 *					   public functions
 * ----------------------------------------------------------------
 */

/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
	/*
	 * Purely for paranoia, check that context doesn't exist; caller probably
	 * did so already.
	 */
	if (!CacheMemoryContext)
		CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
												   "CacheMemoryContext",
												   ALLOCSET_DEFAULT_SIZES);
}
/*
 *		ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}
}
/*
 *		ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "ResetCatalogCaches called");

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		ResetCatalogCache(cache);
	}

	CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}
/*
 *		CatalogCacheFlushCatalog
 *
 *	Flush all catcache entries that came from the specified system catalog.
 *	This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *	tuples very likely now have different TIDs than before.  (At one point
 *	we also tried to force re-execution of CatalogCacheInitializeCache for
 *	the cache(s) on that catalog.  This is a bad idea since it leads to all
 *	kinds of trouble if a cache flush occurs while loading cache entries.
 *	We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *	rather than relying on the relcache to keep a tupdesc for us.  Of course
 *	this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
/*
 *		InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif
CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the initial number of hash buckets to use in this catcache.
	 * It will be enlarged later if it becomes too full.
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * Allocate a new cache structure, aligning to a cacheline boundary
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 */
	cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
									 MCXT_ALLOC_ZERO);
	cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
		cp->cc_keyno[i] = key[i];

	/*
	 * new cache is initialized as far as we can go for now. print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
/*
 * Enlarge a catcache, doubling the number of buckets.
 */
static void
RehashCatCache(CatCache *cp)
{
	dlist_head *newbucket;
	int			newnbuckets;
	int			i;

	elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
		 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);

	/* Allocate a new, larger, hash table. */
	newnbuckets = cp->cc_nbuckets * 2;
	newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext,
													  newnbuckets * sizeof(dlist_head));

	/* Move all entries from old hash table to new. */
	for (i = 0; i < cp->cc_nbuckets; i++)
	{
		dlist_mutable_iter iter;

		dlist_foreach_modify(iter, &cp->cc_bucket[i])
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);
			int			hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

			dlist_delete(iter.cur);
			dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
		}
	}

	/* Switch to the new array. */
	pfree(cp->cc_bucket);
	cp->cc_nbuckets = newnbuckets;
	cp->cc_bucket = newbucket;
}
/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
	if (cache->cc_keyno[i] > 0) { \
		elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
			 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
			 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
	} else { \
		elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
			 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
	} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the
	 * end of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized
	 */
	cache->cc_tupdesc = tupdesc;
}
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
/*
 *		IndexScanOK
 *
 *	This function checks for tuples that will be fetched by
 *	IndexSupportInitialize() during relcache initialization for
 *	certain system indexes that support critical syscaches.
 *	We can't use an indexscan to fetch these, else we'll get into
 *	infinite recursion.  A plain heap scan will work, however.
 *	Once we have completed relcache initialization (signaled by
 *	criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *	Similarly, during backend startup we have to be able to use the
 *	pg_authid, pg_auth_members and pg_database syscaches for
 *	authentication even if we don't yet have relcache entries for those
 *	catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
	switch (cache->id)
	{
		case INDEXRELID:

			/*
			 * Rather than tracking exactly which indexes have to be loaded
			 * before we can use indexscans (which changes from time to time),
			 * just force all pg_index searches to be heap scans until we've
			 * built the critical relcaches.
			 */
			if (!criticalRelcachesBuilt)
				return false;
			break;

		case AMOID:
		case AMNAME:

			/*
			 * Always do heap scans in pg_am, because it's so small there's
			 * not much point in an indexscan anyway.  We *must* do this when
			 * initially building critical relcache entries, but we might as
			 * well just always do it.
			 */
			return false;

		case AUTHNAME:
		case AUTHOID:
		case AUTHMEMMEMROLE:
		case DATABASEOID:

			/*
			 * Protect authentication lookups occurring before relcache has
			 * collected entries for shared indexes.
			 */
			if (!criticalSharedRelcachesBuilt)
				return false;
			break;

		default:
			break;
	}

	/* Normal case, allow index scan */
	return true;
}
/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.  The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
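/*
 * Illustrative sketch (example only, kept disabled): typical caller-side use
 * of the catcache through the syscache wrappers.  SearchSysCache1() resolves
 * to SearchCatCache1() below; the returned tuple must not be modified and
 * must be released with ReleaseSysCache(), i.e. ReleaseCatCache().  The
 * TYPEOID lookup and the "typid" variable are illustrative assumptions.
 */
#if 0
	HeapTuple	tup;

	tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
	if (HeapTupleIsValid(tup))
	{
		/* ... read fields from the cached, read-only tuple here ... */
		ReleaseSysCache(tup);	/* drop the refcount taken by the search */
	}
#endif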
/*
 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
 * arguments. The compiler can inline the body and unroll loops, making them a
 * bit faster than SearchCatCache().
 */

HeapTuple
SearchCatCache1(CatCache *cache,
				Datum v1)
{
	return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}

HeapTuple
SearchCatCache2(CatCache *cache,
				Datum v1, Datum v2)
{
	return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}

HeapTuple
SearchCatCache3(CatCache *cache,
				Datum v1, Datum v2, Datum v3)
{
	return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}

HeapTuple
SearchCatCache4(CatCache *cache,
				Datum v1, Datum v2, Datum v3, Datum v4)
{
	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* Make sure we're in an xact, even if this ends up being a cache hit */
	Assert(IsTransactionState());

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	bool		stale;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 *
	 * Another case, which we *must* handle, is that the tuple could become
	 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
	 * AcceptInvalidationMessages can run during TOAST table access).  We do
	 * not want to return already-stale catcache entries, so we loop around
	 * and do the table scan again if that happens.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	do
	{
		/*
		 * Ok, need to make a lookup in the relation, copy the scankey and
		 * fill out any per-call fields.  (We must re-do this when retrying,
		 * because systable_beginscan scribbles on the scankey.)
		 */
		memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
		cur_skey[0].sk_argument = v1;
		cur_skey[1].sk_argument = v2;
		cur_skey[2].sk_argument = v3;
		cur_skey[3].sk_argument = v4;

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
									 hashValue, hashIndex);

		/* Creating a negative cache entry shouldn't fail */
		Assert(ct != NULL);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
/*
 *	ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *	will be freed as soon as their refcount goes to zero.  In combination
 *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *	to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}
/*
 *	GetCatCacheHashValue
 *
 *		Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
					 Datum v1,
					 Datum v2,
					 Datum v3,
					 Datum v4)
{
	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/*
	 * calculate the hash value
	 */
	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
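/*
 * Illustrative sketch (example only, kept disabled): the exposed hash value
 * is what syscache invalidation callbacks receive.  A module can remember
 * the hash of a row it cares about (e.g. via GetSysCacheHashValue1, which
 * ends up here) and compare it in its callback.  "remembered_hashvalue" and
 * "rebuild_private_cache" are hypothetical names for this sketch.
 */
#if 0
static void
example_inval_callback(Datum arg, int cacheid, uint32 hashvalue)
{
	/* hashvalue == 0 conventionally means "flush everything" */
	if (hashvalue == 0 || hashvalue == remembered_hashvalue)
		rebuild_private_cache();	/* hypothetical local invalidation */
}

	/* somewhere during module initialization: */
	CacheRegisterSyscacheCallback(PROCOID, example_inval_callback, (Datum) 0);
#endif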
/*
 *	SearchCatCacheList
 *
 *		Generate a list of all tuples matching a partial key (that is,
 *		a key specifying just the first K of the cache's N key columns).
 *
 *		It doesn't make any sense to specify all of the cache's key columns
 *		here: since the key is unique, there could be at most one match, so
 *		you ought to use SearchCatCache() instead.  Hence this function takes
 *		one fewer Datum argument than SearchCatCache() does.
 *
 *		The caller must not modify the list object or the pointed-to tuples,
 *		and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3)
{
	Datum		v4 = 0;			/* dummy last-column value */
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	dlist_iter	iter;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	dlist_foreach(iter, &cache->cc_lists)
	{
		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;

		if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		dlist_move_head(&cache->cc_lists, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
				   cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.  ctlist must be valid throughout
	 * the PG_TRY block.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
		Relation	relation;
		SysScanDesc scandesc;
		bool		stale;

		relation = table_open(cache->cc_reloid, AccessShareLock);

		do
		{
			/*
			 * Ok, need to make a lookup in the relation, copy the scankey and
			 * fill out any per-call fields.  (We must re-do this when
			 * retrying, because systable_beginscan scribbles on the scankey.)
			 */
			memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
			cur_skey[0].sk_argument = v1;
			cur_skey[1].sk_argument = v2;
			cur_skey[2].sk_argument = v3;
			cur_skey[3].sk_argument = v4;

			scandesc = systable_beginscan(relation,
										  cache->cc_indexoid,
										  IndexScanOK(cache, cur_skey),
										  NULL,
										  nkeys,
										  cur_skey);

			/* The list will be ordered iff we are doing an index scan */
			ordered = (scandesc->irel != NULL);

			stale = false;

			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
			{
				uint32		hashValue;
				Index		hashIndex;
				bool		found = false;
				dlist_head *bucket;

				/*
				 * See if there's an entry for this tuple already.
				 */
				ct = NULL;
				hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
				hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

				bucket = &cache->cc_bucket[hashIndex];
				dlist_foreach(iter, bucket)
				{
					ct = dlist_container(CatCTup, cache_elem, iter.cur);

					if (ct->dead || ct->negative)
						continue;	/* ignore dead and negative entries */

					if (ct->hash_value != hashValue)
						continue;	/* quickly skip entry if wrong hash val */

					if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
						continue;	/* not same tuple */

					/*
					 * Found a match, but can't use it if it belongs to
					 * another list already
					 */
					if (ct->c_list)
						continue;

					found = true;
					break;		/* A-OK */
				}

				if (!found)
				{
					/* We didn't find a usable entry, so make a new one */
					ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
												 hashValue, hashIndex);
					/* upon failure, we must start the scan over */
					if (ct == NULL)
					{
						/*
						 * Release refcounts on any items we already had.  We
						 * dare not try to free them if they're now
						 * unreferenced, since an error while doing that would
						 * result in the PG_CATCH below doing extra refcount
						 * decrements.  Besides, we'll likely re-adopt those
						 * items in the next iteration, so it's not worth
						 * complicating matters to try to get rid of them.
						 */
						foreach(ctlist_item, ctlist)
						{
							ct = (CatCTup *) lfirst(ctlist_item);
							Assert(ct->c_list == NULL);
							Assert(ct->refcount > 0);
							ct->refcount--;
						}
						/* Reset ctlist in preparation for new try */
						ctlist = NIL;
						stale = true;
						break;
					}
				}

				/* Careful here: add entry to ctlist, then bump its refcount */
				/* This way leaves state correct if lappend runs out of memory */
				ctlist = lappend(ctlist, ct);
				ct->refcount++;
			}

			systable_endscan(scandesc);
		} while (stale);

		table_close(relation, AccessShareLock);

		/* Now we can build the CatCList entry. */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

		/* Extract key values */
		CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
						 arguments, cl->keys);
		MemoryContextSwitchTo(oldcxt);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;

			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	dlist_push_head(&cache->cc_lists, &cl->cache_elem);

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
			   cache->cc_relname, nmembers);

	return cl;
}
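/*
 * Illustrative sketch (example only, kept disabled): typical use of a
 * catcache list via the syscache wrappers.  SearchSysCacheList1() resolves
 * to SearchCatCacheList() above; the members are read-only and the list is
 * released with ReleaseSysCacheList(), i.e. ReleaseCatCacheList().  The
 * AMOPOPID lookup and the "opno" variable are illustrative assumptions.
 */
#if 0
	CatCList   *catlist;
	int			i;

	catlist = SearchSysCacheList1(AMOPOPID, ObjectIdGetDatum(opno));
	for (i = 0; i < catlist->n_members; i++)
	{
		HeapTuple	tuple = &catlist->members[i]->tuple;

		/* ... examine each matching cached tuple here ... */
	}
	ReleaseSysCacheList(catlist);
#endif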
/*
 *	ReleaseCatCacheList
 *
 *	Decrement the reference count of a catcache list.
 */
void
ReleaseCatCacheList(CatCList *list)
{
	/* Safety checks to ensure we were handed a cache entry */
	Assert(list->cl_magic == CL_MAGIC);
	Assert(list->refcount > 0);

	list->refcount--;
	ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

	if (
#ifndef CATCACHE_FORCE_RELEASE
		list->dead &&
#endif
		list->refcount == 0)
		CatCacheRemoveCList(list->my_cache, list);
}
/*
 * CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.  The new entry initially has refcount 0.
 *
 * To create a normal cache entry, ntp must be the HeapTuple just fetched
 * from scandesc, and "arguments" is not used.  To create a negative cache
 * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
 * keys to use.  In either case, hashValue/hashIndex are the hash values
 * computed from the cache keys.
 *
 * Returns NULL if we attempt to detoast the tuple and observe that it
 * became stale.  (This cannot happen for a negative entry.)  Caller must
 * retry the tuple lookup in that case.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
						Datum *arguments,
						uint32 hashValue, Index hashIndex)
{
	CatCTup    *ct;
	HeapTuple	dtp;
	MemoryContext oldcxt;

	if (ntp)
	{
		int			i;

		/*
		 * The visibility recheck below essentially never fails during our
		 * regression tests, and there's no easy way to force it to fail for
		 * testing purposes.  To ensure we have test coverage for the retry
		 * paths in our callers, make debug builds randomly fail about 0.1% of
		 * the times through this code path, even when there's no toasted
		 * fields.
		 */
#ifdef USE_ASSERT_CHECKING
		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
			return NULL;
#endif

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line.  This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
		{
			dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);

			/*
			 * The tuple could become stale while we are doing toast table
			 * access (since AcceptInvalidationMessages can run then), so we
			 * must recheck its visibility afterwards.
			 */
			if (!systable_recheck_tuple(scandesc, ntp))
			{
				heap_freetuple(dtp);
				return NULL;
			}
		}
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);

			ct->keys[i] = atp;
		}
	}
	else
	{
		/* Set up keys for a negative cache entry */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = (ntp == NULL);
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
/*
 * Helper routine that frees keys stored in the keys array.
 */
static void
CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
{
	int			i;

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att;

		/* system attribute are not supported in caches */
		Assert(attnum > 0);

		att = TupleDescAttr(tupdesc, attnum - 1);

		if (!att->attbyval)
			pfree(DatumGetPointer(keys[i]));
	}
}
/*
 * Helper routine that copies the keys in the srckeys array into the dstkeys
 * one, guaranteeing that the datums are fully allocated in the current memory
 * context.
 */
static void
CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
				 Datum *srckeys, Datum *dstkeys)
{
	int			i;

	/*
	 * XXX: memory and lookup performance could possibly be improved by
	 * storing all keys in one allocation.
	 */

	for (i = 0; i < nkeys; i++)
	{
		int			attnum = attnos[i];
		Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
		Datum		src = srckeys[i];
		NameData	srcname;

		/*
		 * Must be careful in case the caller passed a C string where a NAME
		 * is wanted: convert the given argument to a correctly padded NAME.
		 * Otherwise the memcpy() done by datumCopy() could fall off the end
		 * of memory.
		 */
		if (att->atttypid == NAMEOID)
		{
			namestrcpy(&srcname, DatumGetCString(src));
			src = NameGetDatum(&srcname);
		}

		dstkeys[i] = datumCopy(src,
							   att->attbyval,
							   att->attlen);
	}
}
/*
 *	PrepareToInvalidateCacheTuple()
 *
 *	This is part of a rather subtle chain of events, so pay attention:
 *
 *	When a tuple is inserted or deleted, it cannot be flushed from the
 *	catcaches immediately, for reasons explained at the top of cache/inval.c.
 *	Instead we have to add entry(s) for the tuple to a list of pending tuple
 *	invalidations that will be done at the end of the command or transaction.
 *
 *	The lists of tuples that need to be flushed are kept by inval.c.  This
 *	routine is a helper routine for inval.c.  Given a tuple belonging to
 *	the specified relation, find all catcaches it could be in, compute the
 *	correct hash value for each such catcache, and call the specified
 *	function to record the cache id and hash value in inval.c's lists.
 *	SysCacheInvalidate will be called later, if appropriate,
 *	using the recorded information.
 *
 *	For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *	For an update, we are called just once, with tuple being the old tuple
 *	version and newtuple the new version.  We should make two list entries
 *	if the tuple's hash value changed, but only one if it didn't.
 *
 *	Note that it is irrelevant whether the given tuple is actually loaded
 *	into the catcache at the moment.  Even if it's not there now, it might
 *	be by the end of the command, or there might be a matching negative entry
 *	to flush --- or other backends' caches might have such entries --- so
 *	we have to make list entries to flush it later.
 *
 *	Also note that it's not an error if there are no catcaches for the
 *	specified relation.  inval.c doesn't know exactly which rels have
 *	catcaches --- it will call this routine for any tuple that's in a
 *	system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
							  HeapTuple tuple,
							  HeapTuple newtuple,
							  void (*function) (int, uint32, Oid))
{
	slist_iter	iter;
	Oid			reloid;

	CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

	/*
	 * sanity checks
	 */
	Assert(RelationIsValid(relation));
	Assert(HeapTupleIsValid(tuple));
	Assert(PointerIsValid(function));
	Assert(CacheHdr != NULL);

	reloid = RelationGetRelid(relation);

	/* ----------------
	 *	for each cache
	 *	   if the cache contains tuples from the specified relation
	 *		   compute the tuple's hash value(s) in this cache,
	 *		   and call the passed function to register the information.
	 * ----------------
	 */

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
		uint32		hashvalue;
		Oid			dbid;

		if (ccp->cc_reloid != reloid)
			continue;

		/* Just in case cache hasn't finished initialization yet... */
		if (ccp->cc_tupdesc == NULL)
			CatalogCacheInitializeCache(ccp);

		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

		(*function) (ccp->id, hashvalue, dbid);

		if (newtuple)
		{
			uint32		newhashvalue;

			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);

			if (newhashvalue != hashvalue)
				(*function) (ccp->id, newhashvalue, dbid);
		}
	}
}
/*
 * Subroutines for warning about reference leaks.  These are exported so
 * that resowner.c can call them.
 */
void
PrintCatCacheLeakWarning(HeapTuple tuple)
{
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety check to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);

	elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
		 ct->my_cache->cc_relname, ct->my_cache->id,
		 ItemPointerGetBlockNumber(&(tuple->t_self)),
		 ItemPointerGetOffsetNumber(&(tuple->t_self)),
		 ct->refcount);
}

void
PrintCatCacheListLeakWarning(CatCList *list)
{
	elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
		 list->my_cache->cc_relname, list->my_cache->id,
		 list, list->refcount);
}