Cope with catcache entries becoming stale during detoasting.
/*-------------------------------------------------------------------------
 *
 * catcache.c
 *    System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/heaptoast.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/table.h"
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/pg_prng.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"        /* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"


/* #define CACHEDEBUG */        /* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs.  Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
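
/*
 * Illustrative note (added commentary, not from the original file): with
 * sz = 256, HASH_INDEX(h, 256) reduces to (h & 0xFF), i.e. the low-order
 * byte of the hash picks the bucket.  This shortcut is valid only because
 * sz is always a power of two.
 */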

/*
 *        variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE_elog(...)             elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;

static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
                                               int nkeys,
                                               Datum v1, Datum v2,
                                               Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
                                                int nkeys,
                                                uint32 hashValue,
                                                Index hashIndex,
                                                Datum v1, Datum v2,
                                                Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                                           Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
                                                HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
                                            const Datum *cachekeys,
                                            const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
                                        HeapTuple ntp, SysScanDesc scandesc,
                                        Datum *arguments,
                                        uint32 hashValue, Index hashIndex);

static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
                             Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
                             Datum *srckeys, Datum *dstkeys);


/*
 *        internal support functions
 */

/*
 * Hash and equality functions for system types that are used as cache key
 * fields.  In some cases, we just call the regular SQL-callable functions for
 * the appropriate data type, but that tends to be a little slow, and the
 * speed of these functions is performance-critical.  Therefore, for data
 * types that frequently occur as catcache keys, we hard-code the logic here.
 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
 */

static bool
chareqfast(Datum a, Datum b)
{
    return DatumGetChar(a) == DatumGetChar(b);
}

static uint32
charhashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetChar(datum));
}

static bool
nameeqfast(Datum a, Datum b)
{
    char       *ca = NameStr(*DatumGetName(a));
    char       *cb = NameStr(*DatumGetName(b));

    return strncmp(ca, cb, NAMEDATALEN) == 0;
}

static uint32
namehashfast(Datum datum)
{
    char       *key = NameStr(*DatumGetName(datum));

    return hash_any((unsigned char *) key, strlen(key));
}

static bool
int2eqfast(Datum a, Datum b)
{
    return DatumGetInt16(a) == DatumGetInt16(b);
}

static uint32
int2hashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetInt16(datum));
}

static bool
int4eqfast(Datum a, Datum b)
{
    return DatumGetInt32(a) == DatumGetInt32(b);
}

static uint32
int4hashfast(Datum datum)
{
    return murmurhash32((int32) DatumGetInt32(datum));
}

static bool
texteqfast(Datum a, Datum b)
{
    /*
     * The use of DEFAULT_COLLATION_OID is fairly arbitrary here.  We just
     * want to take the fast "deterministic" path in texteq().
     */
    return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
}

static uint32
texthashfast(Datum datum)
{
    /* analogously here as in texteqfast() */
    return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
}

static bool
oidvectoreqfast(Datum a, Datum b)
{
    return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
}

static uint32
oidvectorhashfast(Datum datum)
{
    return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
}

/* Lookup support functions for a type. */
static void
GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
{
    switch (keytype)
    {
        case BOOLOID:
            *hashfunc = charhashfast;
            *fasteqfunc = chareqfast;
            *eqfunc = F_BOOLEQ;
            break;
        case CHAROID:
            *hashfunc = charhashfast;
            *fasteqfunc = chareqfast;
            *eqfunc = F_CHAREQ;
            break;
        case NAMEOID:
            *hashfunc = namehashfast;
            *fasteqfunc = nameeqfast;
            *eqfunc = F_NAMEEQ;
            break;
        case INT2OID:
            *hashfunc = int2hashfast;
            *fasteqfunc = int2eqfast;
            *eqfunc = F_INT2EQ;
            break;
        case INT4OID:
            *hashfunc = int4hashfast;
            *fasteqfunc = int4eqfast;
            *eqfunc = F_INT4EQ;
            break;
        case TEXTOID:
            *hashfunc = texthashfast;
            *fasteqfunc = texteqfast;
            *eqfunc = F_TEXTEQ;
            break;
        case OIDOID:
        case REGPROCOID:
        case REGPROCEDUREOID:
        case REGOPEROID:
        case REGOPERATOROID:
        case REGCLASSOID:
        case REGTYPEOID:
        case REGCOLLATIONOID:
        case REGCONFIGOID:
        case REGDICTIONARYOID:
        case REGROLEOID:
        case REGNAMESPACEOID:
            *hashfunc = int4hashfast;
            *fasteqfunc = int4eqfast;
            *eqfunc = F_OIDEQ;
            break;
        case OIDVECTOROID:
            *hashfunc = oidvectorhashfast;
            *fasteqfunc = oidvectoreqfast;
            *eqfunc = F_OIDVECTOREQ;
            break;
        default:
            elog(FATAL, "type %u not supported as catcache key", keytype);
            *hashfunc = NULL;   /* keep compiler quiet */

            *eqfunc = InvalidOid;
            break;
    }
}

/*
 *        CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                             Datum v1, Datum v2, Datum v3, Datum v4)
{
    uint32      hashValue = 0;
    uint32      oneHash;
    CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

    CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
               cache->cc_relname, nkeys, cache);

    switch (nkeys)
    {
        case 4:
            oneHash = (cc_hashfunc[3]) (v4);
            hashValue ^= pg_rotate_left32(oneHash, 24);
            /* FALLTHROUGH */
        case 3:
            oneHash = (cc_hashfunc[2]) (v3);
            hashValue ^= pg_rotate_left32(oneHash, 16);
            /* FALLTHROUGH */
        case 2:
            oneHash = (cc_hashfunc[1]) (v2);
            hashValue ^= pg_rotate_left32(oneHash, 8);
            /* FALLTHROUGH */
        case 1:
            oneHash = (cc_hashfunc[0]) (v1);
            hashValue ^= oneHash;
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return hashValue;
}
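
/*
 * Illustrative note (added commentary, not from the original file): for a
 * two-key cache the switch above computes
 *
 *        hashValue = hash0(v1) ^ pg_rotate_left32(hash1(v2), 8)
 *
 * Rotating each column's hash by a different amount before XORing keeps
 * equal key values in different columns from cancelling each other out.
 */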

/*
 *        CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
    Datum       v1 = 0,
                v2 = 0,
                v3 = 0,
                v4 = 0;
    bool        isNull = false;
    int        *cc_keyno = cache->cc_keyno;
    TupleDesc   cc_tupdesc = cache->cc_tupdesc;

    /* Now extract key fields from tuple, insert into scankey */
    switch (nkeys)
    {
        case 4:
            v4 = fastgetattr(tuple,
                             cc_keyno[3],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 3:
            v3 = fastgetattr(tuple,
                             cc_keyno[2],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 2:
            v2 = fastgetattr(tuple,
                             cc_keyno[1],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 1:
            v1 = fastgetattr(tuple,
                             cc_keyno[0],
                             cc_tupdesc,
                             &isNull);
            Assert(!isNull);
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}

/*
 *        CatalogCacheCompareTuple
 *
 * Compare a tuple to the passed arguments.
 */
static inline bool
CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
                         const Datum *cachekeys,
                         const Datum *searchkeys)
{
    const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
            return false;
    }
    return true;
}


#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
    slist_iter  iter;
    long        cc_searches = 0;
    long        cc_hits = 0;
    long        cc_neg_hits = 0;
    long        cc_newloads = 0;
    long        cc_invals = 0;
    long        cc_lsearches = 0;
    long        cc_lhits = 0;

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        if (cache->cc_ntup == 0 && cache->cc_searches == 0)
            continue;           /* don't print unused caches */
        elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
             cache->cc_relname,
             cache->cc_indexoid,
             cache->cc_ntup,
             cache->cc_searches,
             cache->cc_hits,
             cache->cc_neg_hits,
             cache->cc_hits + cache->cc_neg_hits,
             cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
             cache->cc_invals,
             cache->cc_lsearches,
             cache->cc_lhits);
        cc_searches += cache->cc_searches;
        cc_hits += cache->cc_hits;
        cc_neg_hits += cache->cc_neg_hits;
        cc_newloads += cache->cc_newloads;
        cc_invals += cache->cc_invals;
        cc_lsearches += cache->cc_lsearches;
        cc_lhits += cache->cc_lhits;
    }
    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
         CacheHdr->ch_ntup,
         cc_searches,
         cc_hits,
         cc_neg_hits,
         cc_hits + cc_neg_hits,
         cc_newloads,
         cc_searches - cc_hits - cc_neg_hits - cc_newloads,
         cc_searches - cc_hits - cc_neg_hits,
         cc_invals,
         cc_lsearches,
         cc_lhits);
}
#endif                          /* CATCACHE_STATS */


/*
 *        CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
    Assert(ct->refcount == 0);
    Assert(ct->my_cache == cache);

    if (ct->c_list)
    {
        /*
         * The cleanest way to handle this is to call CatCacheRemoveCList,
         * which will recurse back to me, and the recursive call will do the
         * work.  Set the "dead" flag to make sure it does recurse.
         */
        ct->dead = true;
        CatCacheRemoveCList(cache, ct->c_list);
        return;                 /* nothing left to do */
    }

    /* delink from linked list */
    dlist_delete(&ct->cache_elem);

    /*
     * Free keys when we're dealing with a negative entry, normal entries just
     * point into tuple, allocated together with the CatCTup.
     */
    if (ct->negative)
        CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
                         cache->cc_keyno, ct->keys);

    pfree(ct);

    --cache->cc_ntup;
    --CacheHdr->ch_ntup;
}

/*
 *        CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
    int         i;

    Assert(cl->refcount == 0);
    Assert(cl->my_cache == cache);

    /* delink from member tuples */
    for (i = cl->n_members; --i >= 0;)
    {
        CatCTup    *ct = cl->members[i];

        Assert(ct->c_list == cl);
        ct->c_list = NULL;
        /* if the member is dead and now has no references, remove it */
        if (
#ifndef CATCACHE_FORCE_RELEASE
            ct->dead &&
#endif
            ct->refcount == 0)
            CatCacheRemoveCTup(cache, ct);
    }

    /* delink from linked list */
    dlist_delete(&cl->cache_elem);

    /* free associated column data */
    CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
                     cache->cc_keyno, cl->keys);

    pfree(cl);
}


/*
 *    CatCacheInvalidate
 *
 *    Invalidate entries in the specified cache, given a hash value.
 *
 *    We delete cache entries that match the hash value, whether positive
 *    or negative.  We don't care whether the invalidation is the result
 *    of a tuple insertion or a deletion.
 *
 *    We used to try to match positive cache entries by TID, but that is
 *    unsafe after a VACUUM FULL on a system catalog: an inval event could
 *    be queued before VACUUM FULL, and then processed afterwards, when the
 *    target tuple that has to be invalidated has a different TID than it
 *    did when the event was created.  So now we just compare hash values and
 *    accept the small risk of unnecessary invalidations due to false matches.
 *
 *    This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
    Index       hashIndex;
    dlist_mutable_iter iter;

    CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

    /*
     * We don't bother to check whether the cache has finished initialization
     * yet; if not, there will be no entries in it so no problem.
     */

    /*
     * Invalidate *all* CatCLists in this cache; it's too hard to tell which
     * searches might still be correct, so just zap 'em all.
     */
    dlist_foreach_modify(iter, &cache->cc_lists)
    {
        CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

        if (cl->refcount > 0)
            cl->dead = true;
        else
            CatCacheRemoveCList(cache, cl);
    }

    /*
     * inspect the proper hash bucket for tuple matches
     */
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
    dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
    {
        CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

        if (hashValue == ct->hash_value)
        {
            if (ct->refcount > 0 ||
                (ct->c_list && ct->c_list->refcount > 0))
            {
                ct->dead = true;
                /* list, if any, was marked dead above */
                Assert(ct->c_list == NULL || ct->c_list->dead);
            }
            else
                CatCacheRemoveCTup(cache, ct);
            CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
            /* could be multiple matches, so keep looking! */
        }
    }
}

/* ----------------------------------------------------------------
 *                     public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
    /*
     * Purely for paranoia, check that context doesn't exist; caller probably
     * did so already.
     */
    if (!CacheMemoryContext)
        CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                   "CacheMemoryContext",
                                                   ALLOCSET_DEFAULT_SIZES);
}


/*
 *        ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
    dlist_mutable_iter iter;
    int         i;

    /* Remove each list in this cache, or at least mark it dead */
    dlist_foreach_modify(iter, &cache->cc_lists)
    {
        CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

        if (cl->refcount > 0)
            cl->dead = true;
        else
            CatCacheRemoveCList(cache, cl);
    }

    /* Remove each tuple in this cache, or at least mark it dead */
    for (i = 0; i < cache->cc_nbuckets; i++)
    {
        dlist_head *bucket = &cache->cc_bucket[i];

        dlist_foreach_modify(iter, bucket)
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

            if (ct->refcount > 0 ||
                (ct->c_list && ct->c_list->refcount > 0))
            {
                ct->dead = true;
                /* list, if any, was marked dead above */
                Assert(ct->c_list == NULL || ct->c_list->dead);
            }
            else
                CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
        }
    }
}

/*
 *        ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
    slist_iter  iter;

    CACHE_elog(DEBUG2, "ResetCatalogCaches called");

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        ResetCatalogCache(cache);
    }

    CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}

/*
 *        CatalogCacheFlushCatalog
 *
 *    Flush all catcache entries that came from the specified system catalog.
 *    This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *    tuples very likely now have different TIDs than before.  (At one point
 *    we also tried to force re-execution of CatalogCacheInitializeCache for
 *    the cache(s) on that catalog.  This is a bad idea since it leads to all
 *    kinds of trouble if a cache flush occurs while loading cache entries.
 *    We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *    rather than relying on the relcache to keep a tupdesc for us.  Of course
 *    this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
    slist_iter  iter;

    CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        /* Does this cache store tuples of the target catalog? */
        if (cache->cc_reloid == catId)
        {
            /* Yes, so flush all its contents */
            ResetCatalogCache(cache);

            /* Tell inval.c to call syscache callbacks for this cache */
            CallSyscacheCallbacks(cache->id, 0);
        }
    }

    CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}

/*
 *        InitCatCache
 *
 *    This allocates and initializes a cache for a system catalog relation.
 *    Actually, the cache is only partially initialized to avoid opening the
 *    relation.  The relation will be opened and the rest of the cache
 *    structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
    elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
         cp->cc_reloid, cp->cc_indexoid, cp->id, \
         cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
             Oid reloid,
             Oid indexoid,
             int nkeys,
             const int *key,
             int nbuckets)
{
    CatCache   *cp;
    MemoryContext oldcxt;
    int         i;

    /*
     * nbuckets is the initial number of hash buckets to use in this catcache.
     * It will be enlarged later if it becomes too full.
     *
     * nbuckets must be a power of two.  We check this via Assert rather than
     * a full runtime check because the values will be coming from constant
     * tables.
     *
     * If you're confused by the power-of-two check, see comments in
     * bitmapset.c for an explanation.
     */
    Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

    /*
     * first switch to the cache context so our allocations do not vanish at
     * the end of a transaction
     */
    if (!CacheMemoryContext)
        CreateCacheMemoryContext();

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * if first time through, initialize the cache group header
     */
    if (CacheHdr == NULL)
    {
        CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
        slist_init(&CacheHdr->ch_caches);
        CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
        /* set up to dump stats at backend exit */
        on_proc_exit(CatCachePrintStats, 0);
#endif
    }

    /*
     * Allocate a new cache structure, aligning to a cacheline boundary
     *
     * Note: we rely on zeroing to initialize all the dlist headers correctly
     */
    cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
                                     MCXT_ALLOC_ZERO);
    cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

    /*
     * initialize the cache's relation information for the relation
     * corresponding to this cache, and initialize some of the new cache's
     * other internal fields.  But don't open the relation yet.
     */
    cp->id = id;
    cp->cc_relname = "(not known yet)";
    cp->cc_reloid = reloid;
    cp->cc_indexoid = indexoid;
    cp->cc_relisshared = false; /* temporary */
    cp->cc_tupdesc = (TupleDesc) NULL;
    cp->cc_ntup = 0;
    cp->cc_nbuckets = nbuckets;
    cp->cc_nkeys = nkeys;
    for (i = 0; i < nkeys; ++i)
        cp->cc_keyno[i] = key[i];

    /*
     * new cache is initialized as far as we can go for now. print some
     * debugging information, if appropriate.
     */
    InitCatCache_DEBUG2;

    /*
     * add completed cache to top of group header's list
     */
    slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

    /*
     * back to the old context before we return...
     */
    MemoryContextSwitchTo(oldcxt);

    return cp;
}

/*
 * Enlarge a catcache, doubling the number of buckets.
 */
static void
RehashCatCache(CatCache *cp)
{
    dlist_head *newbucket;
    int         newnbuckets;
    int         i;

    elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
         cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);

    /* Allocate a new, larger, hash table. */
    newnbuckets = cp->cc_nbuckets * 2;
    newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));

    /* Move all entries from old hash table to new. */
    for (i = 0; i < cp->cc_nbuckets; i++)
    {
        dlist_mutable_iter iter;

        dlist_foreach_modify(iter, &cp->cc_bucket[i])
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);
            int         hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);

            dlist_delete(iter.cur);
            dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
        }
    }

    /* Switch to the new array. */
    pfree(cp->cc_bucket);
    cp->cc_nbuckets = newnbuckets;
    cp->cc_bucket = newbucket;
}
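
/*
 * Illustrative note (added commentary, not from the original file):
 * doubling keeps cc_nbuckets a power of two, which HASH_INDEX() depends on.
 * Each moved entry lands either in its old bucket i or in bucket
 * i + (old cc_nbuckets), depending on the next bit of its stored
 * hash_value, so no hash values need to be recomputed.
 */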

/*
 *        CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
    elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
         cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
    if (cache->cc_keyno[i] > 0) { \
        elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
             i+1, cache->cc_nkeys, cache->cc_keyno[i], \
             TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
    } else { \
        elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
             i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
    } \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
    Relation    relation;
    MemoryContext oldcxt;
    TupleDesc   tupdesc;
    int         i;

    CatalogCacheInitializeCache_DEBUG1;

    relation = table_open(cache->cc_reloid, AccessShareLock);

    /*
     * switch to the cache context so our allocations do not vanish at the end
     * of a transaction
     */
    Assert(CacheMemoryContext != NULL);

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * copy the relcache's tuple descriptor to permanent cache storage
     */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

    /*
     * save the relation's name and relisshared flag, too (cc_relname is used
     * only for debugging purposes)
     */
    cache->cc_relname = pstrdup(RelationGetRelationName(relation));
    cache->cc_relisshared = RelationGetForm(relation)->relisshared;

    /*
     * return to the caller's memory context and close the rel
     */
    MemoryContextSwitchTo(oldcxt);

    table_close(relation, AccessShareLock);

    CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
               cache->cc_relname, cache->cc_nkeys);

    /*
     * initialize cache's key information
     */
    for (i = 0; i < cache->cc_nkeys; ++i)
    {
        Oid         keytype;
        RegProcedure eqfunc;

        CatalogCacheInitializeCache_DEBUG2;

        if (cache->cc_keyno[i] > 0)
        {
            Form_pg_attribute attr = TupleDescAttr(tupdesc,
                                                   cache->cc_keyno[i] - 1);

            keytype = attr->atttypid;
            /* cache key columns should always be NOT NULL */
            Assert(attr->attnotnull);
        }
        else
        {
            if (cache->cc_keyno[i] < 0)
                elog(FATAL, "sys attributes are not supported in caches");
            keytype = OIDOID;
        }

        GetCCHashEqFuncs(keytype,
                         &cache->cc_hashfunc[i],
                         &eqfunc,
                         &cache->cc_fastequal[i]);

        /*
         * Do equality-function lookup (we assume this won't need a catalog
         * lookup for any supported type)
         */
        fmgr_info_cxt(eqfunc,
                      &cache->cc_skey[i].sk_func,
                      CacheMemoryContext);

        /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
        cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

        /* Fill in sk_strategy as well --- always standard equality */
        cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
        cache->cc_skey[i].sk_subtype = InvalidOid;
        /* If a catcache key requires a collation, it must be C collation */
        cache->cc_skey[i].sk_collation = C_COLLATION_OID;

        CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
                   cache->cc_relname, i, cache);
    }

    /*
     * mark this cache fully initialized
     */
    cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    if (touch_index &&
        cache->id != AMOID &&
        cache->id != AMNAME)
    {
        Relation    idesc;

        /*
         * We must lock the underlying catalog before opening the index to
         * avoid deadlock, since index_open could possibly result in reading
         * this same catalog, and if anyone else is exclusive-locking this
         * catalog and index they'll be doing it in that order.
         */
        LockRelationOid(cache->cc_reloid, AccessShareLock);
        idesc = index_open(cache->cc_indexoid, AccessShareLock);

        /*
         * While we've got the index open, let's check that it's unique (and
         * not just deferrable-unique, thank you very much).  This is just to
         * catch thinkos in definitions of new catcaches, so we don't worry
         * about the pg_am indexes not getting tested.
         */
        Assert(idesc->rd_index->indisunique &&
               idesc->rd_index->indimmediate);

        index_close(idesc, AccessShareLock);
        UnlockRelationOid(cache->cc_reloid, AccessShareLock);
    }
}


/*
 *        IndexScanOK
 *
 *        This function checks for tuples that will be fetched by
 *        IndexSupportInitialize() during relcache initialization for
 *        certain system indexes that support critical syscaches.
 *        We can't use an indexscan to fetch these, else we'll get into
 *        infinite recursion.  A plain heap scan will work, however.
 *        Once we have completed relcache initialization (signaled by
 *        criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *        Similarly, during backend startup we have to be able to use the
 *        pg_authid, pg_auth_members and pg_database syscaches for
 *        authentication even if we don't yet have relcache entries for those
 *        catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
    switch (cache->id)
    {
        case INDEXRELID:

            /*
             * Rather than tracking exactly which indexes have to be loaded
             * before we can use indexscans (which changes from time to time),
             * just force all pg_index searches to be heap scans until we've
             * built the critical relcaches.
             */
            if (!criticalRelcachesBuilt)
                return false;
            break;

        case AMOID:
        case AMNAME:

            /*
             * Always do heap scans in pg_am, because it's so small there's
             * not much point in an indexscan anyway.  We *must* do this when
             * initially building critical relcache entries, but we might as
             * well just always do it.
             */
            return false;

        case AUTHNAME:
        case AUTHOID:
        case AUTHMEMMEMROLE:
        case DATABASEOID:

            /*
             * Protect authentication lookups occurring before relcache has
             * collected entries for shared indexes.
             */
            if (!criticalSharedRelcachesBuilt)
                return false;
            break;

        default:
            break;
    }

    /* Normal case, allow index scan */
    return true;
}

/*
 *    SearchCatCache
 *
 *        This call searches a system cache for a tuple, opening the relation
 *        if necessary (on the first access to a particular cache).
 *
 *        The result is NULL if not found, or a pointer to a HeapTuple in
 *        the cache.  The caller must not modify the tuple, and must call
 *        ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
               Datum v1,
               Datum v2,
               Datum v3,
               Datum v4)
{
    return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
 * arguments.  The compiler can inline the body and unroll loops, making them
 * a bit faster than SearchCatCache().
 */

HeapTuple
SearchCatCache1(CatCache *cache,
                Datum v1)
{
    return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}


HeapTuple
SearchCatCache2(CatCache *cache,
                Datum v1, Datum v2)
{
    return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}


HeapTuple
SearchCatCache3(CatCache *cache,
                Datum v1, Datum v2, Datum v3)
{
    return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}


HeapTuple
SearchCatCache4(CatCache *cache,
                Datum v1, Datum v2, Datum v3, Datum v4)
{
    return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
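
/*
 * Usage sketch (added commentary, not from the original file; in practice
 * callers normally go through the syscache.c wrappers).  A hypothetical
 * direct caller would pair each successful search with a release:
 *
 *        HeapTuple    tup;
 *
 *        tup = SearchCatCache1(cache, ObjectIdGetDatum(someoid));
 *        if (HeapTupleIsValid(tup))
 *        {
 *            ... inspect, but do not modify, the tuple ...
 *            ReleaseCatCache(tup);
 *        }
 */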

/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
                       int nkeys,
                       Datum v1,
                       Datum v2,
                       Datum v3,
                       Datum v4)
{
    Datum       arguments[CATCACHE_MAXKEYS];
    uint32      hashValue;
    Index       hashIndex;
    dlist_iter  iter;
    dlist_head *bucket;
    CatCTup    *ct;

    /* Make sure we're in an xact, even if this ends up being a cache hit */
    Assert(IsTransactionState());

    Assert(cache->cc_nkeys == nkeys);

    /*
     * one-time startup overhead for each cache
     */
    if (unlikely(cache->cc_tupdesc == NULL))
        CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
    cache->cc_searches++;
#endif

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * find the hash bucket in which to look for the tuple
     */
    hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

    /*
     * scan the hash bucket until we find a match or exhaust our tuples
     *
     * Note: it's okay to use dlist_foreach here, even though we modify the
     * dlist within the loop, because we don't continue the loop afterwards.
     */
    bucket = &cache->cc_bucket[hashIndex];
    dlist_foreach(iter, bucket)
    {
        ct = dlist_container(CatCTup, cache_elem, iter.cur);

        if (ct->dead)
            continue;           /* ignore dead entries */

        if (ct->hash_value != hashValue)
            continue;           /* quickly skip entry if wrong hash val */

        if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
            continue;

        /*
         * We found a match in the cache.  Move it to the front of the list
         * for its hashbucket, in order to speed subsequent searches.  (The
         * most frequently accessed elements in any hashbucket will tend to be
         * near the front of the hashbucket's list.)
         */
        dlist_move_head(bucket, &ct->cache_elem);

        /*
         * If it's a positive entry, bump its refcount and return it.  If it's
         * negative, we can report failure to the caller.
         */
        if (!ct->negative)
        {
            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

            CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
                       cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_hits++;
#endif

            return &ct->tuple;
        }
        else
        {
            CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
                       cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_neg_hits++;
#endif

            return NULL;
        }
    }

    return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}

/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
                   int nkeys,
                   uint32 hashValue,
                   Index hashIndex,
                   Datum v1,
                   Datum v2,
                   Datum v3,
                   Datum v4)
{
    ScanKeyData cur_skey[CATCACHE_MAXKEYS];
    Relation    relation;
    SysScanDesc scandesc;
    HeapTuple   ntp;
    CatCTup    *ct;
    bool        stale;
    Datum       arguments[CATCACHE_MAXKEYS];

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * Tuple was not found in cache, so we have to try to retrieve it directly
     * from the relation.  If found, we will add it to the cache; if not
     * found, we will add a negative cache entry instead.
     *
     * NOTE: it is possible for recursive cache lookups to occur while reading
     * the relation --- for example, due to shared-cache-inval messages being
     * processed during table_open().  This is OK.  It's even possible for one
     * of those lookups to find and enter the very same tuple we are trying to
     * fetch here.  If that happens, we will enter a second copy of the tuple
     * into the cache.  The first copy will never be referenced again, and
     * will eventually age out of the cache, so there's no functional problem.
     * This case is rare enough that it's not worth expending extra cycles to
     * detect.
     *
     * Another case, which we *must* handle, is that the tuple could become
     * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
     * AcceptInvalidationMessages can run during TOAST table access).  We do
     * not want to return already-stale catcache entries, so we loop around
     * and do the table scan again if that happens.
     */
    relation = table_open(cache->cc_reloid, AccessShareLock);

    do
    {
        /*
         * Ok, need to make a lookup in the relation, copy the scankey and
         * fill out any per-call fields.  (We must re-do this when retrying,
         * because systable_beginscan scribbles on the scankey.)
         */
        memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
        cur_skey[0].sk_argument = v1;
        cur_skey[1].sk_argument = v2;
        cur_skey[2].sk_argument = v3;
        cur_skey[3].sk_argument = v4;

        scandesc = systable_beginscan(relation,
                                      cache->cc_indexoid,
                                      IndexScanOK(cache, cur_skey),
                                      NULL,
                                      nkeys,
                                      cur_skey);

        ct = NULL;
        stale = false;

        while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
        {
            ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
                                         hashValue, hashIndex);
            /* upon failure, we must start the scan over */
            if (ct == NULL)
            {
                stale = true;
                break;
            }
            /* immediately set the refcount to 1 */
            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
            break;              /* assume only one match */
        }

        systable_endscan(scandesc);
    } while (stale);

    table_close(relation, AccessShareLock);

    /*
     * If tuple was not found, we need to build a negative cache entry
     * containing a fake tuple.  The fake tuple has the correct key columns,
     * but nulls everywhere else.
     *
     * In bootstrap mode, we don't build negative entries, because the cache
     * invalidation mechanism isn't alive and can't clear them if the tuple
     * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
     * cache inval for that.)
     */
    if (ct == NULL)
    {
        if (IsBootstrapProcessingMode())
            return NULL;

        ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
                                     hashValue, hashIndex);

        /* Creating a negative cache entry shouldn't fail */
        Assert(ct != NULL);

        CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
        CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
                   cache->cc_relname, hashIndex);

        /*
         * We are not returning the negative entry to the caller, so leave its
         * refcount zero.
         */

        return NULL;
    }

    CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
               cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
    CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
               cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
    cache->cc_newloads++;
#endif

    return &ct->tuple;
}
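
/*
 * Illustrative timeline for the retry loop above (added commentary, not
 * from the original file):
 *
 * 1. systable_getnext() returns a visible catalog tuple.
 * 2. CatalogCacheCreateEntry() detoasts it; TOAST table access may process
 *    pending invalidations (AcceptInvalidationMessages can run there).
 * 3. An invalidation marks the just-read tuple outdated, so the visibility
 *    recheck in CatalogCacheCreateEntry() fails and it returns NULL.
 * 4. "stale" is set, the do/while loop restarts the scan, and the current
 *    tuple version is fetched instead of caching the stale one.
 */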

/*
 *    ReleaseCatCache
 *
 *    Decrement the reference count of a catcache entry (releasing the
 *    hold grabbed by a successful SearchCatCache).
 *
 *    NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *    will be freed as soon as their refcount goes to zero.  In combination
 *    with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *    to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety checks to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);
    Assert(ct->refcount > 0);

    ct->refcount--;
    ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

    if (
#ifndef CATCACHE_FORCE_RELEASE
        ct->dead &&
#endif
        ct->refcount == 0 &&
        (ct->c_list == NULL || ct->c_list->refcount == 0))
        CatCacheRemoveCTup(ct->my_cache, ct);
}


/*
 *    GetCatCacheHashValue
 *
 *        Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
                     Datum v1,
                     Datum v2,
                     Datum v3,
                     Datum v4)
{
    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    /*
     * calculate the hash value
     */
    return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}


/*
 *    SearchCatCacheList
 *
 *        Generate a list of all tuples matching a partial key (that is,
 *        a key specifying just the first K of the cache's N key columns).
 *
 *        It doesn't make any sense to specify all of the cache's key columns
 *        here: since the key is unique, there could be at most one match, so
 *        you ought to use SearchCatCache() instead.  Hence this function
 *        takes one fewer Datum argument than SearchCatCache() does.
 *
 *        The caller must not modify the list object or the pointed-to tuples,
 *        and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
                   int nkeys,
                   Datum v1,
                   Datum v2,
                   Datum v3)
{
    Datum       v4 = 0;         /* dummy last-column value */
    Datum       arguments[CATCACHE_MAXKEYS];
    uint32      lHashValue;
    dlist_iter  iter;
    CatCList   *cl;
    CatCTup    *ct;
    List       *volatile ctlist;
    ListCell   *ctlist_item;
    int         nmembers;
    bool        ordered;
    HeapTuple   ntp;
    MemoryContext oldcxt;
    int         i;

    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
    cache->cc_lsearches++;
#endif

    /* Initialize local parameter array */
    arguments[0] = v1;
    arguments[1] = v2;
    arguments[2] = v3;
    arguments[3] = v4;

    /*
     * compute a hash value of the given keys for faster search.  We don't
     * presently divide the CatCList items into buckets, but this still lets
     * us skip non-matching items quickly most of the time.
     */
    lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);

    /*
     * scan the items until we find a match or exhaust our list
     *
     * Note: it's okay to use dlist_foreach here, even though we modify the
     * dlist within the loop, because we don't continue the loop afterwards.
     */
    dlist_foreach(iter, &cache->cc_lists)
    {
        cl = dlist_container(CatCList, cache_elem, iter.cur);

        if (cl->dead)
            continue;           /* ignore dead entries */

        if (cl->hash_value != lHashValue)
            continue;           /* quickly skip entry if wrong hash val */

        /*
         * see if the cached list matches our key.
         */
        if (cl->nkeys != nkeys)
            continue;

        if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
            continue;

        /*
         * We found a matching list.  Move the list to the front of the
         * cache's list-of-lists, to speed subsequent searches.  (We do not
         * move the members to the fronts of their hashbucket lists, however,
         * since there's no point in that unless they are searched for
         * individually.)
         */
        dlist_move_head(&cache->cc_lists, &cl->cache_elem);

        /* Bump the list's refcount and return it */
        ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
        cl->refcount++;
        ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

        CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
                   cache->cc_relname);

#ifdef CATCACHE_STATS
        cache->cc_lhits++;
#endif

        return cl;
    }

    /*
     * List was not found in cache, so we have to build it by reading the
     * relation.  For each matching tuple found in the relation, use an
     * existing cache entry if possible, else build a new one.
     *
     * We have to bump the member refcounts temporarily to ensure they won't
     * get dropped from the cache while loading other members.  We use a
     * PG_TRY block to ensure we can undo those refcounts if we get an error
     * before we finish constructing the CatCList.  ctlist must be valid
     * throughout the PG_TRY block.
     */
    ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

    ctlist = NIL;

    PG_TRY();
    {
        ScanKeyData cur_skey[CATCACHE_MAXKEYS];
        Relation    relation;
        SysScanDesc scandesc;
        bool        stale;

        relation = table_open(cache->cc_reloid, AccessShareLock);

        do
        {
            /*
             * Ok, need to make a lookup in the relation, copy the scankey and
             * fill out any per-call fields.  (We must re-do this when
             * retrying, because systable_beginscan scribbles on the scankey.)
             */
            memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
            cur_skey[0].sk_argument = v1;
            cur_skey[1].sk_argument = v2;
            cur_skey[2].sk_argument = v3;
            cur_skey[3].sk_argument = v4;

            scandesc = systable_beginscan(relation,
                                          cache->cc_indexoid,
                                          IndexScanOK(cache, cur_skey),
                                          NULL,
                                          nkeys,
                                          cur_skey);

            /* The list will be ordered iff we are doing an index scan */
            ordered = (scandesc->irel != NULL);

            stale = false;

            while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
            {
                uint32      hashValue;
                Index       hashIndex;
                bool        found = false;
                dlist_head *bucket;

                /*
                 * See if there's an entry for this tuple already.
                 */
                ct = NULL;
                hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
                hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

                bucket = &cache->cc_bucket[hashIndex];
                dlist_foreach(iter, bucket)
                {
                    ct = dlist_container(CatCTup, cache_elem, iter.cur);

                    if (ct->dead || ct->negative)
                        continue;   /* ignore dead and negative entries */

                    if (ct->hash_value != hashValue)
                        continue;   /* quickly skip entry if wrong hash val */

                    if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
                        continue;   /* not same tuple */

                    /*
                     * Found a match, but can't use it if it belongs to
                     * another list already
                     */
                    if (ct->c_list)
                        continue;

                    found = true;
                    break;      /* A-OK */
                }

                if (!found)
                {
                    /* We didn't find a usable entry, so make a new one */
                    ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
                                                 hashValue, hashIndex);
                    /* upon failure, we must start the scan over */
                    if (ct == NULL)
                    {
                        /*
                         * Release refcounts on any items we already had.  We
                         * dare not try to free them if they're now
                         * unreferenced, since an error while doing that would
                         * result in the PG_CATCH below doing extra refcount
                         * decrements.  Besides, we'll likely re-adopt those
                         * items in the next iteration, so it's not worth
                         * complicating matters to try to get rid of them.
                         */
                        foreach(ctlist_item, ctlist)
                        {
                            ct = (CatCTup *) lfirst(ctlist_item);
                            Assert(ct->c_list == NULL);
                            Assert(ct->refcount > 0);
                            ct->refcount--;
                        }
                        /* Reset ctlist in preparation for new try */
                        ctlist = NIL;
                        stale = true;
                        break;
                    }
                }

                /* Careful here: add entry to ctlist, then bump its refcount */
                /* This way leaves state correct if lappend runs out of memory */
                ctlist = lappend(ctlist, ct);
                ct->refcount++;
            }

            systable_endscan(scandesc);
        } while (stale);

        table_close(relation, AccessShareLock);

        /* Now we can build the CatCList entry. */
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
        nmembers = list_length(ctlist);
        cl = (CatCList *)
            palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

        /* Extract key values */
        CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
                         arguments, cl->keys);
        MemoryContextSwitchTo(oldcxt);

        /*
         * We are now past the last thing that could trigger an elog before we
         * have finished building the CatCList and remembering it in the
         * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
         * we'd better do so before we start marking the members as belonging
         * to the list.
         */
    }
    PG_CATCH();
    {
        foreach(ctlist_item, ctlist)
        {
            ct = (CatCTup *) lfirst(ctlist_item);
            Assert(ct->c_list == NULL);
            Assert(ct->refcount > 0);
            ct->refcount--;
            if (
#ifndef CATCACHE_FORCE_RELEASE
                ct->dead &&
#endif
                ct->refcount == 0 &&
                (ct->c_list == NULL || ct->c_list->refcount == 0))
                CatCacheRemoveCTup(cache, ct);
        }

        PG_RE_THROW();
    }
    PG_END_TRY();

    cl->cl_magic = CL_MAGIC;
    cl->my_cache = cache;
    cl->refcount = 0;           /* for the moment */
    cl->dead = false;
    cl->ordered = ordered;
    cl->nkeys = nkeys;
    cl->hash_value = lHashValue;
    cl->n_members = nmembers;

    i = 0;
    foreach(ctlist_item, ctlist)
    {
        cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
        Assert(ct->c_list == NULL);
        ct->c_list = cl;
        /* release the temporary refcount on the member */
        Assert(ct->refcount > 0);
        ct->refcount--;
        /* mark list dead if any members already dead */
        if (ct->dead)
            cl->dead = true;
    }
    Assert(i == nmembers);

    dlist_push_head(&cache->cc_lists, &cl->cache_elem);

    /* Finally, bump the list's refcount and return it */
    cl->refcount++;
    ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

    CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
               cache->cc_relname, nmembers);

    return cl;
}
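
/*
 * Usage sketch (added commentary, not from the original file).  A
 * hypothetical caller walks the members array and then releases the list:
 *
 *        CatCList   *list = SearchCatCacheList(cache, 1, v1, 0, 0);
 *
 *        for (int i = 0; i < list->n_members; i++)
 *        {
 *            HeapTuple    tup = &list->members[i]->tuple;
 *
 *            ... inspect, but do not modify, tup ...
 *        }
 *        ReleaseCatCacheList(list);
 */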

/*
 *    ReleaseCatCacheList
 *
 *    Decrement the reference count of a catcache list.
 */
void
ReleaseCatCacheList(CatCList *list)
{
    /* Safety checks to ensure we were handed a cache entry */
    Assert(list->cl_magic == CL_MAGIC);
    Assert(list->refcount > 0);
    list->refcount--;
    ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

    if (
#ifndef CATCACHE_FORCE_RELEASE
        list->dead &&
#endif
        list->refcount == 0)
        CatCacheRemoveCList(list->my_cache, list);
}


/*
 * CatalogCacheCreateEntry
 *        Create a new CatCTup entry, copying the given HeapTuple and other
 *        supplied data into it.  The new entry initially has refcount 0.
 *
 * To create a normal cache entry, ntp must be the HeapTuple just fetched
 * from scandesc, and "arguments" is not used.  To create a negative cache
 * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
 * keys to use.  In either case, hashValue/hashIndex are the hash values
 * computed from the cache keys.
 *
 * Returns NULL if we attempt to detoast the tuple and observe that it
 * became stale.  (This cannot happen for a negative entry.)  Caller must
 * retry the tuple lookup in that case.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
                        Datum *arguments,
                        uint32 hashValue, Index hashIndex)
{
    CatCTup    *ct;
    HeapTuple   dtp;
    MemoryContext oldcxt;

    if (ntp)
    {
        int         i;

        /*
         * The visibility recheck below essentially never fails during our
         * regression tests, and there's no easy way to force it to fail for
         * testing purposes.  To ensure we have test coverage for the retry
         * paths in our callers, make debug builds randomly fail about 0.1%
         * of the time through this code path, even when there are no toasted
         * fields.
         */
#ifdef USE_ASSERT_CHECKING
        if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
            return NULL;
#endif

        /*
         * If there are any out-of-line toasted fields in the tuple, expand
         * them in-line.  This saves cycles during later use of the catcache
         * entry, and also protects us against the possibility of the toast
         * tuples being freed before we attempt to fetch them, in case of
         * something using a slightly stale catcache entry.
         */
        if (HeapTupleHasExternal(ntp))
        {
            dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);

            /*
             * The tuple could become stale while we are doing toast table
             * access (since AcceptInvalidationMessages can run then), so we
             * must recheck its visibility afterwards.
             */
            if (!systable_recheck_tuple(scandesc, ntp))
            {
                heap_freetuple(dtp);
                return NULL;
            }
        }
        else
            dtp = ntp;

        /* Allocate memory for CatCTup and the cached tuple in one go */
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

        ct = (CatCTup *) palloc(sizeof(CatCTup) +
                                MAXIMUM_ALIGNOF + dtp->t_len);
        ct->tuple.t_len = dtp->t_len;
        ct->tuple.t_self = dtp->t_self;
        ct->tuple.t_tableOid = dtp->t_tableOid;
        ct->tuple.t_data = (HeapTupleHeader)
            MAXALIGN(((char *) ct) + sizeof(CatCTup));
        /* copy tuple contents */
        memcpy((char *) ct->tuple.t_data,
               (const char *) dtp->t_data,
               dtp->t_len);
        MemoryContextSwitchTo(oldcxt);

        if (dtp != ntp)
            heap_freetuple(dtp);

        /* extract keys - they'll point into the tuple if not by-value */
        for (i = 0; i < cache->cc_nkeys; i++)
        {
            Datum       atp;
            bool        isnull;

            atp = heap_getattr(&ct->tuple,
                               cache->cc_keyno[i],
                               cache->cc_tupdesc,
                               &isnull);
            Assert(!isnull);
            ct->keys[i] = atp;
        }
    }
    else
    {
        /* Set up keys for a negative cache entry */
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
        ct = (CatCTup *) palloc(sizeof(CatCTup));

        /*
         * Store keys - they'll point into separately allocated memory if not
         * by-value.
         */
        CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
                         arguments, ct->keys);
        MemoryContextSwitchTo(oldcxt);
    }

    /*
     * Finish initializing the CatCTup header, and add it to the cache's
     * linked list and counts.
     */
    ct->ct_magic = CT_MAGIC;
    ct->my_cache = cache;
    ct->c_list = NULL;
    ct->refcount = 0;           /* for the moment */
    ct->dead = false;
    ct->negative = (ntp == NULL);
    ct->hash_value = hashValue;

    dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

    cache->cc_ntup++;
    CacheHdr->ch_ntup++;

    /*
     * If the hash table has become too full, enlarge the buckets array.
     * Quite arbitrarily, we enlarge when fill factor > 2.
     */
    if (cache->cc_ntup > cache->cc_nbuckets * 2)
        RehashCatCache(cache);

    return ct;
}
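
/*
 * Illustrative note (added commentary, not from the original file): the
 * single palloc above produces this layout, with the tuple body stored
 * right after the header at the next MAXALIGN boundary:
 *
 *        [ CatCTup header | alignment pad | tuple data (t_len bytes) ]
 *                                           ^ ct->tuple.t_data
 *
 * That is why positive entries need no separate key storage: ct->keys[]
 * can point straight into this in-line tuple copy.
 */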

/*
 * Helper routine that frees keys stored in the keys array.
 */
static void
CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
{
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        int         attnum = attnos[i];
        Form_pg_attribute att;

        /* system attributes are not supported in caches */
        Assert(attnum > 0);

        att = TupleDescAttr(tupdesc, attnum - 1);

        if (!att->attbyval)
            pfree(DatumGetPointer(keys[i]));
    }
}

/*
 * Helper routine that copies the keys in the srckeys array into the dstkeys
 * one, guaranteeing that the datums are fully allocated in the current memory
 * context.
 */
static void
CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
                 Datum *srckeys, Datum *dstkeys)
{
    int         i;

    /*
     * XXX: memory and lookup performance could possibly be improved by
     * storing all keys in one allocation.
     */

    for (i = 0; i < nkeys; i++)
    {
        int         attnum = attnos[i];
        Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
        Datum       src = srckeys[i];
        NameData    srcname;

        /*
         * Must be careful in case the caller passed a C string where a NAME
         * is wanted: convert the given argument to a correctly padded NAME.
         * Otherwise the memcpy() done by datumCopy() could fall off the end
         * of memory.
         */
        if (att->atttypid == NAMEOID)
        {
            namestrcpy(&srcname, DatumGetCString(src));
            src = NameGetDatum(&srcname);
        }

        dstkeys[i] = datumCopy(src,
                               att->attbyval,
                               att->attlen);
    }
}
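
/*
 * Illustrative note (added commentary, not from the original file): this is
 * what lets SearchCatCache() callers pass a bare C string such as "pg_type"
 * for a NAME key.  namestrcpy() copies it into a NameData buffer padded out
 * to NAMEDATALEN bytes, so the subsequent datumCopy() of att->attlen bytes
 * reads from valid memory.
 */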

/*
 *    PrepareToInvalidateCacheTuple()
 *
 *    This is part of a rather subtle chain of events, so pay attention:
 *
 *    When a tuple is inserted or deleted, it cannot be flushed from the
 *    catcaches immediately, for reasons explained at the top of cache/inval.c.
 *    Instead we have to add entry(s) for the tuple to a list of pending tuple
 *    invalidations that will be done at the end of the command or transaction.
 *
 *    The lists of tuples that need to be flushed are kept by inval.c.  This
 *    routine is a helper routine for inval.c.  Given a tuple belonging to
 *    the specified relation, find all catcaches it could be in, compute the
 *    correct hash value for each such catcache, and call the specified
 *    function to record the cache id and hash value in inval.c's lists.
 *    SysCacheInvalidate will be called later, if appropriate,
 *    using the recorded information.
 *
 *    For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *    For an update, we are called just once, with tuple being the old tuple
 *    version and newtuple the new version.  We should make two list entries
 *    if the tuple's hash value changed, but only one if it didn't.
 *
 *    Note that it is irrelevant whether the given tuple is actually loaded
 *    into the catcache at the moment.  Even if it's not there now, it might
 *    be by the end of the command, or there might be a matching negative entry
 *    to flush --- or other backends' caches might have such entries --- so
 *    we have to make list entries to flush it later.
 *
 *    Also note that it's not an error if there are no catcaches for the
 *    specified relation.  inval.c doesn't know exactly which rels have
 *    catcaches --- it will call this routine for any tuple that's in a
 *    system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
                              HeapTuple tuple,
                              HeapTuple newtuple,
                              void (*function) (int, uint32, Oid))
{
    slist_iter  iter;
    Oid         reloid;

    CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

    /*
     * sanity checks
     */
    Assert(RelationIsValid(relation));
    Assert(HeapTupleIsValid(tuple));
    Assert(PointerIsValid(function));
    Assert(CacheHdr != NULL);

    reloid = RelationGetRelid(relation);

    /* ----------------
     *    for each cache
     *       if the cache contains tuples from the specified relation
     *           compute the tuple's hash value(s) in this cache,
     *           and call the passed function to register the information.
     * ----------------
     */

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
        uint32      hashvalue;
        Oid         dbid;

        if (ccp->cc_reloid != reloid)
            continue;

        /* Just in case cache hasn't finished initialization yet... */
        if (ccp->cc_tupdesc == NULL)
            CatalogCacheInitializeCache(ccp);

        hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
        dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

        (*function) (ccp->id, hashvalue, dbid);

        if (newtuple)
        {
            uint32      newhashvalue;

            newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);

            if (newhashvalue != hashvalue)
                (*function) (ccp->id, newhashvalue, dbid);
        }
    }
}

/*
 * Subroutines for warning about reference leaks.  These are exported so
 * that resowner.c can call them.
 */
void
PrintCatCacheLeakWarning(HeapTuple tuple)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety check to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);

    elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
         ct->my_cache->cc_relname, ct->my_cache->id,
         ItemPointerGetBlockNumber(&(tuple->t_self)),
         ItemPointerGetOffsetNumber(&(tuple->t_self)),
         ct->refcount);
}

void
PrintCatCacheListLeakWarning(CatCList *list)
{
    elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
         list->my_cache->cc_relname, list->my_cache->id,
         list, list->refcount);
}