/*-------------------------------------------------------------------------
 *
 * catcache.c
 *    System catalog cache for tuples matching a key.
 *
 * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/utils/cache/catcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "access/heapam.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_type.h"
#include "miscadmin.h"
#ifdef CATCACHE_STATS
#include "storage/ipc.h"        /* for on_proc_exit */
#endif
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/fmgroids.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/resowner_private.h"
#include "utils/syscache.h"
#include "utils/tqual.h"


/* #define CACHEDEBUG */    /* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs.  Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))

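/*
 * Worked example (illustrative values): with a 256-bucket table the mask
 * is 0xFF, so HASH_INDEX(0xDEADBEEF, 256) keeps only the low byte and
 * yields bucket 0xEF.
 */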

/*
 *      variables, macros and other stuff
 */

#ifdef CACHEDEBUG
#define CACHE1_elog(a,b)                elog(a,b)
#define CACHE2_elog(a,b,c)              elog(a,b,c)
#define CACHE3_elog(a,b,c,d)            elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)          elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)        elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)      elog(a,b,c,d,e,f,g)
#else
#define CACHE1_elog(a,b)
#define CACHE2_elog(a,b,c)
#define CACHE3_elog(a,b,c,d)
#define CACHE4_elog(a,b,c,d,e)
#define CACHE5_elog(a,b,c,d,e,f)
#define CACHE6_elog(a,b,c,d,e,f,g)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;


static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
                             ScanKey cur_skey);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
                                  HeapTuple tuple);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                        uint32 hashValue, Index hashIndex,
                        bool negative);
static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);


/*
 *                  internal support functions
 */

/*
 * Look up the hash and equality functions for system types that are used
 * as cache key fields.
 *
 * XXX this should be replaced by catalog lookups,
 * but that seems to pose considerable risk of circularity...
 */
static void
GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
{
    switch (keytype)
    {
        case BOOLOID:
            *hashfunc = hashchar;
            *eqfunc = F_BOOLEQ;
            break;
        case CHAROID:
            *hashfunc = hashchar;
            *eqfunc = F_CHAREQ;
            break;
        case NAMEOID:
            *hashfunc = hashname;
            *eqfunc = F_NAMEEQ;
            break;
        case INT2OID:
            *hashfunc = hashint2;
            *eqfunc = F_INT2EQ;
            break;
        case INT2VECTOROID:
            *hashfunc = hashint2vector;
            *eqfunc = F_INT2VECTOREQ;
            break;
        case INT4OID:
            *hashfunc = hashint4;
            *eqfunc = F_INT4EQ;
            break;
        case TEXTOID:
            *hashfunc = hashtext;
            *eqfunc = F_TEXTEQ;
            break;
        case OIDOID:
        case REGPROCOID:
        case REGPROCEDUREOID:
        case REGOPEROID:
        case REGOPERATOROID:
        case REGCLASSOID:
        case REGTYPEOID:
        case REGCONFIGOID:
        case REGDICTIONARYOID:
            *hashfunc = hashoid;
            *eqfunc = F_OIDEQ;
            break;
        case OIDVECTOROID:
            *hashfunc = hashoidvector;
            *eqfunc = F_OIDVECTOREQ;
            break;
        default:
            elog(FATAL, "type %u not supported as catcache key", keytype);
            *hashfunc = NULL;   /* keep compiler quiet */
            *eqfunc = InvalidOid;
            break;
    }
}
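
/*
 * Usage sketch: for a NAME key column the lookup resolves to hashname
 * and F_NAMEEQ, per the switch above:
 *
 *      PGFunction   hashfunc;
 *      RegProcedure eqfunc;
 *
 *      GetCCHashEqFuncs(NAMEOID, &hashfunc, &eqfunc);
 *      Assert(hashfunc == hashname && eqfunc == F_NAMEEQ);
 */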

/*
 *      CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
{
    uint32      hashValue = 0;
    uint32      oneHash;

    CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
                cache->cc_relname,
                nkeys,
                cache);

    switch (nkeys)
    {
        case 4:
            oneHash =
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
                                                   cur_skey[3].sk_argument));
            hashValue ^= oneHash << 24;
            hashValue ^= oneHash >> 8;
            /* FALLTHROUGH */
        case 3:
            oneHash =
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
                                                   cur_skey[2].sk_argument));
            hashValue ^= oneHash << 16;
            hashValue ^= oneHash >> 16;
            /* FALLTHROUGH */
        case 2:
            oneHash =
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
                                                   cur_skey[1].sk_argument));
            hashValue ^= oneHash << 8;
            hashValue ^= oneHash >> 24;
            /* FALLTHROUGH */
        case 1:
            oneHash =
                DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
                                                   cur_skey[0].sk_argument));
            hashValue ^= oneHash;
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", nkeys);
            break;
    }

    return hashValue;
}
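
/*
 * Note on the mixing above: for key column N the pair of XORs is
 * equivalent to XORing in oneHash rotated left by 8*N bits, since the
 * two shifted halves never overlap.  E.g. in case 2,
 *
 *      (oneHash << 8) ^ (oneHash >> 24) == rotl(oneHash, 8)
 *
 * so the same key value contributes differently depending on its
 * position in the key list.
 */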

/*
 *      CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
{
    ScanKeyData cur_skey[CATCACHE_MAXKEYS];
    bool        isNull = false;

    /* Copy pre-initialized overhead data for scankey */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));

    /* Now extract key fields from tuple, insert into scankey */
    switch (cache->cc_nkeys)
    {
        case 4:
            cur_skey[3].sk_argument =
                (cache->cc_key[3] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[3],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 3:
            cur_skey[2].sk_argument =
                (cache->cc_key[2] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[2],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 2:
            cur_skey[1].sk_argument =
                (cache->cc_key[1] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[1],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            /* FALLTHROUGH */
        case 1:
            cur_skey[0].sk_argument =
                (cache->cc_key[0] == ObjectIdAttributeNumber)
                ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
                : fastgetattr(tuple,
                              cache->cc_key[0],
                              cache->cc_tupdesc,
                              &isNull);
            Assert(!isNull);
            break;
        default:
            elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
            break;
    }

    return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
}
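
/*
 * Sketch: for a one-key cache keyed on the OID system attribute, the
 * above reduces to hashing the tuple's own OID, i.e. roughly
 *
 *      hash = DatumGetUInt32(DirectFunctionCall1(hashoid,
 *                  ObjectIdGetDatum(HeapTupleGetOid(tuple))));
 */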


#ifdef CATCACHE_STATS

static void
CatCachePrintStats(int code, Datum arg)
{
    slist_iter  iter;
    long        cc_searches = 0;
    long        cc_hits = 0;
    long        cc_neg_hits = 0;
    long        cc_newloads = 0;
    long        cc_invals = 0;
    long        cc_lsearches = 0;
    long        cc_lhits = 0;

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        if (cache->cc_ntup == 0 && cache->cc_searches == 0)
            continue;           /* don't print unused caches */
        elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
             cache->cc_relname,
             cache->cc_indexoid,
             cache->cc_ntup,
             cache->cc_searches,
             cache->cc_hits,
             cache->cc_neg_hits,
             cache->cc_hits + cache->cc_neg_hits,
             cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
             cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
             cache->cc_invals,
             cache->cc_lsearches,
             cache->cc_lhits);
        cc_searches += cache->cc_searches;
        cc_hits += cache->cc_hits;
        cc_neg_hits += cache->cc_neg_hits;
        cc_newloads += cache->cc_newloads;
        cc_invals += cache->cc_invals;
        cc_lsearches += cache->cc_lsearches;
        cc_lhits += cache->cc_lhits;
    }
    elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
         CacheHdr->ch_ntup,
         cc_searches,
         cc_hits,
         cc_neg_hits,
         cc_hits + cc_neg_hits,
         cc_newloads,
         cc_searches - cc_hits - cc_neg_hits - cc_newloads,
         cc_searches - cc_hits - cc_neg_hits,
         cc_invals,
         cc_lsearches,
         cc_lhits);
}
#endif   /* CATCACHE_STATS */


/*
 *      CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
    Assert(ct->refcount == 0);
    Assert(ct->my_cache == cache);

    if (ct->c_list)
    {
        /*
         * The cleanest way to handle this is to call CatCacheRemoveCList,
         * which will recurse back to me, and the recursive call will do the
         * work.  Set the "dead" flag to make sure it does recurse.
         */
        ct->dead = true;
        CatCacheRemoveCList(cache, ct->c_list);
        return;                 /* nothing left to do */
    }

    /* delink from linked list */
    dlist_delete(&ct->cache_elem);

    /* free associated tuple data */
    if (ct->tuple.t_data != NULL)
        pfree(ct->tuple.t_data);
    pfree(ct);

    --cache->cc_ntup;
    --CacheHdr->ch_ntup;
}

/*
 *      CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
    int         i;

    Assert(cl->refcount == 0);
    Assert(cl->my_cache == cache);

    /* delink from member tuples */
    for (i = cl->n_members; --i >= 0;)
    {
        CatCTup    *ct = cl->members[i];

        Assert(ct->c_list == cl);
        ct->c_list = NULL;
        /* if the member is dead and now has no references, remove it */
        if (
#ifndef CATCACHE_FORCE_RELEASE
            ct->dead &&
#endif
            ct->refcount == 0)
            CatCacheRemoveCTup(cache, ct);
    }

    /* delink from linked list */
    dlist_delete(&cl->cache_elem);

    /* free associated tuple data */
    if (cl->tuple.t_data != NULL)
        pfree(cl->tuple.t_data);
    pfree(cl);
}


/*
 *  CatalogCacheIdInvalidate
 *
 *  Invalidate entries in the specified cache, given a hash value.
 *
 *  We delete cache entries that match the hash value, whether positive
 *  or negative.  We don't care whether the invalidation is the result
 *  of a tuple insertion or a deletion.
 *
 *  We used to try to match positive cache entries by TID, but that is
 *  unsafe after a VACUUM FULL on a system catalog: an inval event could
 *  be queued before VACUUM FULL, and then processed afterwards, when the
 *  target tuple that has to be invalidated has a different TID than it
 *  did when the event was created.  So now we just compare hash values and
 *  accept the small risk of unnecessary invalidations due to false matches.
 *
 *  This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
{
    slist_iter cache_iter;

    CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");

    /*
     * inspect caches to find the proper cache
     */
    slist_foreach(cache_iter, &CacheHdr->ch_caches)
    {
        CatCache   *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
        Index       hashIndex;
        dlist_mutable_iter iter;

        if (cacheId != ccp->id)
            continue;

        /*
         * We don't bother to check whether the cache has finished
         * initialization yet; if not, there will be no entries in it so no
         * problem.
         */

        /*
         * Invalidate *all* CatCLists in this cache; it's too hard to tell
         * which searches might still be correct, so just zap 'em all.
         */
        dlist_foreach_modify(iter, &ccp->cc_lists)
        {
            CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

            if (cl->refcount > 0)
                cl->dead = true;
            else
                CatCacheRemoveCList(ccp, cl);
        }

        /*
         * inspect the proper hash bucket for tuple matches
         */
        hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
        dlist_foreach_modify(iter, &ccp->cc_bucket[hashIndex])
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

            if (hashValue == ct->hash_value)
            {
                if (ct->refcount > 0 ||
                    (ct->c_list && ct->c_list->refcount > 0))
                {
                    ct->dead = true;
                    /* list, if any, was marked dead above */
                    Assert(ct->c_list == NULL || ct->c_list->dead);
                }
                else
                    CatCacheRemoveCTup(ccp, ct);
                CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
#ifdef CATCACHE_STATS
                ccp->cc_invals++;
#endif
                /* could be multiple matches, so keep looking! */
            }
        }
        break;                  /* need only search this one cache */
    }
}
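
/*
 * Caller sketch (hypothetical, field names illustrative): inval.c would
 * invoke this when processing a shared-inval catcache message, roughly
 *
 *      CatalogCacheIdInvalidate(msg->cc.id, msg->cc.hashValue);
 */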

/* ----------------------------------------------------------------
 *                     public functions
 * ----------------------------------------------------------------
 */


/*
 * Standard routine for creating cache context if it doesn't exist yet
 *
 * There are a lot of places (probably far more than necessary) that check
 * whether CacheMemoryContext exists yet and want to create it if not.
 * We centralize knowledge of exactly how to create it here.
 */
void
CreateCacheMemoryContext(void)
{
    /*
     * Purely for paranoia, check that context doesn't exist; caller probably
     * did so already.
     */
    if (!CacheMemoryContext)
        CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                   "CacheMemoryContext",
                                                   ALLOCSET_DEFAULT_MINSIZE,
                                                   ALLOCSET_DEFAULT_INITSIZE,
                                                   ALLOCSET_DEFAULT_MAXSIZE);
}


/*
 *      AtEOXact_CatCache
 *
 * Clean up catcaches at end of main transaction (either commit or abort)
 *
 * As of PostgreSQL 8.1, catcache pins should get released by the
 * ResourceOwner mechanism.  This routine is just a debugging
 * cross-check that no pins remain.
 */
void
AtEOXact_CatCache(bool isCommit)
{
#ifdef USE_ASSERT_CHECKING
    if (assert_enabled)
    {
        slist_iter  cache_iter;

        slist_foreach(cache_iter, &CacheHdr->ch_caches)
        {
            CatCache   *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
            dlist_iter  iter;
            int         i;

            /* Check CatCLists */
            dlist_foreach(iter, &ccp->cc_lists)
            {
                CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

                Assert(cl->cl_magic == CL_MAGIC);
                Assert(cl->refcount == 0);
                Assert(!cl->dead);
            }

            /* Check individual tuples */
            for (i = 0; i < ccp->cc_nbuckets; i++)
            {
                dlist_head *bucket = &ccp->cc_bucket[i];

                dlist_foreach(iter, bucket)
                {
                    CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

                    Assert(ct->ct_magic == CT_MAGIC);
                    Assert(ct->refcount == 0);
                    Assert(!ct->dead);
                }
            }
        }
    }
#endif
}

/*
 *      ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
    dlist_mutable_iter iter;
    int         i;

    /* Remove each list in this cache, or at least mark it dead */
    dlist_foreach_modify(iter, &cache->cc_lists)
    {
        CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

        if (cl->refcount > 0)
            cl->dead = true;
        else
            CatCacheRemoveCList(cache, cl);
    }

    /* Remove each tuple in this cache, or at least mark it dead */
    for (i = 0; i < cache->cc_nbuckets; i++)
    {
        dlist_head *bucket = &cache->cc_bucket[i];

        dlist_foreach_modify(iter, bucket)
        {
            CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

            if (ct->refcount > 0 ||
                (ct->c_list && ct->c_list->refcount > 0))
            {
                ct->dead = true;
                /* list, if any, was marked dead above */
                Assert(ct->c_list == NULL || ct->c_list->dead);
            }
            else
                CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
            cache->cc_invals++;
#endif
        }
    }
}

/*
 *      ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
    slist_iter    iter;

    CACHE1_elog(DEBUG2, "ResetCatalogCaches called");

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        ResetCatalogCache(cache);
    }

    CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
}

/*
 *      CatalogCacheFlushCatalog
 *
 *  Flush all catcache entries that came from the specified system catalog.
 *  This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 *  tuples very likely now have different TIDs than before.  (At one point
 *  we also tried to force re-execution of CatalogCacheInitializeCache for
 *  the cache(s) on that catalog.  This is a bad idea since it leads to all
 *  kinds of trouble if a cache flush occurs while loading cache entries.
 *  We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 *  rather than relying on the relcache to keep a tupdesc for us.  Of course
 *  this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
    slist_iter  iter;

    CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

        /* Does this cache store tuples of the target catalog? */
        if (cache->cc_reloid == catId)
        {
            /* Yes, so flush all its contents */
            ResetCatalogCache(cache);

            /* Tell inval.c to call syscache callbacks for this cache */
            CallSyscacheCallbacks(cache->id, 0);
        }
    }

    CACHE1_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}

/*
 *      InitCatCache
 *
 *  This allocates and initializes a cache for a system catalog relation.
 *  Actually, the cache is only partially initialized to avoid opening the
 *  relation.  The relation will be opened and the rest of the cache
 *  structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
    elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
         cp->cc_reloid, cp->cc_indexoid, cp->id, \
         cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
             Oid reloid,
             Oid indexoid,
             int nkeys,
             const int *key,
             int nbuckets)
{
    CatCache   *cp;
    MemoryContext oldcxt;
    int         i;

    /*
     * nbuckets is the number of hash buckets to use in this catcache.
     * Currently we just use a hard-wired estimate of an appropriate size for
     * each cache; maybe later make them dynamically resizable?
     *
     * nbuckets must be a power of two.  We check this via Assert rather than
     * a full runtime check because the values will be coming from constant
     * tables.
     *
     * If you're confused by the power-of-two check, see comments in
     * bitmapset.c for an explanation.
     */
    Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

    /*
     * first switch to the cache context so our allocations do not vanish at
     * the end of a transaction
     */
    if (!CacheMemoryContext)
        CreateCacheMemoryContext();

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * if first time through, initialize the cache group header
     */
    if (CacheHdr == NULL)
    {
        CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
        slist_init(&CacheHdr->ch_caches);
        CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
        /* set up to dump stats at backend exit */
        on_proc_exit(CatCachePrintStats, 0);
#endif
    }

    /*
     * allocate a new cache structure
     *
     * Note: we rely on zeroing to initialize all the dlist headers correctly
     */
    cp = (CatCache *) palloc0(sizeof(CatCache) + nbuckets * sizeof(dlist_head));

    /*
     * initialize the cache's relation information for the relation
     * corresponding to this cache, and initialize some of the new cache's
     * other internal fields.  But don't open the relation yet.
     */
    cp->id = id;
    cp->cc_relname = "(not known yet)";
    cp->cc_reloid = reloid;
    cp->cc_indexoid = indexoid;
    cp->cc_relisshared = false; /* temporary */
    cp->cc_tupdesc = (TupleDesc) NULL;
    cp->cc_ntup = 0;
    cp->cc_nbuckets = nbuckets;
    cp->cc_nkeys = nkeys;
    for (i = 0; i < nkeys; ++i)
        cp->cc_key[i] = key[i];

    /*
     * new cache is initialized as far as we can go for now. print some
     * debugging information, if appropriate.
     */
    InitCatCache_DEBUG2;

    /*
     * add completed cache to top of group header's list
     */
    slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

    /*
     * back to the old context before we return...
     */
    MemoryContextSwitchTo(oldcxt);

    return cp;
}
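
/*
 * Usage sketch (after the fashion of syscache.c; bucket count and cache
 * choice are illustrative): a one-key cache on pg_proc keyed by OID
 * might be created as
 *
 *      static const int key[1] = {ObjectIdAttributeNumber};
 *      CatCache   *cc;
 *
 *      cc = InitCatCache(PROCOID, ProcedureRelationId,
 *                        ProcedureOidIndexId, 1, key, 2048);
 */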

/*
 *      CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
    elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
         cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
do { \
        if (cache->cc_key[i] > 0) { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
                i+1, cache->cc_nkeys, cache->cc_key[i], \
                 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
        } else { \
            elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
                i+1, cache->cc_nkeys, cache->cc_key[i]); \
        } \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
    Relation    relation;
    MemoryContext oldcxt;
    TupleDesc   tupdesc;
    int         i;

    CatalogCacheInitializeCache_DEBUG1;

    relation = heap_open(cache->cc_reloid, AccessShareLock);

    /*
     * switch to the cache context so our allocations do not vanish at the end
     * of a transaction
     */
    Assert(CacheMemoryContext != NULL);

    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /*
     * copy the relcache's tuple descriptor to permanent cache storage
     */
    tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

    /*
     * save the relation's name and relisshared flag, too (cc_relname is used
     * only for debugging purposes)
     */
    cache->cc_relname = pstrdup(RelationGetRelationName(relation));
    cache->cc_relisshared = RelationGetForm(relation)->relisshared;

    /*
     * return to the caller's memory context and close the rel
     */
    MemoryContextSwitchTo(oldcxt);

    heap_close(relation, AccessShareLock);

    CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
                cache->cc_relname, cache->cc_nkeys);

    /*
     * initialize cache's key information
     */
    for (i = 0; i < cache->cc_nkeys; ++i)
    {
        Oid         keytype;
        RegProcedure eqfunc;

        CatalogCacheInitializeCache_DEBUG2;

        if (cache->cc_key[i] > 0)
            keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
        else
        {
            if (cache->cc_key[i] != ObjectIdAttributeNumber)
                elog(FATAL, "only sys attr supported in caches is OID");
            keytype = OIDOID;
        }

        GetCCHashEqFuncs(keytype,
                         &cache->cc_hashfunc[i],
                         &eqfunc);

        cache->cc_isname[i] = (keytype == NAMEOID);

        /*
         * Do equality-function lookup (we assume this won't need a catalog
         * lookup for any supported type)
         */
        fmgr_info_cxt(eqfunc,
                      &cache->cc_skey[i].sk_func,
                      CacheMemoryContext);

        /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
        cache->cc_skey[i].sk_attno = cache->cc_key[i];

        /* Fill in sk_strategy as well --- always standard equality */
        cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
        cache->cc_skey[i].sk_subtype = InvalidOid;
        /* Currently, there are no catcaches on collation-aware data types */
        cache->cc_skey[i].sk_collation = InvalidOid;

        CACHE4_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
                    cache->cc_relname,
                    i,
                    cache);
    }

    /*
     * mark this cache fully initialized
     */
    cache->cc_tupdesc = tupdesc;
}

/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

    if (touch_index &&
        cache->id != AMOID &&
        cache->id != AMNAME)
    {
        Relation    idesc;

        /*
         * We must lock the underlying catalog before opening the index to
         * avoid deadlock, since index_open could possibly result in reading
         * this same catalog, and if anyone else is exclusive-locking this
         * catalog and index they'll be doing it in that order.
         */
        LockRelationOid(cache->cc_reloid, AccessShareLock);
        idesc = index_open(cache->cc_indexoid, AccessShareLock);
        index_close(idesc, AccessShareLock);
        UnlockRelationOid(cache->cc_reloid, AccessShareLock);
    }
}


/*
 *      IndexScanOK
 *
 *      This function checks for tuples that will be fetched by
 *      IndexSupportInitialize() during relcache initialization for
 *      certain system indexes that support critical syscaches.
 *      We can't use an indexscan to fetch these, else we'll get into
 *      infinite recursion.  A plain heap scan will work, however.
 *      Once we have completed relcache initialization (signaled by
 *      criticalRelcachesBuilt), we don't have to worry anymore.
 *
 *      Similarly, during backend startup we have to be able to use the
 *      pg_authid and pg_auth_members syscaches for authentication even if
 *      we don't yet have relcache entries for those catalogs' indexes.
 */
static bool
IndexScanOK(CatCache *cache, ScanKey cur_skey)
{
    switch (cache->id)
    {
        case INDEXRELID:

            /*
             * Rather than tracking exactly which indexes have to be loaded
             * before we can use indexscans (which changes from time to time),
             * just force all pg_index searches to be heap scans until we've
             * built the critical relcaches.
             */
            if (!criticalRelcachesBuilt)
                return false;
            break;

        case AMOID:
        case AMNAME:

            /*
             * Always do heap scans in pg_am, because it's so small there's
             * not much point in an indexscan anyway.  We *must* do this when
             * initially building critical relcache entries, but we might as
             * well just always do it.
             */
            return false;

        case AUTHNAME:
        case AUTHOID:
        case AUTHMEMMEMROLE:

            /*
             * Protect authentication lookups occurring before relcache has
             * collected entries for shared indexes.
             */
            if (!criticalSharedRelcachesBuilt)
                return false;
            break;

        default:
            break;
    }

    /* Normal case, allow index scan */
    return true;
}

/*
 *  SearchCatCache
 *
 *      This call searches a system cache for a tuple, opening the relation
 *      if necessary (on the first access to a particular cache).
 *
 *      The result is NULL if not found, or a pointer to a HeapTuple in
 *      the cache.  The caller must not modify the tuple, and must call
 *      ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
               Datum v1,
               Datum v2,
               Datum v3,
               Datum v4)
{
    ScanKeyData cur_skey[CATCACHE_MAXKEYS];
    uint32      hashValue;
    Index       hashIndex;
    dlist_iter  iter;
    dlist_head *bucket;
    CatCTup    *ct;
    Relation    relation;
    SysScanDesc scandesc;
    HeapTuple   ntp;

    /*
     * one-time startup overhead for each cache
     */
    if (cache->cc_tupdesc == NULL)
        CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
    cache->cc_searches++;
#endif

    /*
     * initialize the search key information
     */
    memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
    cur_skey[0].sk_argument = v1;
    cur_skey[1].sk_argument = v2;
    cur_skey[2].sk_argument = v3;
    cur_skey[3].sk_argument = v4;

    /*
     * find the hash bucket in which to look for the tuple
     */
    hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
    hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

    /*
     * scan the hash bucket until we find a match or exhaust our tuples
     *
     * Note: it's okay to use dlist_foreach here, even though we modify the
     * dlist within the loop, because we don't continue the loop afterwards.
     */
    bucket = &cache->cc_bucket[hashIndex];
    dlist_foreach(iter, bucket)
    {
        bool        res;

        ct = dlist_container(CatCTup, cache_elem, iter.cur);

        if (ct->dead)
            continue;           /* ignore dead entries */

        if (ct->hash_value != hashValue)
            continue;           /* quickly skip entry if wrong hash val */

        /*
         * see if the cached tuple matches our key.
         */
        HeapKeyTest(&ct->tuple,
                    cache->cc_tupdesc,
                    cache->cc_nkeys,
                    cur_skey,
                    res);
        if (!res)
            continue;

        /*
         * We found a match in the cache.  Move it to the front of the list
         * for its hashbucket, in order to speed subsequent searches.  (The
         * most frequently accessed elements in any hashbucket will tend to be
         * near the front of the hashbucket's list.)
         */
        dlist_move_head(bucket, &ct->cache_elem);

        /*
         * If it's a positive entry, bump its refcount and return it. If it's
         * negative, we can report failure to the caller.
         */
        if (!ct->negative)
        {
            ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
            ct->refcount++;
            ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_hits++;
#endif

            return &ct->tuple;
        }
        else
        {
            CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
                        cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
            cache->cc_neg_hits++;
#endif

            return NULL;
        }
    }

    /*
     * Tuple was not found in cache, so we have to try to retrieve it directly
     * from the relation.  If found, we will add it to the cache; if not
     * found, we will add a negative cache entry instead.
     *
     * NOTE: it is possible for recursive cache lookups to occur while reading
     * the relation --- for example, due to shared-cache-inval messages being
     * processed during heap_open().  This is OK.  It's even possible for one
     * of those lookups to find and enter the very same tuple we are trying to
     * fetch here.  If that happens, we will enter a second copy of the tuple
     * into the cache.  The first copy will never be referenced again, and
     * will eventually age out of the cache, so there's no functional problem.
     * This case is rare enough that it's not worth expending extra cycles to
     * detect.
     */
    relation = heap_open(cache->cc_reloid, AccessShareLock);

    scandesc = systable_beginscan(relation,
                                  cache->cc_indexoid,
                                  IndexScanOK(cache, cur_skey),
                                  SnapshotNow,
                                  cache->cc_nkeys,
                                  cur_skey);

    ct = NULL;

    while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
    {
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     false);
        /* immediately set the refcount to 1 */
        ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
        ct->refcount++;
        ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
        break;                  /* assume only one match */
    }

    systable_endscan(scandesc);

    heap_close(relation, AccessShareLock);

    /*
     * If tuple was not found, we need to build a negative cache entry
     * containing a fake tuple.  The fake tuple has the correct key columns,
     * but nulls everywhere else.
     *
     * In bootstrap mode, we don't build negative entries, because the cache
     * invalidation mechanism isn't alive and can't clear them if the tuple
     * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
     * cache inval for that.)
     */
    if (ct == NULL)
    {
        if (IsBootstrapProcessingMode())
            return NULL;

        ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
        ct = CatalogCacheCreateEntry(cache, ntp,
                                     hashValue, hashIndex,
                                     true);
        heap_freetuple(ntp);

        CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                    cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
        CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
                    cache->cc_relname, hashIndex);

        /*
         * We are not returning the negative entry to the caller, so leave its
         * refcount zero.
         */

        return NULL;
    }

    CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
                cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
    CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
                cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
    cache->cc_newloads++;
#endif

    return &ct->tuple;
}
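
/*
 * Caller sketch (error handling omitted; syscache.c wraps this pattern):
 *
 *      HeapTuple   tup;
 *
 *      tup = SearchCatCache(cache, ObjectIdGetDatum(someOid), 0, 0, 0);
 *      if (HeapTupleIsValid(tup))
 *      {
 *          ... examine tup, but do not modify it ...
 *          ReleaseCatCache(tup);
 *      }
 */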
01252 
01253 /*
01254  *  ReleaseCatCache
01255  *
01256  *  Decrement the reference count of a catcache entry (releasing the
01257  *  hold grabbed by a successful SearchCatCache).
01258  *
01259  *  NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
01260  *  will be freed as soon as their refcount goes to zero.  In combination
01261  *  with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
01262  *  to catch references to already-released catcache entries.
01263  */
01264 void
01265 ReleaseCatCache(HeapTuple tuple)
01266 {
01267     CatCTup    *ct = (CatCTup *) (((char *) tuple) -
01268                                   offsetof(CatCTup, tuple));
01269 
01270     /* Safety checks to ensure we were handed a cache entry */
01271     Assert(ct->ct_magic == CT_MAGIC);
01272     Assert(ct->refcount > 0);
01273 
01274     ct->refcount--;
01275     ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
01276 
01277     if (
01278 #ifndef CATCACHE_FORCE_RELEASE
01279         ct->dead &&
01280 #endif
01281         ct->refcount == 0 &&
01282         (ct->c_list == NULL || ct->c_list->refcount == 0))
01283         CatCacheRemoveCTup(ct->my_cache, ct);
01284 }
01285 
01286 
01287 /*
01288  *  GetCatCacheHashValue
01289  *
01290  *      Compute the hash value for a given set of search keys.
01291  *
01292  * The reason for exposing this as part of the API is that the hash value is
01293  * exposed in cache invalidation operations, so there are places outside the
01294  * catcache code that need to be able to compute the hash values.
01295  */
01296 uint32
01297 GetCatCacheHashValue(CatCache *cache,
01298                      Datum v1,
01299                      Datum v2,
01300                      Datum v3,
01301                      Datum v4)
01302 {
01303     ScanKeyData cur_skey[CATCACHE_MAXKEYS];
01304 
01305     /*
01306      * one-time startup overhead for each cache
01307      */
01308     if (cache->cc_tupdesc == NULL)
01309         CatalogCacheInitializeCache(cache);
01310 
01311     /*
01312      * initialize the search key information
01313      */
01314     memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
01315     cur_skey[0].sk_argument = v1;
01316     cur_skey[1].sk_argument = v2;
01317     cur_skey[2].sk_argument = v3;
01318     cur_skey[3].sk_argument = v4;
01319 
01320     /*
01321      * calculate the hash value
01322      */
01323     return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
01324 }
01325 
01326 
01327 /*
01328  *  SearchCatCacheList
01329  *
01330  *      Generate a list of all tuples matching a partial key (that is,
01331  *      a key specifying just the first K of the cache's N key columns).
01332  *
01333  *      The caller must not modify the list object or the pointed-to tuples,
01334  *      and must call ReleaseCatCacheList() when done with the list.
01335  */
01336 CatCList *
01337 SearchCatCacheList(CatCache *cache,
01338                    int nkeys,
01339                    Datum v1,
01340                    Datum v2,
01341                    Datum v3,
01342                    Datum v4)
01343 {
01344     ScanKeyData cur_skey[CATCACHE_MAXKEYS];
01345     uint32      lHashValue;
01346     dlist_iter  iter;
01347     CatCList   *cl;
01348     CatCTup    *ct;
01349     List       *volatile ctlist;
01350     ListCell   *ctlist_item;
01351     int         nmembers;
01352     bool        ordered;
01353     HeapTuple   ntp;
01354     MemoryContext oldcxt;
01355     int         i;
01356 
01357     /*
01358      * one-time startup overhead for each cache
01359      */
01360     if (cache->cc_tupdesc == NULL)
01361         CatalogCacheInitializeCache(cache);
01362 
01363     Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
01364 
01365 #ifdef CATCACHE_STATS
01366     cache->cc_lsearches++;
01367 #endif
01368 
01369     /*
01370      * initialize the search key information
01371      */
01372     memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
01373     cur_skey[0].sk_argument = v1;
01374     cur_skey[1].sk_argument = v2;
01375     cur_skey[2].sk_argument = v3;
01376     cur_skey[3].sk_argument = v4;
01377 
01378     /*
01379      * compute a hash value of the given keys for faster search.  We don't
01380      * presently divide the CatCList items into buckets, but this still lets
01381      * us skip non-matching items quickly most of the time.
01382      */
01383     lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
01384 
01385     /*
01386      * scan the items until we find a match or exhaust our list
01387      *
01388      * Note: it's okay to use dlist_foreach here, even though we modify the
01389      * dlist within the loop, because we don't continue the loop afterwards.
01390      */
01391     dlist_foreach(iter, &cache->cc_lists)
01392     {
01393         bool        res;
01394 
01395         cl = dlist_container(CatCList, cache_elem, iter.cur);
01396 
01397         if (cl->dead)
01398             continue;           /* ignore dead entries */
01399 
01400         if (cl->hash_value != lHashValue)
01401             continue;           /* quickly skip entry if wrong hash val */
01402 
01403         /*
01404          * see if the cached list matches our key.
01405          */
01406         if (cl->nkeys != nkeys)
01407             continue;
01408         HeapKeyTest(&cl->tuple,
01409                     cache->cc_tupdesc,
01410                     nkeys,
01411                     cur_skey,
01412                     res);
01413         if (!res)
01414             continue;
01415 
01416         /*
01417          * We found a matching list.  Move the list to the front of the
01418          * cache's list-of-lists, to speed subsequent searches.  (We do not
01419          * move the members to the fronts of their hashbucket lists, however,
01420          * since there's no point in that unless they are searched for
01421          * individually.)
01422          */
01423         dlist_move_head(&cache->cc_lists, &cl->cache_elem);
01424 
01425         /* Bump the list's refcount and return it */
01426         ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
01427         cl->refcount++;
01428         ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
01429 
01430         CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
01431                     cache->cc_relname);
01432 
01433 #ifdef CATCACHE_STATS
01434         cache->cc_lhits++;
01435 #endif
01436 
01437         return cl;
01438     }
01439 
01440     /*
01441      * List was not found in cache, so we have to build it by reading the
01442      * relation.  For each matching tuple found in the relation, use an
01443      * existing cache entry if possible, else build a new one.
01444      *
01445      * We have to bump the member refcounts temporarily to ensure they won't
01446      * get dropped from the cache while loading other members. We use a PG_TRY
01447      * block to ensure we can undo those refcounts if we get an error before
01448      * we finish constructing the CatCList.
01449      */
01450     ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
01451 
01452     ctlist = NIL;
01453 
01454     PG_TRY();
01455     {
01456         Relation    relation;
01457         SysScanDesc scandesc;
01458 
01459         relation = heap_open(cache->cc_reloid, AccessShareLock);
01460 
01461         scandesc = systable_beginscan(relation,
01462                                       cache->cc_indexoid,
01463                                       IndexScanOK(cache, cur_skey),
01464                                       SnapshotNow,
01465                                       nkeys,
01466                                       cur_skey);
01467 
01468         /* The list will be ordered iff we are doing an index scan */
01469         ordered = (scandesc->irel != NULL);
01470 
01471         while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
01472         {
01473             uint32      hashValue;
01474             Index       hashIndex;
01475             bool        found = false;
01476             dlist_head *bucket;
01477 
01478             /*
01479              * See if there's an entry for this tuple already.
01480              */
01481             ct = NULL;
01482             hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
01483             hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
01484 
01485             bucket = &cache->cc_bucket[hashIndex];
01486             dlist_foreach(iter, bucket)
01487             {
01488                 ct = dlist_container(CatCTup, cache_elem, iter.cur);
01489 
01490                 if (ct->dead || ct->negative)
01491                     continue;   /* ignore dead and negative entries */
01492 
01493                 if (ct->hash_value != hashValue)
01494                     continue;   /* quickly skip entry if wrong hash val */
01495 
01496                 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
01497                     continue;   /* not same tuple */
01498 
01499                 /*
01500                  * Found a match, but can't use it if it belongs to another
01501                  * list already
01502                  */
01503                 if (ct->c_list)
01504                     continue;
01505 
01506                 found = true;
01507                 break;          /* A-OK */
01508             }
01509 
01510             if (!found)
01511             {
01512                 /* We didn't find a usable entry, so make a new one */
01513                 ct = CatalogCacheCreateEntry(cache, ntp,
01514                                              hashValue, hashIndex,
01515                                              false);
01516             }
01517 
01518             /* Careful here: add entry to ctlist, then bump its refcount */
01519             /* This way leaves state correct if lappend runs out of memory */
            ctlist = lappend(ctlist, ct);
            ct->refcount++;
        }

        systable_endscan(scandesc);

        heap_close(relation, AccessShareLock);

        /*
         * Now we can build the CatCList entry.  First we need a dummy tuple
         * containing the key values...
         */
        ntp = build_dummy_tuple(cache, nkeys, cur_skey);
        oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
        nmembers = list_length(ctlist);
        cl = (CatCList *)
            palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
        heap_copytuple_with_tuple(ntp, &cl->tuple);
        MemoryContextSwitchTo(oldcxt);
        heap_freetuple(ntp);

        /*
         * We are now past the last thing that could trigger an elog before we
         * have finished building the CatCList and remembering it in the
         * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
         * we'd better do so before we start marking the members as belonging
         * to the list.
         */

    }
    PG_CATCH();
    {
        foreach(ctlist_item, ctlist)
        {
            ct = (CatCTup *) lfirst(ctlist_item);
            Assert(ct->c_list == NULL);
            Assert(ct->refcount > 0);
            ct->refcount--;
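
            /*
             * With CATCACHE_FORCE_RELEASE defined, the ct->dead test below
             * is compiled out, so any member whose refcount reaches zero is
             * removed immediately; that debugging mode helps catch callers
             * that go on using an entry after releasing it.  (The same
             * idiom appears in ReleaseCatCacheList below.)
             */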
            if (
#ifndef CATCACHE_FORCE_RELEASE
                ct->dead &&
#endif
                ct->refcount == 0 &&
                (ct->c_list == NULL || ct->c_list->refcount == 0))
                CatCacheRemoveCTup(cache, ct);
        }

        PG_RE_THROW();
    }
    PG_END_TRY();

    cl->cl_magic = CL_MAGIC;
    cl->my_cache = cache;
    cl->refcount = 0;           /* for the moment */
    cl->dead = false;
    cl->ordered = ordered;
    cl->nkeys = nkeys;
    cl->hash_value = lHashValue;
    cl->n_members = nmembers;

    i = 0;
    foreach(ctlist_item, ctlist)
    {
        cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
        Assert(ct->c_list == NULL);
        ct->c_list = cl;
        /* release the temporary refcount on the member */
        Assert(ct->refcount > 0);
        ct->refcount--;
        /* mark list dead if any members already dead */
        if (ct->dead)
            cl->dead = true;
    }
    Assert(i == nmembers);

    dlist_push_head(&cache->cc_lists, &cl->cache_elem);

    /* Finally, bump the list's refcount and return it */
    cl->refcount++;
    ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

    CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
                cache->cc_relname, nmembers);

    return cl;
}

/*
 *  ReleaseCatCacheList
 *
 *  Decrement the reference count of a catcache list.
 */
void
ReleaseCatCacheList(CatCList *list)
{
    /* Safety checks to ensure we were handed a cache entry */
    Assert(list->cl_magic == CL_MAGIC);
    Assert(list->refcount > 0);
    list->refcount--;
    ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

    if (
#ifndef CATCACHE_FORCE_RELEASE
        list->dead &&
#endif
        list->refcount == 0)
        CatCacheRemoveCList(list->my_cache, list);
}
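
/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * pair SearchCatCacheList with ReleaseCatCacheList, scanning the member
 * tuples in between.  Real callers normally go through the
 * SearchSysCacheList wrappers in syscache.h; the cache pointer and key
 * shown here are hypothetical.
 *
 *      CatCList   *catlist;
 *      int         i;
 *
 *      catlist = SearchCatCacheList(cache, 1, key1, 0, 0, 0);
 *      for (i = 0; i < catlist->n_members; i++)
 *      {
 *          HeapTuple   tup = &catlist->members[i]->tuple;
 *
 *          ... inspect tup, treating it as read-only ...
 *      }
 *      ReleaseCatCacheList(catlist);
 */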


/*
 * CatalogCacheCreateEntry
 *      Create a new CatCTup entry, copying the given HeapTuple and other
 *      supplied data into it.  The new entry initially has refcount 0.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
                        uint32 hashValue, Index hashIndex, bool negative)
{
    CatCTup    *ct;
    HeapTuple   dtp;
    MemoryContext oldcxt;

    /*
     * If there are any out-of-line toasted fields in the tuple, expand them
     * in-line.  This saves cycles during later use of the catcache entry,
     * and also protects us against the possibility of the toast tuples
     * being freed before we attempt to fetch them, in case something is
     * using a slightly stale catcache entry.
     */
    if (HeapTupleHasExternal(ntp))
        dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
    else
        dtp = ntp;

    /*
     * Allocate CatCTup header in cache memory, and copy the tuple there too.
     */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    ct = (CatCTup *) palloc(sizeof(CatCTup));
    heap_copytuple_with_tuple(dtp, &ct->tuple);
    MemoryContextSwitchTo(oldcxt);

    if (dtp != ntp)
        heap_freetuple(dtp);

    /*
     * Finish initializing the CatCTup header, and add it to the cache's
     * linked list and counts.
     */
    ct->ct_magic = CT_MAGIC;
    ct->my_cache = cache;
    ct->c_list = NULL;
    ct->refcount = 0;           /* for the moment */
    ct->dead = false;
    ct->negative = negative;
    ct->hash_value = hashValue;

    dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

    cache->cc_ntup++;
    CacheHdr->ch_ntup++;

    return ct;
}
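
/*
 * Sketch (illustrative only): since a new entry starts with refcount zero,
 * a caller that hands the tuple out must pin it, and should register the
 * pin with the current resource owner so it is released even on error.
 * SearchCatCache in this file follows essentially this pattern:
 *
 *      ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
 *      ct->refcount++;
 *      ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
 */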

/*
 * build_dummy_tuple
 *      Generate a palloc'd HeapTuple that contains the specified key
 *      columns, and NULLs for other columns.
 *
 * This is used to store the keys for negative cache entries and CatCList
 * entries, which don't have real tuples associated with them.
 */
static HeapTuple
build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
{
    HeapTuple   ntp;
    TupleDesc   tupDesc = cache->cc_tupdesc;
    Datum      *values;
    bool       *nulls;
    Oid         tupOid = InvalidOid;
    NameData    tempNames[4];
    int         i;

    values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
    nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));

    memset(values, 0, tupDesc->natts * sizeof(Datum));
    memset(nulls, true, tupDesc->natts * sizeof(bool));

    for (i = 0; i < nkeys; i++)
    {
        int         attindex = cache->cc_key[i];
        Datum       keyval = skeys[i].sk_argument;

        if (attindex > 0)
        {
            /*
             * Here we must be careful in case the caller passed a C string
             * where a NAME is wanted: convert the given argument to a
             * correctly padded NAME.  Otherwise the memcpy() done in
             * heap_form_tuple could fall off the end of memory.
             */
            if (cache->cc_isname[i])
            {
                Name        newval = &tempNames[i];

                namestrcpy(newval, DatumGetCString(keyval));
                keyval = NameGetDatum(newval);
            }
            values[attindex - 1] = keyval;
            nulls[attindex - 1] = false;
        }
        else
        {
            Assert(attindex == ObjectIdAttributeNumber);
            tupOid = DatumGetObjectId(keyval);
        }
    }

    ntp = heap_form_tuple(tupDesc, values, nulls);
    if (tupOid != InvalidOid)
        HeapTupleSetOid(ntp, tupOid);

    pfree(values);
    pfree(nulls);

    return ntp;
}
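
/*
 * Sketch (illustrative only): negative cache entries are built from such
 * dummy tuples.  The negative-entry path in SearchCatCache does essentially
 * this with the probe keys:
 *
 *      ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
 *      ct = CatalogCacheCreateEntry(cache, ntp, hashValue, hashIndex, true);
 *      heap_freetuple(ntp);
 */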


/*
 *  PrepareToInvalidateCacheTuple()
 *
 *  This is part of a rather subtle chain of events, so pay attention:
 *
 *  When a tuple is inserted or deleted, it cannot be flushed from the
 *  catcaches immediately, for reasons explained at the top of cache/inval.c.
 *  Instead we have to add entries for the tuple to a list of pending tuple
 *  invalidations that will be done at the end of the command or transaction.
 *
 *  The lists of tuples that need to be flushed are kept by inval.c.  This
 *  routine is a helper for inval.c.  Given a tuple belonging to
 *  the specified relation, find all catcaches it could be in, compute the
 *  correct hash value for each such catcache, and call the specified
 *  function to record the cache id and hash value in inval.c's lists.
 *  CatalogCacheIdInvalidate will be called later, if appropriate,
 *  using the recorded information.
 *
 *  For an insert or delete, tuple is the target tuple and newtuple is NULL.
 *  For an update, we are called just once, with tuple being the old tuple
 *  version and newtuple the new version.  We should make two list entries
 *  if the tuple's hash value changed, but only one if it didn't.
 *
 *  Note that it is irrelevant whether the given tuple is actually loaded
 *  into the catcache at the moment.  Even if it's not there now, it might
 *  be by the end of the command, or there might be a matching negative entry
 *  to flush --- or other backends' caches might have such entries --- so
 *  we have to make list entries to flush it later.
 *
 *  Also note that it's not an error if there are no catcaches for the
 *  specified relation.  inval.c doesn't know exactly which rels have
 *  catcaches --- it will call this routine for any tuple that's in a
 *  system relation.
 */
void
PrepareToInvalidateCacheTuple(Relation relation,
                              HeapTuple tuple,
                              HeapTuple newtuple,
                              void (*function) (int, uint32, Oid))
{
    slist_iter  iter;
    Oid         reloid;

    CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");

    /*
     * sanity checks
     */
    Assert(RelationIsValid(relation));
    Assert(HeapTupleIsValid(tuple));
    Assert(PointerIsValid(function));
    Assert(CacheHdr != NULL);

    reloid = RelationGetRelid(relation);

    /* ----------------
     *  for each cache
     *     if the cache contains tuples from the specified relation
     *         compute the tuple's hash value(s) in this cache,
     *         and call the passed function to register the information.
     * ----------------
     */

    slist_foreach(iter, &CacheHdr->ch_caches)
    {
        CatCache   *ccp = slist_container(CatCache, cc_next, iter.cur);
        uint32      hashvalue;
        Oid         dbid;

        if (ccp->cc_reloid != reloid)
            continue;

        /* Just in case cache hasn't finished initialization yet... */
        if (ccp->cc_tupdesc == NULL)
            CatalogCacheInitializeCache(ccp);

        hashvalue = CatalogCacheComputeTupleHashValue(ccp, tuple);
        dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;

        (*function) (ccp->id, hashvalue, dbid);

        if (newtuple)
        {
            uint32      newhashvalue;

            newhashvalue = CatalogCacheComputeTupleHashValue(ccp, newtuple);

            if (newhashvalue != hashvalue)
                (*function) (ccp->id, newhashvalue, dbid);
        }
    }
}
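
/*
 * Sketch (illustrative only): the callback passed to
 * PrepareToInvalidateCacheTuple has this shape.  The function name is
 * hypothetical; the real registration callbacks live in inval.c.
 *
 *      static void
 *      record_catcache_inval(int cacheId, uint32 hashValue, Oid dbId)
 *      {
 *          ... queue (cacheId, hashValue, dbId) on the pending-invalidation
 *          list, to be replayed at end of command or transaction ...
 *      }
 *
 *      PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
 *                                    record_catcache_inval);
 */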


/*
 * Subroutines for warning about reference leaks.  These are exported so
 * that resowner.c can call them.
 */
void
PrintCatCacheLeakWarning(HeapTuple tuple)
{
    CatCTup    *ct = (CatCTup *) (((char *) tuple) -
                                  offsetof(CatCTup, tuple));

    /* Safety check to ensure we were handed a cache entry */
    Assert(ct->ct_magic == CT_MAGIC);

    elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
         ct->my_cache->cc_relname, ct->my_cache->id,
         ItemPointerGetBlockNumber(&(tuple->t_self)),
         ItemPointerGetOffsetNumber(&(tuple->t_self)),
         ct->refcount);
}

void
PrintCatCacheListLeakWarning(CatCList *list)
{
    elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
         list->my_cache->cc_relname, list->my_cache->id,
         list, list->refcount);
}