Header And Logo

PostgreSQL
| The world's most advanced open source database.

typcache.c

Go to the documentation of this file.
00001 /*-------------------------------------------------------------------------
00002  *
00003  * typcache.c
00004  *    POSTGRES type cache code
00005  *
00006  * The type cache exists to speed lookup of certain information about data
00007  * types that is not directly available from a type's pg_type row.  For
00008  * example, we use a type's default btree opclass, or the default hash
00009  * opclass if no btree opclass exists, to determine which operators should
00010  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
00011  *
00012  * Several seemingly-odd choices have been made to support use of the type
00013  * cache by generic array and record handling routines, such as array_eq(),
00014  * record_cmp(), and hash_array().  Because those routines are used as index
00015  * support operations, they cannot leak memory.  To allow them to execute
00016  * efficiently, all information that they would like to re-use across calls
00017  * is kept in the type cache.
00018  *
00019  * Once created, a type cache entry lives as long as the backend does, so
00020  * there is no need for a call to release a cache entry.  (For present uses,
00021  * it would be okay to flush type cache entries at the ends of transactions,
00022  * if we needed to reclaim space.)
00023  *
00024  * There is presently no provision for clearing out a cache entry if the
00025  * stored data becomes obsolete.  (The code will work if a type acquires
00026  * opclasses it didn't have before while a backend runs --- but not if the
00027  * definition of an existing opclass is altered.)  However, the relcache
00028  * doesn't cope with opclasses changing under it, either, so this seems
00029  * a low-priority problem.
00030  *
00031  * We do support clearing the tuple descriptor and operator/function parts
00032  * of a rowtype's cache entry, since those may need to change as a consequence
00033  * of ALTER TABLE.
00034  *
00035  *
00036  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
00037  * Portions Copyright (c) 1994, Regents of the University of California
00038  *
00039  * IDENTIFICATION
00040  *    src/backend/utils/cache/typcache.c
00041  *
00042  *-------------------------------------------------------------------------
00043  */
00044 #include "postgres.h"
00045 
00046 #include <limits.h>
00047 
00048 #include "access/hash.h"
00049 #include "access/heapam.h"
00050 #include "access/htup_details.h"
00051 #include "access/nbtree.h"
00052 #include "catalog/indexing.h"
00053 #include "catalog/pg_enum.h"
00054 #include "catalog/pg_operator.h"
00055 #include "catalog/pg_range.h"
00056 #include "catalog/pg_type.h"
00057 #include "commands/defrem.h"
00058 #include "utils/builtins.h"
00059 #include "utils/catcache.h"
00060 #include "utils/fmgroids.h"
00061 #include "utils/inval.h"
00062 #include "utils/lsyscache.h"
00063 #include "utils/rel.h"
00064 #include "utils/snapmgr.h"
00065 #include "utils/syscache.h"
00066 #include "utils/typcache.h"
00067 
00068 
/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;

/*
 * Private flag bits in the TypeCacheEntry.flags field.
 *
 * Each TCFLAGS_CHECKED_xxx bit records that the corresponding group of
 * TCFLAGS_HAVE_xxx bits has already been computed, so the helper routines
 * below can skip re-examining the component type(s).
 */
#define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x0001
#define TCFLAGS_HAVE_ELEM_EQUALITY          0x0002
#define TCFLAGS_HAVE_ELEM_COMPARE           0x0004
#define TCFLAGS_HAVE_ELEM_HASHING           0x0008
#define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x0010
#define TCFLAGS_HAVE_FIELD_EQUALITY         0x0020
#define TCFLAGS_HAVE_FIELD_COMPARE          0x0040
/* Private information to support comparisons of enum values */
typedef struct
{
    Oid         enum_oid;       /* OID of one enum value */
    float4      sort_order;     /* its sort position */
} EnumItem;

/* Per-enum-type cache of sort-order data (see load_enum_cache_data) */
typedef struct TypeCacheEnumData
{
    Oid         bitmap_base;    /* OID corresponding to bit 0 of bitmapset */
    Bitmapset  *sorted_values;  /* Set of OIDs known to be in order */
    int         num_values;     /* total number of values in enum */
    EnumItem    enum_values[1]; /* VARIABLE LENGTH ARRAY */
} TypeCacheEnumData;
00095 
/*
 * We use a separate table for storing the definitions of non-anonymous
 * record types.  Once defined, a record type will be remembered for the
 * life of the backend.  Subsequent uses of the "same" record type (where
 * sameness means equalTupleDescs) will refer to the existing table entry.
 *
 * Stored record types are remembered in a linear array of TupleDescs,
 * which can be indexed quickly with the assigned typmod.  There is also
 * a hash table to speed searches for matching TupleDescs.  The hash key
 * uses just the first N columns' type OIDs, and so we may have multiple
 * entries with the same hash key.
 */
#define REC_HASH_KEYS   16      /* use this many columns in hash key */

typedef struct RecordCacheEntry
{
    /* the hash lookup key MUST BE FIRST */
    Oid         hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */

    /* list of TupleDescs for record types with this hashkey */
    List       *tupdescs;
} RecordCacheEntry;

/* Hash table over RecordCacheEntry, keyed by leading column type OIDs */
static HTAB *RecordCacheHash = NULL;

/* Linear array of registered record TupleDescs, indexed by assigned typmod */
static TupleDesc *RecordCacheArray = NULL;
static int32 RecordCacheArrayLen = 0;   /* allocated length of array */
static int32 NextRecordTypmod = 0;      /* number of entries used */
00124 
/* Forward declarations of local routines */
static void load_typcache_tupdesc(TypeCacheEntry *typentry);
static void load_rangetype_info(TypeCacheEntry *typentry);
static bool array_element_has_equality(TypeCacheEntry *typentry);
static bool array_element_has_compare(TypeCacheEntry *typentry);
static bool array_element_has_hashing(TypeCacheEntry *typentry);
static void cache_array_element_properties(TypeCacheEntry *typentry);
static bool record_fields_have_equality(TypeCacheEntry *typentry);
static bool record_fields_have_compare(TypeCacheEntry *typentry);
static void TypeCacheRelCallback(Datum arg, Oid relid);
static void load_enum_cache_data(TypeCacheEntry *tcache);
static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
static int  enum_oid_cmp(const void *left, const void *right);
00138 
00139 
/*
 * lookup_type_cache
 *
 * Fetch the type cache entry for the specified datatype, and make sure that
 * all the fields requested by bits in 'flags' are valid.
 *
 * The result is never NULL --- we will elog() if the passed type OID is
 * invalid.  Note however that we may fail to find one or more of the
 * requested opclass-dependent fields; the caller needs to check whether
 * the fields are InvalidOid or not.
 */
TypeCacheEntry *
lookup_type_cache(Oid type_id, int flags)
{
    TypeCacheEntry *typentry;
    bool        found;

    if (TypeCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL     ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = sizeof(Oid);
        ctl.entrysize = sizeof(TypeCacheEntry);
        ctl.hash = oid_hash;
        TypeCacheHash = hash_create("Type information cache", 64,
                                    &ctl, HASH_ELEM | HASH_FUNCTION);

        /* Also set up a callback for relcache SI invalidations */
        CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();
    }

    /* Try to look up an existing entry */
    typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
                                              (void *) &type_id,
                                              HASH_FIND, NULL);
    if (typentry == NULL)
    {
        /*
         * If we didn't find one, we want to make one.  But first look up the
         * pg_type row, just to make sure we don't make a cache entry for an
         * invalid type OID.
         */
        HeapTuple   tp;
        Form_pg_type typtup;

        tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
        if (!HeapTupleIsValid(tp))
            elog(ERROR, "cache lookup failed for type %u", type_id);
        typtup = (Form_pg_type) GETSTRUCT(tp);
        if (!typtup->typisdefined)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type \"%s\" is only a shell",
                            NameStr(typtup->typname))));

        /* Now make the typcache entry */
        typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
                                                  (void *) &type_id,
                                                  HASH_ENTER, &found);
        Assert(!found);         /* it wasn't there a moment ago */

        /* Copy the basic physical-representation fields from pg_type */
        MemSet(typentry, 0, sizeof(TypeCacheEntry));
        typentry->type_id = type_id;
        typentry->typlen = typtup->typlen;
        typentry->typbyval = typtup->typbyval;
        typentry->typalign = typtup->typalign;
        typentry->typstorage = typtup->typstorage;
        typentry->typtype = typtup->typtype;
        typentry->typrelid = typtup->typrelid;

        ReleaseSysCache(tp);
    }

    /*
     * If we haven't already found the opclasses, try to do so
     */
    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
                  TYPECACHE_CMP_PROC |
                  TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
                  TYPECACHE_BTREE_OPFAMILY)) &&
        typentry->btree_opf == InvalidOid)
    {
        Oid         opclass;

        opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
        if (OidIsValid(opclass))
        {
            typentry->btree_opf = get_opclass_family(opclass);
            typentry->btree_opintype = get_opclass_input_type(opclass);
        }
        /* If no btree opclass, we force lookup of the hash opclass */
        if (typentry->btree_opf == InvalidOid)
        {
            if (typentry->hash_opf == InvalidOid)
            {
                opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
                if (OidIsValid(opclass))
                {
                    typentry->hash_opf = get_opclass_family(opclass);
                    typentry->hash_opintype = get_opclass_input_type(opclass);
                }
            }
        }
        else
        {
            /*
             * In case we find a btree opclass where previously we only found
             * a hash opclass, reset eq_opr and derived information so that we
             * can fetch the btree equality operator instead of the hash
             * equality operator.  (They're probably the same operator, but we
             * don't assume that here.)
             */
            typentry->eq_opr = InvalidOid;
            typentry->eq_opr_finfo.fn_oid = InvalidOid;
            typentry->hash_proc = InvalidOid;
            typentry->hash_proc_finfo.fn_oid = InvalidOid;
        }
    }

    /* Look up the default hash opclass if any hash-related field is wanted */
    if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
                  TYPECACHE_HASH_OPFAMILY)) &&
        typentry->hash_opf == InvalidOid)
    {
        Oid         opclass;

        opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
        if (OidIsValid(opclass))
        {
            typentry->hash_opf = get_opclass_family(opclass);
            typentry->hash_opintype = get_opclass_input_type(opclass);
        }
    }

    /* Look for requested operators and functions */
    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
        typentry->eq_opr == InvalidOid)
    {
        Oid         eq_opr = InvalidOid;

        /* Prefer the btree equality operator; fall back to the hash one */
        if (typentry->btree_opf != InvalidOid)
            eq_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTEqualStrategyNumber);
        if (eq_opr == InvalidOid &&
            typentry->hash_opf != InvalidOid)
            eq_opr = get_opfamily_member(typentry->hash_opf,
                                         typentry->hash_opintype,
                                         typentry->hash_opintype,
                                         HTEqualStrategyNumber);

        /*
         * If the proposed equality operator is array_eq or record_eq, check
         * to see if the element type or column types support equality. If
         * not, array_eq or record_eq would fail at runtime, so we don't want
         * to report that the type has equality.
         */
        if (eq_opr == ARRAY_EQ_OP &&
            !array_element_has_equality(typentry))
            eq_opr = InvalidOid;
        else if (eq_opr == RECORD_EQ_OP &&
                 !record_fields_have_equality(typentry))
            eq_opr = InvalidOid;

        typentry->eq_opr = eq_opr;

        /*
         * Reset info about hash function whenever we pick up new info about
         * equality operator.  This is so we can ensure that the hash function
         * matches the operator.
         */
        typentry->hash_proc = InvalidOid;
        typentry->hash_proc_finfo.fn_oid = InvalidOid;
    }
    if ((flags & TYPECACHE_LT_OPR) && typentry->lt_opr == InvalidOid)
    {
        Oid         lt_opr = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            lt_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTLessStrategyNumber);

        /* As above, make sure array_cmp or record_cmp will succeed */
        if (lt_opr == ARRAY_LT_OP &&
            !array_element_has_compare(typentry))
            lt_opr = InvalidOid;
        else if (lt_opr == RECORD_LT_OP &&
                 !record_fields_have_compare(typentry))
            lt_opr = InvalidOid;

        typentry->lt_opr = lt_opr;
    }
    if ((flags & TYPECACHE_GT_OPR) && typentry->gt_opr == InvalidOid)
    {
        Oid         gt_opr = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            gt_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTGreaterStrategyNumber);

        /* As above, make sure array_cmp or record_cmp will succeed */
        if (gt_opr == ARRAY_GT_OP &&
            !array_element_has_compare(typentry))
            gt_opr = InvalidOid;
        else if (gt_opr == RECORD_GT_OP &&
                 !record_fields_have_compare(typentry))
            gt_opr = InvalidOid;

        typentry->gt_opr = gt_opr;
    }
    if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
        typentry->cmp_proc == InvalidOid)
    {
        Oid         cmp_proc = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            cmp_proc = get_opfamily_proc(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTORDER_PROC);

        /* As above, make sure array_cmp or record_cmp will succeed */
        if (cmp_proc == F_BTARRAYCMP &&
            !array_element_has_compare(typentry))
            cmp_proc = InvalidOid;
        else if (cmp_proc == F_BTRECORDCMP &&
                 !record_fields_have_compare(typentry))
            cmp_proc = InvalidOid;

        typentry->cmp_proc = cmp_proc;
    }
    if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
        typentry->hash_proc == InvalidOid)
    {
        Oid         hash_proc = InvalidOid;

        /*
         * We insist that the eq_opr, if one has been determined, match the
         * hash opclass; else report there is no hash function.
         */
        if (typentry->hash_opf != InvalidOid &&
            (!OidIsValid(typentry->eq_opr) ||
             typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
                                                     typentry->hash_opintype,
                                                     typentry->hash_opintype,
                                                     HTEqualStrategyNumber)))
            hash_proc = get_opfamily_proc(typentry->hash_opf,
                                          typentry->hash_opintype,
                                          typentry->hash_opintype,
                                          HASHPROC);

        /*
         * As above, make sure hash_array will succeed.  We don't currently
         * support hashing for composite types, but when we do, we'll need
         * more logic here to check that case too.
         */
        if (hash_proc == F_HASH_ARRAY &&
            !array_element_has_hashing(typentry))
            hash_proc = InvalidOid;

        typentry->hash_proc = hash_proc;
    }

    /*
     * Set up fmgr lookup info as requested
     *
     * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
     * which is not quite right (they're really in the hash table's private
     * memory context) but this will do for our purposes.
     */
    if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
        typentry->eq_opr_finfo.fn_oid == InvalidOid &&
        typentry->eq_opr != InvalidOid)
    {
        Oid         eq_opr_func;

        eq_opr_func = get_opcode(typentry->eq_opr);
        if (eq_opr_func != InvalidOid)
            fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
                          CacheMemoryContext);
    }
    if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
        typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
        typentry->cmp_proc != InvalidOid)
    {
        fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
                      CacheMemoryContext);
    }
    if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
        typentry->hash_proc_finfo.fn_oid == InvalidOid &&
        typentry->hash_proc != InvalidOid)
    {
        fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
                      CacheMemoryContext);
    }

    /*
     * If it's a composite type (row type), get tupdesc if requested
     */
    if ((flags & TYPECACHE_TUPDESC) &&
        typentry->tupDesc == NULL &&
        typentry->typtype == TYPTYPE_COMPOSITE)
    {
        load_typcache_tupdesc(typentry);
    }

    /*
     * If requested, get information about a range type
     */
    if ((flags & TYPECACHE_RANGE_INFO) &&
        typentry->rngelemtype == NULL &&
        typentry->typtype == TYPTYPE_RANGE)
    {
        load_rangetype_info(typentry);
    }

    return typentry;
}
00468 
/*
 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
 *
 * On return, typentry->tupDesc points at the relcache's tuple descriptor,
 * with its reference count bumped.  The relation lock is held only for the
 * duration of this call.
 */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
    Relation    rel;

    if (!OidIsValid(typentry->typrelid))        /* should not happen */
        elog(ERROR, "invalid typrelid for composite type %u",
             typentry->type_id);
    rel = relation_open(typentry->typrelid, AccessShareLock);
    Assert(rel->rd_rel->reltype == typentry->type_id);

    /*
     * Link to the tupdesc and increment its refcount (we assert it's a
     * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
     * because the reference mustn't be entered in the current resource owner;
     * it can outlive the current query.
     */
    typentry->tupDesc = RelationGetDescr(rel);

    Assert(typentry->tupDesc->tdrefcount > 0);
    typentry->tupDesc->tdrefcount++;

    relation_close(rel, AccessShareLock);
}
00496 
/*
 * load_rangetype_info --- helper routine to set up range type information
 *
 * Fetches the pg_range row for the type, resolves the subtype's btree
 * comparison function from its opclass, and caches fmgr info for the
 * comparison, canonical, and subdiff functions (the latter two only if
 * defined for this range type).
 */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
    Form_pg_range pg_range;
    HeapTuple   tup;
    Oid         subtypeOid;
    Oid         opclassOid;
    Oid         canonicalOid;
    Oid         subdiffOid;
    Oid         opfamilyOid;
    Oid         opcintype;
    Oid         cmpFnOid;

    /* get information from pg_range */
    tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
    /* should not fail, since we already checked typtype ... */
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for range type %u",
             typentry->type_id);
    pg_range = (Form_pg_range) GETSTRUCT(tup);

    /* copy out everything we need before releasing the syscache tuple */
    subtypeOid = pg_range->rngsubtype;
    typentry->rng_collation = pg_range->rngcollation;
    opclassOid = pg_range->rngsubopc;
    canonicalOid = pg_range->rngcanonical;
    subdiffOid = pg_range->rngsubdiff;

    ReleaseSysCache(tup);

    /* get opclass properties and look up the comparison function */
    opfamilyOid = get_opclass_family(opclassOid);
    opcintype = get_opclass_input_type(opclassOid);

    cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
                                 BTORDER_PROC);
    if (!RegProcedureIsValid(cmpFnOid))
        elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
             BTORDER_PROC, opcintype, opcintype, opfamilyOid);

    /* set up cached fmgrinfo structs */
    fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
                  CacheMemoryContext);
    if (OidIsValid(canonicalOid))
        fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
                      CacheMemoryContext);
    if (OidIsValid(subdiffOid))
        fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
                      CacheMemoryContext);

    /* Lastly, set up link to the element type --- this marks data valid */
    typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
00552 
00553 
00554 /*
00555  * array_element_has_equality and friends are helper routines to check
00556  * whether we should believe that array_eq and related functions will work
00557  * on the given array type or composite type.
00558  *
00559  * The logic above may call these repeatedly on the same type entry, so we
00560  * make use of the typentry->flags field to cache the results once known.
00561  * Also, we assume that we'll probably want all these facts about the type
00562  * if we want any, so we cache them all using only one lookup of the
00563  * component datatype(s).
00564  */
00565 
00566 static bool
00567 array_element_has_equality(TypeCacheEntry *typentry)
00568 {
00569     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
00570         cache_array_element_properties(typentry);
00571     return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
00572 }
00573 
00574 static bool
00575 array_element_has_compare(TypeCacheEntry *typentry)
00576 {
00577     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
00578         cache_array_element_properties(typentry);
00579     return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
00580 }
00581 
00582 static bool
00583 array_element_has_hashing(TypeCacheEntry *typentry)
00584 {
00585     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
00586         cache_array_element_properties(typentry);
00587     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
00588 }
00589 
00590 static void
00591 cache_array_element_properties(TypeCacheEntry *typentry)
00592 {
00593     Oid         elem_type = get_base_element_type(typentry->type_id);
00594 
00595     if (OidIsValid(elem_type))
00596     {
00597         TypeCacheEntry *elementry;
00598 
00599         elementry = lookup_type_cache(elem_type,
00600                                       TYPECACHE_EQ_OPR |
00601                                       TYPECACHE_CMP_PROC |
00602                                       TYPECACHE_HASH_PROC);
00603         if (OidIsValid(elementry->eq_opr))
00604             typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
00605         if (OidIsValid(elementry->cmp_proc))
00606             typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
00607         if (OidIsValid(elementry->hash_proc))
00608             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
00609     }
00610     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
00611 }
00612 
00613 static bool
00614 record_fields_have_equality(TypeCacheEntry *typentry)
00615 {
00616     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
00617         cache_record_field_properties(typentry);
00618     return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
00619 }
00620 
00621 static bool
00622 record_fields_have_compare(TypeCacheEntry *typentry)
00623 {
00624     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
00625         cache_record_field_properties(typentry);
00626     return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
00627 }
00628 
00629 static void
00630 cache_record_field_properties(TypeCacheEntry *typentry)
00631 {
00632     /*
00633      * For type RECORD, we can't really tell what will work, since we don't
00634      * have access here to the specific anonymous type.  Just assume that
00635      * everything will (we may get a failure at runtime ...)
00636      */
00637     if (typentry->type_id == RECORDOID)
00638         typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
00639                             TCFLAGS_HAVE_FIELD_COMPARE);
00640     else if (typentry->typtype == TYPTYPE_COMPOSITE)
00641     {
00642         TupleDesc   tupdesc;
00643         int         newflags;
00644         int         i;
00645 
00646         /* Fetch composite type's tupdesc if we don't have it already */
00647         if (typentry->tupDesc == NULL)
00648             load_typcache_tupdesc(typentry);
00649         tupdesc = typentry->tupDesc;
00650 
00651         /* Have each property if all non-dropped fields have the property */
00652         newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
00653                     TCFLAGS_HAVE_FIELD_COMPARE);
00654         for (i = 0; i < tupdesc->natts; i++)
00655         {
00656             TypeCacheEntry *fieldentry;
00657 
00658             if (tupdesc->attrs[i]->attisdropped)
00659                 continue;
00660 
00661             fieldentry = lookup_type_cache(tupdesc->attrs[i]->atttypid,
00662                                            TYPECACHE_EQ_OPR |
00663                                            TYPECACHE_CMP_PROC);
00664             if (!OidIsValid(fieldentry->eq_opr))
00665                 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
00666             if (!OidIsValid(fieldentry->cmp_proc))
00667                 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
00668 
00669             /* We can drop out of the loop once we disprove all bits */
00670             if (newflags == 0)
00671                 break;
00672         }
00673         typentry->flags |= newflags;
00674     }
00675     typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
00676 }
00677 
00678 
00679 /*
00680  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
00681  *
00682  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
00683  * hasn't had its refcount bumped.
00684  */
00685 static TupleDesc
00686 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
00687 {
00688     if (type_id != RECORDOID)
00689     {
00690         /*
00691          * It's a named composite type, so use the regular typcache.
00692          */
00693         TypeCacheEntry *typentry;
00694 
00695         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
00696         if (typentry->tupDesc == NULL && !noError)
00697             ereport(ERROR,
00698                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
00699                      errmsg("type %s is not composite",
00700                             format_type_be(type_id))));
00701         return typentry->tupDesc;
00702     }
00703     else
00704     {
00705         /*
00706          * It's a transient record type, so look in our record-type table.
00707          */
00708         if (typmod < 0 || typmod >= NextRecordTypmod)
00709         {
00710             if (!noError)
00711                 ereport(ERROR,
00712                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
00713                          errmsg("record type has not been registered")));
00714             return NULL;
00715         }
00716         return RecordCacheArray[typmod];
00717     }
00718 }
00719 
00720 /*
00721  * lookup_rowtype_tupdesc
00722  *
00723  * Given a typeid/typmod that should describe a known composite type,
00724  * return the tuple descriptor for the type.  Will ereport on failure.
00725  *
00726  * Note: on success, we increment the refcount of the returned TupleDesc,
00727  * and log the reference in CurrentResourceOwner.  Caller should call
00728  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
00729  */
00730 TupleDesc
00731 lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
00732 {
00733     TupleDesc   tupDesc;
00734 
00735     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
00736     IncrTupleDescRefCount(tupDesc);
00737     return tupDesc;
00738 }
00739 
00740 /*
00741  * lookup_rowtype_tupdesc_noerror
00742  *
00743  * As above, but if the type is not a known composite type and noError
00744  * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
00745  * type_id is passed, you'll get an ereport anyway.)
00746  */
00747 TupleDesc
00748 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
00749 {
00750     TupleDesc   tupDesc;
00751 
00752     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
00753     if (tupDesc != NULL)
00754         IncrTupleDescRefCount(tupDesc);
00755     return tupDesc;
00756 }
00757 
00758 /*
00759  * lookup_rowtype_tupdesc_copy
00760  *
00761  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
00762  * copied into the CurrentMemoryContext and is not reference-counted.
00763  */
00764 TupleDesc
00765 lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
00766 {
00767     TupleDesc   tmp;
00768 
00769     tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
00770     return CreateTupleDescCopyConstr(tmp);
00771 }
00772 
00773 
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 *
 * The cache consists of a hash table (keyed by the type OIDs of the first
 * few columns) whose entries hold lists of interned tupdescs, plus a flat
 * array (RecordCacheArray) indexed by the assigned typmod for direct lookup.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
    RecordCacheEntry *recentry;
    TupleDesc   entDesc;
    Oid         hashkey[REC_HASH_KEYS];
    bool        found;
    int         i;
    ListCell   *l;
    int32       newtypmod;
    MemoryContext oldcxt;

    /* only transient record types get typmods assigned this way */
    Assert(tupDesc->tdtypeid == RECORDOID);

    if (RecordCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL     ctl;

        MemSet(&ctl, 0, sizeof(ctl));
        ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
        ctl.entrysize = sizeof(RecordCacheEntry);
        ctl.hash = tag_hash;
        RecordCacheHash = hash_create("Record information cache", 64,
                                      &ctl, HASH_ELEM | HASH_FUNCTION);

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();
    }

    /*
     * Find or create a hashtable entry for this hash class.  The key is only
     * the first REC_HASH_KEYS column type OIDs (zero-padded for shorter
     * rows), so distinct tupdescs can share an entry; the per-entry list
     * below disambiguates them.
     */
    MemSet(hashkey, 0, sizeof(hashkey));
    for (i = 0; i < tupDesc->natts; i++)
    {
        if (i >= REC_HASH_KEYS)
            break;
        hashkey[i] = tupDesc->attrs[i]->atttypid;
    }
    recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
                                                (void *) hashkey,
                                                HASH_ENTER, &found);
    if (!found)
    {
        /* New entry ... hash_search initialized only the hash key */
        recentry->tupdescs = NIL;
    }

    /* Look for existing record cache entry */
    foreach(l, recentry->tupdescs)
    {
        entDesc = (TupleDesc) lfirst(l);
        if (equalTupleDescs(tupDesc, entDesc))
        {
            /* match found: reuse the previously assigned typmod */
            tupDesc->tdtypmod = entDesc->tdtypmod;
            return;
        }
    }

    /*
     * Not present, so need to manufacture an entry.  All permanent cache
     * data must live in CacheMemoryContext, not the caller's context.
     */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /* ensure RecordCacheArray has room for one more typmod slot */
    if (RecordCacheArray == NULL)
    {
        RecordCacheArray = (TupleDesc *) palloc(64 * sizeof(TupleDesc));
        RecordCacheArrayLen = 64;
    }
    else if (NextRecordTypmod >= RecordCacheArrayLen)
    {
        int32       newlen = RecordCacheArrayLen * 2;

        RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
                                                  newlen * sizeof(TupleDesc));
        RecordCacheArrayLen = newlen;
    }

    /* if fail in subrs, no damage except possibly some wasted memory... */
    entDesc = CreateTupleDescCopy(tupDesc);
    recentry->tupdescs = lcons(entDesc, recentry->tupdescs);
    /* mark it as a reference-counted tupdesc */
    entDesc->tdrefcount = 1;
    /* now it's safe to advance NextRecordTypmod */
    newtypmod = NextRecordTypmod++;
    entDesc->tdtypmod = newtypmod;
    RecordCacheArray[newtypmod] = entDesc;

    /* report to caller as well */
    tupDesc->tdtypmod = newtypmod;

    MemoryContextSwitchTo(oldcxt);
}
00872 
00873 /*
00874  * TypeCacheRelCallback
00875  *      Relcache inval callback function
00876  *
00877  * Delete the cached tuple descriptor (if any) for the given rel's composite
00878  * type, or for all composite types if relid == InvalidOid.  Also reset
00879  * whatever info we have cached about the composite type's comparability.
00880  *
00881  * This is called when a relcache invalidation event occurs for the given
00882  * relid.  We must scan the whole typcache hash since we don't know the
00883  * type OID corresponding to the relid.  We could do a direct search if this
00884  * were a syscache-flush callback on pg_type, but then we would need all
00885  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
00886  * invals against the rel's pg_type OID.  The extra SI signaling could very
00887  * well cost more than we'd save, since in most usages there are not very
00888  * many entries in a backend's typcache.  The risk of bugs-of-omission seems
00889  * high, too.
00890  *
00891  * Another possibility, with only localized impact, is to maintain a second
00892  * hashtable that indexes composite-type typcache entries by their typrelid.
00893  * But it's still not clear it's worth the trouble.
00894  */
00895 static void
00896 TypeCacheRelCallback(Datum arg, Oid relid)
00897 {
00898     HASH_SEQ_STATUS status;
00899     TypeCacheEntry *typentry;
00900 
00901     /* TypeCacheHash must exist, else this callback wouldn't be registered */
00902     hash_seq_init(&status, TypeCacheHash);
00903     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
00904     {
00905         if (typentry->typtype != TYPTYPE_COMPOSITE)
00906             continue;           /* skip non-composites */
00907 
00908         /* Skip if no match, unless we're zapping all composite types */
00909         if (relid != typentry->typrelid && relid != InvalidOid)
00910             continue;
00911 
00912         /* Delete tupdesc if we have it */
00913         if (typentry->tupDesc != NULL)
00914         {
00915             /*
00916              * Release our refcount, and free the tupdesc if none remain.
00917              * (Can't use DecrTupleDescRefCount because this reference is not
00918              * logged in current resource owner.)
00919              */
00920             Assert(typentry->tupDesc->tdrefcount > 0);
00921             if (--typentry->tupDesc->tdrefcount == 0)
00922                 FreeTupleDesc(typentry->tupDesc);
00923             typentry->tupDesc = NULL;
00924         }
00925 
00926         /* Reset equality/comparison/hashing information */
00927         typentry->eq_opr = InvalidOid;
00928         typentry->lt_opr = InvalidOid;
00929         typentry->gt_opr = InvalidOid;
00930         typentry->cmp_proc = InvalidOid;
00931         typentry->hash_proc = InvalidOid;
00932         typentry->eq_opr_finfo.fn_oid = InvalidOid;
00933         typentry->cmp_proc_finfo.fn_oid = InvalidOid;
00934         typentry->hash_proc_finfo.fn_oid = InvalidOid;
00935         typentry->flags = 0;
00936     }
00937 }
00938 
00939 
00940 /*
00941  * Check if given OID is part of the subset that's sortable by comparisons
00942  */
00943 static inline bool
00944 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
00945 {
00946     Oid         offset;
00947 
00948     if (arg < enumdata->bitmap_base)
00949         return false;
00950     offset = arg - enumdata->bitmap_base;
00951     if (offset > (Oid) INT_MAX)
00952         return false;
00953     return bms_is_member((int) offset, enumdata->sorted_values);
00954 }
00955 
00956 
00957 /*
00958  * compare_values_of_enum
00959  *      Compare two members of an enum type.
00960  *      Return <0, 0, or >0 according as arg1 <, =, or > arg2.
00961  *
00962  * Note: currently, the enumData cache is refreshed only if we are asked
00963  * to compare an enum value that is not already in the cache.  This is okay
00964  * because there is no support for re-ordering existing values, so comparisons
00965  * of previously cached values will return the right answer even if other
00966  * values have been added since we last loaded the cache.
00967  *
00968  * Note: the enum logic has a special-case rule about even-numbered versus
00969  * odd-numbered OIDs, but we take no account of that rule here; this
00970  * routine shouldn't even get called when that rule applies.
00971  */
00972 int
00973 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
00974 {
00975     TypeCacheEnumData *enumdata;
00976     EnumItem   *item1;
00977     EnumItem   *item2;
00978 
00979     /*
00980      * Equal OIDs are certainly equal --- this case was probably handled by
00981      * our caller, but we may as well check.
00982      */
00983     if (arg1 == arg2)
00984         return 0;
00985 
00986     /* Load up the cache if first time through */
00987     if (tcache->enumData == NULL)
00988         load_enum_cache_data(tcache);
00989     enumdata = tcache->enumData;
00990 
00991     /*
00992      * If both OIDs are known-sorted, we can just compare them directly.
00993      */
00994     if (enum_known_sorted(enumdata, arg1) &&
00995         enum_known_sorted(enumdata, arg2))
00996     {
00997         if (arg1 < arg2)
00998             return -1;
00999         else
01000             return 1;
01001     }
01002 
01003     /*
01004      * Slow path: we have to identify their actual sort-order positions.
01005      */
01006     item1 = find_enumitem(enumdata, arg1);
01007     item2 = find_enumitem(enumdata, arg2);
01008 
01009     if (item1 == NULL || item2 == NULL)
01010     {
01011         /*
01012          * We couldn't find one or both values.  That means the enum has
01013          * changed under us, so re-initialize the cache and try again. We
01014          * don't bother retrying the known-sorted case in this path.
01015          */
01016         load_enum_cache_data(tcache);
01017         enumdata = tcache->enumData;
01018 
01019         item1 = find_enumitem(enumdata, arg1);
01020         item2 = find_enumitem(enumdata, arg2);
01021 
01022         /*
01023          * If we still can't find the values, complain: we must have corrupt
01024          * data.
01025          */
01026         if (item1 == NULL)
01027             elog(ERROR, "enum value %u not found in cache for enum %s",
01028                  arg1, format_type_be(tcache->type_id));
01029         if (item2 == NULL)
01030             elog(ERROR, "enum value %u not found in cache for enum %s",
01031                  arg2, format_type_be(tcache->type_id));
01032     }
01033 
01034     if (item1->sort_order < item2->sort_order)
01035         return -1;
01036     else if (item1->sort_order > item2->sort_order)
01037         return 1;
01038     else
01039         return 0;
01040 }
01041 
/*
 * Load (or re-load) the enumData member of the typcache entry.
 *
 * On success, any previously cached enumData is freed and replaced.
 * The work is staged in the caller's memory context and copied into
 * CacheMemoryContext only at the end, so an error partway through
 * cannot leak permanent cache memory.
 */
static void
load_enum_cache_data(TypeCacheEntry *tcache)
{
    TypeCacheEnumData *enumdata;
    Relation    enum_rel;
    SysScanDesc enum_scan;
    HeapTuple   enum_tuple;
    ScanKeyData skey;
    EnumItem   *items;          /* working array of (oid, sort_order) pairs */
    int         numitems;
    int         maxitems;
    Oid         bitmap_base;    /* base OID of best sorted subset found */
    Bitmapset  *bitmap;         /* offsets (from base) of that subset */
    MemoryContext oldcxt;
    int         bm_size,
                start_pos;

    /* Check that this is actually an enum */
    if (tcache->typtype != TYPTYPE_ENUM)
        ereport(ERROR,
                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                 errmsg("%s is not an enum",
                        format_type_be(tcache->type_id))));

    /*
     * Read all the information for members of the enum type.  We collect the
     * info in working memory in the caller's context, and then transfer it to
     * permanent memory in CacheMemoryContext.  This minimizes the risk of
     * leaking memory from CacheMemoryContext in the event of an error partway
     * through.
     */
    maxitems = 64;
    items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
    numitems = 0;

    /*
     * Scan pg_enum for the members of the target enum type.  We use a current
     * MVCC snapshot, *not* SnapshotNow, so that we see a consistent set of
     * rows even if someone commits a renumbering of the enum meanwhile. See
     * comments for RenumberEnumType in catalog/pg_enum.c for more info.
     */
    ScanKeyInit(&skey,
                Anum_pg_enum_enumtypid,
                BTEqualStrategyNumber, F_OIDEQ,
                ObjectIdGetDatum(tcache->type_id));

    enum_rel = heap_open(EnumRelationId, AccessShareLock);
    enum_scan = systable_beginscan(enum_rel,
                                   EnumTypIdLabelIndexId,
                                   true, GetLatestSnapshot(),
                                   1, &skey);

    while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
    {
        Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

        /* double the working array as needed */
        if (numitems >= maxitems)
        {
            maxitems *= 2;
            items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
        }
        items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
        items[numitems].sort_order = en->enumsortorder;
        numitems++;
    }

    systable_endscan(enum_scan);
    heap_close(enum_rel, AccessShareLock);

    /* Sort the items into OID order (required for bsearch in find_enumitem) */
    qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

    /*
     * Here, we create a bitmap listing a subset of the enum's OIDs that are
     * known to be in order and can thus be compared with just OID comparison.
     *
     * The point of this is that the enum's initial OIDs were certainly in
     * order, so there is some subset that can be compared via OID comparison;
     * and we'd rather not do binary searches unnecessarily.
     *
     * This is somewhat heuristic, and might identify a subset of OIDs that
     * isn't exactly what the type started with.  That's okay as long as the
     * subset is correctly sorted.
     */
    bitmap_base = InvalidOid;
    bitmap = NULL;
    bm_size = 1;                /* only save sets of at least 2 OIDs */

    for (start_pos = 0; start_pos < numitems - 1; start_pos++)
    {
        /*
         * Identify longest sorted subsequence starting at start_pos
         */
        Bitmapset  *this_bitmap = bms_make_singleton(0);
        int         this_bm_size = 1;
        Oid         start_oid = items[start_pos].enum_oid;
        float4      prev_order = items[start_pos].sort_order;
        int         i;

        for (i = start_pos + 1; i < numitems; i++)
        {
            Oid         offset;

            offset = items[i].enum_oid - start_oid;
            /* quit if bitmap would be too large; cutoff is arbitrary */
            if (offset >= 8192)
                break;
            /* include the item if it's in-order */
            if (items[i].sort_order > prev_order)
            {
                prev_order = items[i].sort_order;
                this_bitmap = bms_add_member(this_bitmap, (int) offset);
                this_bm_size++;
            }
        }

        /* Remember it if larger than previous best */
        if (this_bm_size > bm_size)
        {
            bms_free(bitmap);
            bitmap_base = start_oid;
            bitmap = this_bitmap;
            bm_size = this_bm_size;
        }
        else
            bms_free(this_bitmap);

        /*
         * Done if it's not possible to find a longer sequence in the rest of
         * the list.  In typical cases this will happen on the first
         * iteration, which is why we create the bitmaps on the fly instead of
         * doing a second pass over the list.
         */
        if (bm_size >= (numitems - start_pos - 1))
            break;
    }

    /* OK, copy the data into CacheMemoryContext */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    enumdata = (TypeCacheEnumData *)
        palloc(offsetof(TypeCacheEnumData, enum_values) +
               numitems * sizeof(EnumItem));
    enumdata->bitmap_base = bitmap_base;
    enumdata->sorted_values = bms_copy(bitmap);
    enumdata->num_values = numitems;
    memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
    MemoryContextSwitchTo(oldcxt);

    /* free the caller-context working copies */
    pfree(items);
    bms_free(bitmap);

    /* And link the finished cache struct into the typcache */
    if (tcache->enumData != NULL)
        pfree(tcache->enumData);
    tcache->enumData = enumdata;
}
01201 
01202 /*
01203  * Locate the EnumItem with the given OID, if present
01204  */
01205 static EnumItem *
01206 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
01207 {
01208     EnumItem    srch;
01209 
01210     /* On some versions of Solaris, bsearch of zero items dumps core */
01211     if (enumdata->num_values <= 0)
01212         return NULL;
01213 
01214     srch.enum_oid = arg;
01215     return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
01216                    sizeof(EnumItem), enum_oid_cmp);
01217 }
01218 
01219 /*
01220  * qsort comparison function for OID-ordered EnumItems
01221  */
01222 static int
01223 enum_oid_cmp(const void *left, const void *right)
01224 {
01225     const EnumItem *l = (const EnumItem *) left;
01226     const EnumItem *r = (const EnumItem *) right;
01227 
01228     if (l->enum_oid < r->enum_oid)
01229         return -1;
01230     else if (l->enum_oid > r->enum_oid)
01231         return 1;
01232     else
01233         return 0;
01234 }