dynahash.c File Reference

#include "postgres.h"
#include <limits.h>
#include "access/xact.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
#include "utils/memutils.h"

Data Structures

struct  HASHHDR
struct  HTAB

Defines

#define DEF_SEGSIZE   256
#define DEF_SEGSIZE_SHIFT   8
#define DEF_DIRSIZE   256
#define DEF_FFACTOR   1
#define IS_PARTITIONED(hctl)   ((hctl)->num_partitions != 0)
#define ELEMENTKEY(helem)   (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))
#define ELEMENT_FROM_KEY(key)   ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))
#define MOD(x, y)   ((x) & ((y)-1))
#define MAX_SEQ_SCANS   100

Typedefs

typedef HASHELEMENT * HASHBUCKET
typedef HASHBUCKET * HASHSEGMENT

Functions

static void * DynaHashAlloc (Size size)
static HASHSEGMENT seg_alloc (HTAB *hashp)
static bool element_alloc (HTAB *hashp, int nelem)
static bool dir_realloc (HTAB *hashp)
static bool expand_table (HTAB *hashp)
static HASHBUCKET get_hash_entry (HTAB *hashp)
static void hdefault (HTAB *hashp)
static int choose_nelem_alloc (Size entrysize)
static bool init_htab (HTAB *hashp, long nelem)
static void hash_corrupted (HTAB *hashp)
static long next_pow2_long (long num)
static int next_pow2_int (long num)
static void register_seq_scan (HTAB *hashp)
static void deregister_seq_scan (HTAB *hashp)
static bool has_seq_scans (HTAB *hashp)
static int string_compare (const char *key1, const char *key2, Size keysize)
HTAB * hash_create (const char *tabname, long nelem, HASHCTL *info, int flags)
Size hash_estimate_size (long num_entries, Size entrysize)
long hash_select_dirsize (long num_entries)
Size hash_get_shared_size (HASHCTL *info, int flags)
void hash_destroy (HTAB *hashp)
void hash_stats (const char *where, HTAB *hashp)
uint32 get_hash_value (HTAB *hashp, const void *keyPtr)
static uint32 calc_bucket (HASHHDR *hctl, uint32 hash_val)
void * hash_search (HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
void * hash_search_with_hash_value (HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
bool hash_update_hash_key (HTAB *hashp, void *existingEntry, const void *newKeyPtr)
long hash_get_num_entries (HTAB *hashp)
void hash_seq_init (HASH_SEQ_STATUS *status, HTAB *hashp)
void * hash_seq_search (HASH_SEQ_STATUS *status)
void hash_seq_term (HASH_SEQ_STATUS *status)
void hash_freeze (HTAB *hashp)
int my_log2 (long num)
void AtEOXact_HashTables (bool isCommit)
void AtEOSubXact_HashTables (bool isCommit, int nestDepth)

Variables

static MemoryContext CurrentDynaHashCxt = NULL
static HTAB * seq_scan_tables [MAX_SEQ_SCANS]
static int seq_scan_level [MAX_SEQ_SCANS]
static int num_seq_scans = 0

Define Documentation

#define DEF_DIRSIZE   256

Definition at line 97 of file dynahash.c.

#define DEF_FFACTOR   1

Definition at line 98 of file dynahash.c.

Referenced by hash_estimate_size(), and hash_select_dirsize().

#define DEF_SEGSIZE   256

Definition at line 95 of file dynahash.c.

Referenced by hash_estimate_size(), and hash_select_dirsize().

#define DEF_SEGSIZE_SHIFT   8

Definition at line 96 of file dynahash.c.

#define ELEMENT_FROM_KEY (   key  )     ((HASHELEMENT *) (((char *) (key)) - MAXALIGN(sizeof(HASHELEMENT))))

Definition at line 189 of file dynahash.c.

Referenced by hash_update_hash_key().

#define ELEMENTKEY (   helem  )     (((char *)(helem)) + MAXALIGN(sizeof(HASHELEMENT)))

Definition at line 184 of file dynahash.c.

Referenced by hash_search_with_hash_value(), hash_seq_search(), and hash_update_hash_key().

#define IS_PARTITIONED (   hctl  )     ((hctl)->num_partitions != 0)

#define MAX_SEQ_SCANS   100

Definition at line 1627 of file dynahash.c.

Referenced by register_seq_scan().

#define MOD(x, y)   ((x) & ((y)-1))

Typedef Documentation

typedef HASHELEMENT * HASHBUCKET

Definition at line 102 of file dynahash.c.

typedef HASHBUCKET * HASHSEGMENT

Definition at line 105 of file dynahash.c.


Function Documentation

void AtEOSubXact_HashTables ( bool  isCommit, int  nestDepth )

Definition at line 1709 of file dynahash.c.

References elog, i, num_seq_scans, seq_scan_level, and WARNING.

Referenced by AbortSubTransaction(), and CommitSubTransaction().

{
    int         i;

    /*
     * Search backward to make cleanup easy.  Note we must check all entries,
     * not only those at the end of the array, because deletion technique
     * doesn't keep them in order.
     */
    for (i = num_seq_scans - 1; i >= 0; i--)
    {
        if (seq_scan_level[i] >= nestDepth)
        {
            if (isCommit)
                elog(WARNING, "leaked hash_seq_search scan for hash table %p",
                     seq_scan_tables[i]);
            seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
            seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
            num_seq_scans--;
        }
    }
}

void AtEOXact_HashTables ( bool  isCommit  ) 

Definition at line 1683 of file dynahash.c.

References elog, i, num_seq_scans, and WARNING.

Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().

{
    /*
     * During abort cleanup, open scans are expected; just silently clean 'em
     * out.  An open scan at commit means someone forgot a hash_seq_term()
     * call, so complain.
     *
     * Note: it's tempting to try to print the tabname here, but refrain for
     * fear of touching deallocated memory.  This isn't a user-facing message
     * anyway, so it needn't be pretty.
     */
    if (isCommit)
    {
        int         i;

        for (i = 0; i < num_seq_scans; i++)
        {
            elog(WARNING, "leaked hash_seq_search scan for hash table %p",
                 seq_scan_tables[i]);
        }
    }
    num_seq_scans = 0;
}

static uint32 calc_bucket ( HASHHDR *  hctl, uint32  hash_val ) [inline, static]

Definition at line 767 of file dynahash.c.

References HASHHDR::high_mask, HASHHDR::low_mask, and HASHHDR::max_bucket.

Referenced by expand_table(), hash_search_with_hash_value(), and hash_update_hash_key().

{
    uint32      bucket;

    bucket = hash_val & hctl->high_mask;
    if (bucket > hctl->max_bucket)
        bucket = bucket & hctl->low_mask;

    return bucket;
}
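
A short worked example may help make the mask logic concrete; the values below are hypothetical and chosen only for illustration:

    /*
     * Suppose max_bucket = 10, low_mask = 7 and high_mask = 15, i.e.
     * buckets 0..10 currently exist.  A hash value of 0x2C masks to 12
     * under high_mask; 12 exceeds max_bucket, so it is re-masked with
     * low_mask and the key lands in bucket 4.
     */
    uint32      hash_val = 0x2C;
    uint32      bucket;

    bucket = hash_val & 15;         /* 12 */
    if (bucket > 10)
        bucket = bucket & 7;        /* 4 */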

static int choose_nelem_alloc ( Size  entrysize  )  [static]

Definition at line 504 of file dynahash.c.

References MAXALIGN.

Referenced by hash_estimate_size(), and init_htab().

{
    int         nelem_alloc;
    Size        elementSize;
    Size        allocSize;

    /* Each element has a HASHELEMENT header plus user data. */
    /* NB: this had better match element_alloc() */
    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);

    /*
     * The idea here is to choose nelem_alloc at least 32, but round up so
     * that the allocation request will be a power of 2 or just less. This
     * makes little difference for hash tables in shared memory, but for hash
     * tables managed by palloc, the allocation request will be rounded up to
     * a power of 2 anyway.  If we fail to take this into account, we'll waste
     * as much as half the allocated space.
     */
    allocSize = 32 * 4;         /* assume elementSize at least 8 */
    do
    {
        allocSize <<= 1;
        nelem_alloc = allocSize / elementSize;
    } while (nelem_alloc < 32);

    return nelem_alloc;
}
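
To see how the loop behaves, consider a hypothetical 64-bit layout in which MAXALIGN(sizeof(HASHELEMENT)) is 16 bytes and entrysize is 56:

    elementSize  = 16 + MAXALIGN(56) = 72 bytes
    allocSize    : 256   512   1024  2048  4096
    nelem_alloc  : 3     7     14    28    56    <- stops at 56 (>= 32)

Elements would thus be allocated 56 at a time, about 4032 bytes per request, just under the 4096-byte power of two as the comment above intends.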

static void deregister_seq_scan ( HTAB *  hashp ) [static]

Definition at line 1648 of file dynahash.c.

References elog, ERROR, i, num_seq_scans, seq_scan_level, and HTAB::tabname.

Referenced by hash_seq_term().

{
    int         i;

    /* Search backward since it's most likely at the stack top */
    for (i = num_seq_scans - 1; i >= 0; i--)
    {
        if (seq_scan_tables[i] == hashp)
        {
            seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
            seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
            num_seq_scans--;
            return;
        }
    }
    elog(ERROR, "no hash_seq_search scan for hash table \"%s\"",
         hashp->tabname);
}

static bool dir_realloc ( HTAB *  hashp ) [static]

Definition at line 1447 of file dynahash.c.

References HTAB::alloc, Assert, HTAB::dir, HASHHDR::dsize, DynaHashAlloc(), HTAB::hctl, HTAB::hcxt, HASHHDR::max_dsize, MemSet, NO_MAX_DSIZE, NULL, and pfree().

Referenced by expand_table().

{
    HASHSEGMENT *p;
    HASHSEGMENT *old_p;
    long        new_dsize;
    long        old_dirsize;
    long        new_dirsize;

    if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
        return false;

    /* Reallocate directory */
    new_dsize = hashp->hctl->dsize << 1;
    old_dirsize = hashp->hctl->dsize * sizeof(HASHSEGMENT);
    new_dirsize = new_dsize * sizeof(HASHSEGMENT);

    old_p = hashp->dir;
    CurrentDynaHashCxt = hashp->hcxt;
    p = (HASHSEGMENT *) hashp->alloc((Size) new_dirsize);

    if (p != NULL)
    {
        memcpy(p, old_p, old_dirsize);
        MemSet(((char *) p) + old_dirsize, 0, new_dirsize - old_dirsize);
        hashp->dir = p;
        hashp->hctl->dsize = new_dsize;

        /* XXX assume the allocator is palloc, so we know how to free */
        Assert(hashp->alloc == DynaHashAlloc);
        pfree(old_p);

        return true;
    }

    return false;
}

static void * DynaHashAlloc ( Size  size ) [static]

static bool element_alloc ( HTAB *  hashp, int  nelem ) [static]

Definition at line 1505 of file dynahash.c.

References HTAB::alloc, HASHHDR::entrysize, HASHHDR::freeList, HTAB::hctl, HTAB::hcxt, i, IS_PARTITIONED, HTAB::isfixed, HASHELEMENT::link, MAXALIGN, HASHHDR::mutex, SpinLockAcquire, and SpinLockRelease.

Referenced by get_hash_entry(), and hash_create().

{
    /* use volatile pointer to prevent code rearrangement */
    volatile HASHHDR *hctlv = hashp->hctl;
    Size        elementSize;
    HASHELEMENT *firstElement;
    HASHELEMENT *tmpElement;
    HASHELEMENT *prevElement;
    int         i;

    if (hashp->isfixed)
        return false;

    /* Each element has a HASHELEMENT header plus user data. */
    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctlv->entrysize);

    CurrentDynaHashCxt = hashp->hcxt;
    firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);

    if (!firstElement)
        return false;

    /* prepare to link all the new entries into the freelist */
    prevElement = NULL;
    tmpElement = firstElement;
    for (i = 0; i < nelem; i++)
    {
        tmpElement->link = prevElement;
        prevElement = tmpElement;
        tmpElement = (HASHELEMENT *) (((char *) tmpElement) + elementSize);
    }

    /* if partitioned, must lock to touch freeList */
    if (IS_PARTITIONED(hctlv))
        SpinLockAcquire(&hctlv->mutex);

    /* freelist could be nonempty if two backends did this concurrently */
    firstElement->link = hctlv->freeList;
    hctlv->freeList = prevElement;

    if (IS_PARTITIONED(hctlv))
        SpinLockRelease(&hctlv->mutex);

    return true;
}

static bool expand_table ( HTAB *  hashp ) [static]

Definition at line 1350 of file dynahash.c.

References Assert, calc_bucket(), HTAB::dir, dir_realloc(), HASHHDR::dsize, HASHELEMENT::hashvalue, HTAB::hctl, HASHHDR::high_mask, IS_PARTITIONED, HASHELEMENT::link, HASHHDR::low_mask, HASHHDR::max_bucket, MOD, HASHHDR::nsegs, seg_alloc(), HTAB::sshift, and HTAB::ssize.

Referenced by hash_search_with_hash_value().

{
    HASHHDR    *hctl = hashp->hctl;
    HASHSEGMENT old_seg,
                new_seg;
    long        old_bucket,
                new_bucket;
    long        new_segnum,
                new_segndx;
    long        old_segnum,
                old_segndx;
    HASHBUCKET *oldlink,
               *newlink;
    HASHBUCKET  currElement,
                nextElement;

    Assert(!IS_PARTITIONED(hctl));

#ifdef HASH_STATISTICS
    hash_expansions++;
#endif

    new_bucket = hctl->max_bucket + 1;
    new_segnum = new_bucket >> hashp->sshift;
    new_segndx = MOD(new_bucket, hashp->ssize);

    if (new_segnum >= hctl->nsegs)
    {
        /* Allocate new segment if necessary -- could fail if dir full */
        if (new_segnum >= hctl->dsize)
            if (!dir_realloc(hashp))
                return false;
        if (!(hashp->dir[new_segnum] = seg_alloc(hashp)))
            return false;
        hctl->nsegs++;
    }

    /* OK, we created a new bucket */
    hctl->max_bucket++;

    /*
     * *Before* changing masks, find old bucket corresponding to same hash
     * values; values in that bucket may need to be relocated to new bucket.
     * Note that new_bucket is certainly larger than low_mask at this point,
     * so we can skip the first step of the regular hash mask calc.
     */
    old_bucket = (new_bucket & hctl->low_mask);

    /*
     * If we crossed a power of 2, readjust masks.
     */
    if ((uint32) new_bucket > hctl->high_mask)
    {
        hctl->low_mask = hctl->high_mask;
        hctl->high_mask = (uint32) new_bucket | hctl->low_mask;
    }

    /*
     * Relocate records to the new bucket.  NOTE: because of the way the hash
     * masking is done in calc_bucket, only one old bucket can need to be
     * split at this point.  With a different way of reducing the hash value,
     * that might not be true!
     */
    old_segnum = old_bucket >> hashp->sshift;
    old_segndx = MOD(old_bucket, hashp->ssize);

    old_seg = hashp->dir[old_segnum];
    new_seg = hashp->dir[new_segnum];

    oldlink = &old_seg[old_segndx];
    newlink = &new_seg[new_segndx];

    for (currElement = *oldlink;
         currElement != NULL;
         currElement = nextElement)
    {
        nextElement = currElement->link;
        if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
        {
            *oldlink = currElement;
            oldlink = &currElement->link;
        }
        else
        {
            *newlink = currElement;
            newlink = &currElement->link;
        }
    }
    /* don't forget to terminate the rebuilt hash chains... */
    *oldlink = NULL;
    *newlink = NULL;

    return true;
}

static HASHBUCKET get_hash_entry ( HTAB *  hashp ) [static]

Definition at line 1149 of file dynahash.c.

References element_alloc(), HASHHDR::freeList, HTAB::hctl, IS_PARTITIONED, HASHELEMENT::link, HASHHDR::mutex, HASHHDR::nelem_alloc, HASHHDR::nentries, NULL, SpinLockAcquire, and SpinLockRelease.

Referenced by hash_search_with_hash_value().

{
    /* use volatile pointer to prevent code rearrangement */
    volatile HASHHDR *hctlv = hashp->hctl;
    HASHBUCKET  newElement;

    for (;;)
    {
        /* if partitioned, must lock to touch nentries and freeList */
        if (IS_PARTITIONED(hctlv))
            SpinLockAcquire(&hctlv->mutex);

        /* try to get an entry from the freelist */
        newElement = hctlv->freeList;
        if (newElement != NULL)
            break;

        /* no free elements.  allocate another chunk of buckets */
        if (IS_PARTITIONED(hctlv))
            SpinLockRelease(&hctlv->mutex);

        if (!element_alloc(hashp, hctlv->nelem_alloc))
        {
            /* out of memory */
            return NULL;
        }
    }

    /* remove entry from freelist, bump nentries */
    hctlv->freeList = newElement->link;
    hctlv->nentries++;

    if (IS_PARTITIONED(hctlv))
        SpinLockRelease(&hctlv->mutex);

    return newElement;
}

uint32 get_hash_value ( HTAB *  hashp, const void *  keyPtr )

Definition at line 760 of file dynahash.c.

References HTAB::hash, and HTAB::keysize.

Referenced by BufTableHashCode(), and LockTagHashCode().

{
    return hashp->hash(keyPtr, hashp->keysize);
}

static bool has_seq_scans ( HTAB *  hashp ) [static]

Definition at line 1669 of file dynahash.c.

References i, and num_seq_scans.

Referenced by hash_freeze(), and hash_search_with_hash_value().

{
    int         i;

    for (i = 0; i < num_seq_scans; i++)
    {
        if (seq_scan_tables[i] == hashp)
            return true;
    }
    return false;
}

static void hash_corrupted ( HTAB *  hashp ) [static]

Definition at line 1553 of file dynahash.c.

References elog, FATAL, HTAB::isshared, PANIC, and HTAB::tabname.

Referenced by hash_search_with_hash_value(), and hash_update_hash_key().

{
    /*
     * If the corruption is in a shared hashtable, we'd better force a
     * systemwide restart.  Otherwise, just shut down this one backend.
     */
    if (hashp->isshared)
        elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
    else
        elog(FATAL, "hash table \"%s\" corrupted", hashp->tabname);
}

HTAB* hash_create ( const char *  tabname, long  nelem, HASHCTL *  info, int  flags )

Definition at line 267 of file dynahash.c.

References HASHCTL::alloc, HTAB::alloc, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, ALLOCSET_DEFAULT_MINSIZE, AllocSetContextCreate(), Assert, HTAB::dir, HASHCTL::dsize, HASHHDR::dsize, DynaHashAlloc(), element_alloc(), elog, HASHHDR::entrysize, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASHCTL::ffactor, HASHHDR::ffactor, HTAB::frozen, HASHCTL::hash, HTAB::hash, HASH_ALLOC, HASH_ATTACH, HASH_COMPARE, HASH_CONTEXT, HASH_DIRSIZE, HASH_ELEM, HASH_FFACTOR, HASH_FIXED_SIZE, HASH_FUNCTION, HASH_KEYCOPY, HASH_PARTITION, HASH_SEGMENT, HASH_SHARED_MEM, HASHCTL::hctl, HTAB::hctl, HTAB::hcxt, HASHCTL::hcxt, hdefault(), init_htab(), HTAB::isfixed, HTAB::isshared, HASHCTL::keycopy, HTAB::keycopy, HASHCTL::keysize, HASHHDR::keysize, HTAB::keysize, HASHCTL::match, HTAB::match, HASHCTL::max_dsize, HASHHDR::max_dsize, MemSet, my_log2(), next_pow2_int(), HASHHDR::num_partitions, HASHCTL::num_partitions, HASHHDR::sshift, HTAB::sshift, HASHCTL::ssize, HASHHDR::ssize, HTAB::ssize, string_compare(), string_hash(), strlcpy(), HTAB::tabname, and TopMemoryContext.

Referenced by _PG_init(), assign_record_type_typmod(), begin_heap_rewrite(), build_join_rel_hash(), BuildEventTriggerCache(), BuildTupleHashTable(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), createConnHash(), do_autovacuum(), EnablePortalManager(), find_oper_cache_entry(), find_rendezvous_variable(), get_btree_test_op(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), GetSerializableTransactionSnapshotInt(), gistInitBuildBuffers(), gistInitParentMap(), init_procedure_caches(), init_timezone_hashtable(), init_ts_config_cache(), InitializeAttoptCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLocks(), InitQueryHashTable(), load_categories_hash(), log_invalid_page(), lookup_collation_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), mdinit(), pgstat_collect_oids(), pgstat_init_function_usage(), pgstat_read_statsfiles(), plpgsql_HashTableInit(), PLy_add_exceptions(), populate_recordset_object_start(), rebuild_database_list(), record_C_func(), RelationCacheInitialize(), reset_dbentry_counters(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), ShmemInitHash(), smgropen(), tbm_create_pagetable(), and transformGraph().

{
    HTAB       *hashp;
    HASHHDR    *hctl;

    /*
     * For shared hash tables, we have a local hash header (HTAB struct) that
     * we allocate in TopMemoryContext; all else is in shared memory.
     *
     * For non-shared hash tables, everything including the hash header is in
     * a memory context created specially for the hash table --- this makes
     * hash_destroy very simple.  The memory context is made a child of either
     * a context specified by the caller, or TopMemoryContext if nothing is
     * specified.
     */
    if (flags & HASH_SHARED_MEM)
    {
        /* Set up to allocate the hash header */
        CurrentDynaHashCxt = TopMemoryContext;
    }
    else
    {
        /* Create the hash table's private memory context */
        if (flags & HASH_CONTEXT)
            CurrentDynaHashCxt = info->hcxt;
        else
            CurrentDynaHashCxt = TopMemoryContext;
        CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt,
                                                   tabname,
                                                   ALLOCSET_DEFAULT_MINSIZE,
                                                   ALLOCSET_DEFAULT_INITSIZE,
                                                   ALLOCSET_DEFAULT_MAXSIZE);
    }

    /* Initialize the hash header, plus a copy of the table name */
    hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
    MemSet(hashp, 0, sizeof(HTAB));

    hashp->tabname = (char *) (hashp + 1);
    strcpy(hashp->tabname, tabname);

    if (flags & HASH_FUNCTION)
        hashp->hash = info->hash;
    else
        hashp->hash = string_hash;      /* default hash function */

    /*
     * If you don't specify a match function, it defaults to string_compare if
     * you used string_hash (either explicitly or by default) and to memcmp
     * otherwise.  (Prior to PostgreSQL 7.4, memcmp was always used.)
     */
    if (flags & HASH_COMPARE)
        hashp->match = info->match;
    else if (hashp->hash == string_hash)
        hashp->match = (HashCompareFunc) string_compare;
    else
        hashp->match = memcmp;

    /*
     * Similarly, the key-copying function defaults to strlcpy or memcpy.
     */
    if (flags & HASH_KEYCOPY)
        hashp->keycopy = info->keycopy;
    else if (hashp->hash == string_hash)
        hashp->keycopy = (HashCopyFunc) strlcpy;
    else
        hashp->keycopy = memcpy;

    if (flags & HASH_ALLOC)
        hashp->alloc = info->alloc;
    else
        hashp->alloc = DynaHashAlloc;

    if (flags & HASH_SHARED_MEM)
    {
        /*
         * ctl structure and directory are preallocated for shared memory
         * tables.  Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
         * well.
         */
        hashp->hctl = info->hctl;
        hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
        hashp->hcxt = NULL;
        hashp->isshared = true;

        /* hash table already exists, we're just attaching to it */
        if (flags & HASH_ATTACH)
        {
            /* make local copies of some heavily-used values */
            hctl = hashp->hctl;
            hashp->keysize = hctl->keysize;
            hashp->ssize = hctl->ssize;
            hashp->sshift = hctl->sshift;

            return hashp;
        }
    }
    else
    {
        /* setup hash table defaults */
        hashp->hctl = NULL;
        hashp->dir = NULL;
        hashp->hcxt = CurrentDynaHashCxt;
        hashp->isshared = false;
    }

    if (!hashp->hctl)
    {
        hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR));
        if (!hashp->hctl)
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of memory")));
    }

    hashp->frozen = false;

    hdefault(hashp);

    hctl = hashp->hctl;

    if (flags & HASH_PARTITION)
    {
        /* Doesn't make sense to partition a local hash table */
        Assert(flags & HASH_SHARED_MEM);

        /*
         * The number of partitions had better be a power of 2. Also, it must
         * be less than INT_MAX (see init_htab()), so call the int version of
         * next_pow2.
         */
        Assert(info->num_partitions == next_pow2_int(info->num_partitions));

        hctl->num_partitions = info->num_partitions;
    }

    if (flags & HASH_SEGMENT)
    {
        hctl->ssize = info->ssize;
        hctl->sshift = my_log2(info->ssize);
        /* ssize had better be a power of 2 */
        Assert(hctl->ssize == (1L << hctl->sshift));
    }
    if (flags & HASH_FFACTOR)
        hctl->ffactor = info->ffactor;

    /*
     * SHM hash tables have fixed directory size passed by the caller.
     */
    if (flags & HASH_DIRSIZE)
    {
        hctl->max_dsize = info->max_dsize;
        hctl->dsize = info->dsize;
    }

    /*
     * hash table now allocates space for key and data but you have to say how
     * much space to allocate
     */
    if (flags & HASH_ELEM)
    {
        Assert(info->entrysize >= info->keysize);
        hctl->keysize = info->keysize;
        hctl->entrysize = info->entrysize;
    }

    /* make local copies of heavily-used constant fields */
    hashp->keysize = hctl->keysize;
    hashp->ssize = hctl->ssize;
    hashp->sshift = hctl->sshift;

    /* Build the hash directory structure */
    if (!init_htab(hashp, nelem))
        elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname);

    /*
     * For a shared hash table, preallocate the requested number of elements.
     * This reduces problems with run-time out-of-shared-memory conditions.
     *
     * For a non-shared hash table, preallocate the requested number of
     * elements if it's less than our chosen nelem_alloc.  This avoids wasting
     * space if the caller correctly estimates a small table size.
     */
    if ((flags & HASH_SHARED_MEM) ||
        nelem < hctl->nelem_alloc)
    {
        if (!element_alloc(hashp, (int) nelem))
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of memory")));
    }

    if (flags & HASH_FIXED_SIZE)
        hashp->isfixed = true;
    return hashp;
}
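
As an illustration, a minimal backend-local caller might look like the sketch below. The entry type, table name, key variable and size estimate are invented for the example; oid_hash is the hash-function helper this tree provides for Oid keys:

    /* hypothetical entry type: the key must be the first field */
    typedef struct MyEntry
    {
        Oid         key;
        int         count;
    } MyEntry;

    HASHCTL     ctl;
    HTAB       *my_table;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(Oid);
    ctl.entrysize = sizeof(MyEntry);
    ctl.hash = oid_hash;

    my_table = hash_create("My example table",
                           128,     /* expected number of entries */
                           &ctl,
                           HASH_ELEM | HASH_FUNCTION);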

void hash_destroy ( HTAB *  hashp )

Definition at line 714 of file dynahash.c.

References HTAB::alloc, Assert, DynaHashAlloc(), hash_stats(), HTAB::hcxt, MemoryContextDelete(), and NULL.

Referenced by clear_external_function_hash(), CompactCheckpointerRequestQueue(), InitLocks(), pgstat_recv_dropdb(), pgstat_recv_resetcounter(), pgstat_vacuum_stat(), populate_recordset_object_end(), PostPrepare_PredicateLocks(), ReleasePredicateLocks(), ResetUnloggedRelationsInDbspaceDir(), SetForwardFsyncRequests(), tbm_free(), and XLogCheckInvalidPages().

{
    if (hashp != NULL)
    {
        /* allocation method must be one we know how to free, too */
        Assert(hashp->alloc == DynaHashAlloc);
        /* so this hashtable must have its own context */
        Assert(hashp->hcxt != NULL);

        hash_stats("destroy", hashp);

        /*
         * Free everything by destroying the hash table's memory context.
         */
        MemoryContextDelete(hashp->hcxt);
    }
}

Size hash_estimate_size ( long  num_entries, Size  entrysize )

Definition at line 632 of file dynahash.c.

References add_size(), choose_nelem_alloc(), DEF_FFACTOR, DEF_SEGSIZE, MAXALIGN, mul_size(), and next_pow2_long().

Referenced by BufTableShmemSize(), CreateSharedMemoryAndSemaphores(), LockShmemSize(), pgss_memsize(), and PredicateLockShmemSize().

{
    Size        size;
    long        nBuckets,
                nSegments,
                nDirEntries,
                nElementAllocs,
                elementSize,
                elementAllocCnt;

    /* estimate number of buckets wanted */
    nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
    /* # of segments needed for nBuckets */
    nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
    /* directory entries */
    nDirEntries = DEF_DIRSIZE;
    while (nDirEntries < nSegments)
        nDirEntries <<= 1;      /* dir_alloc doubles dsize at each call */

    /* fixed control info */
    size = MAXALIGN(sizeof(HASHHDR));   /* but not HTAB, per above */
    /* directory */
    size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
    /* segments */
    size = add_size(size, mul_size(nSegments,
                                MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
    /* elements --- allocated in groups of choose_nelem_alloc() entries */
    elementAllocCnt = choose_nelem_alloc(entrysize);
    nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
    size = add_size(size,
                    mul_size(nElementAllocs,
                             mul_size(elementAllocCnt, elementSize)));

    return size;
}
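
A typical caller is a *ShmemSize() routine run while the server sizes its shared-memory request; the sketch below uses the hypothetical MyEntry type and function name from the earlier example:

    Size
    MyTableShmemSize(int max_entries)
    {
        return hash_estimate_size((long) max_entries, sizeof(MyEntry));
    }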

void hash_freeze ( HTAB *  hashp )

Definition at line 1333 of file dynahash.c.

References elog, ERROR, HTAB::frozen, has_seq_scans(), HTAB::isshared, and HTAB::tabname.

{
    if (hashp->isshared)
        elog(ERROR, "cannot freeze shared hashtable \"%s\"", hashp->tabname);
    if (!hashp->frozen && has_seq_scans(hashp))
        elog(ERROR, "cannot freeze hashtable \"%s\" because it has active scans",
             hashp->tabname);
    hashp->frozen = true;
}

long hash_get_num_entries ( HTAB *  hashp )

Definition at line 1191 of file dynahash.c.

References HTAB::hctl, and HASHHDR::nentries.

Referenced by compute_array_stats(), compute_tsvector_stats(), entry_alloc(), entry_dealloc(), get_crosstab_tuplestore(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), json_populate_record(), pgss_shmem_shutdown(), pgstat_vacuum_stat(), ResetUnloggedRelationsInDbspaceDir(), transformGraph(), and XLogHaveInvalidPages().

{
    /*
     * We currently don't bother with the mutex; it's only sensible to call
     * this function if you've got lock on all partitions of the table.
     */
    return hashp->hctl->nentries;
}

Size hash_get_shared_size ( HASHCTL *  info, int  flags )

Definition at line 703 of file dynahash.c.

References Assert, HASHCTL::dsize, HASH_DIRSIZE, and HASHCTL::max_dsize.

Referenced by ShmemInitHash().

{
    Assert(flags & HASH_DIRSIZE);
    Assert(info->dsize == info->max_dsize);
    return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
}

void* hash_search ( HTAB *  hashp, const void *  keyPtr, HASHACTION  action, bool *  foundPtr )

Definition at line 806 of file dynahash.c.

References HTAB::hash, hash_search_with_hash_value(), and HTAB::keysize.

Referenced by assign_record_type_typmod(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), build_join_rel(), build_join_rel_hash(), BuildEventTriggerCache(), CheckAndPromotePredicateLockRequest(), CheckForSerializableConflictOut(), CompactCheckpointerRequestQueue(), compile_plperl_function(), compile_pltcl_function(), compute_array_stats(), compute_tsvector_stats(), createNewConnection(), deleteConnection(), do_autovacuum(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), DropPreparedStatement(), DropRelFileNodeAllLocalBuffers(), DropRelFileNodeLocalBuffers(), entry_alloc(), entry_dealloc(), entry_reset(), EventCacheLookup(), FetchPreparedStatement(), find_funcstat_entry(), find_join_rel(), find_oper_cache_entry(), find_rendezvous_variable(), FindTupleHashEntry(), forget_invalid_pages(), forget_invalid_pages_db(), get_attribute_options(), get_btree_test_op(), get_pgstat_tabentry_relid(), get_tablespace(), GetComboCommandId(), GetConnection(), getConnectionByName(), getState(), gistGetNodeBuffer(), gistGetParent(), gistMemorizeParent(), gistRelocateBuildBuffersOnSplit(), hash_object_field_end(), InitPredicateLocks(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateTableSpaceCacheCallback(), json_populate_record(), LocalBufferAlloc(), LocalPrefetchBuffer(), LockAcquireExtended(), LockHasWaiters(), LockRelease(), log_invalid_page(), lookup_C_func(), lookup_collation_cache(), lookup_ts_config_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), LookupTupleHashEntry(), make_oper_cache_entry(), mdsync(), pg_tzset(), pgss_store(), pgstat_collect_oids(), pgstat_fetch_stat_dbentry(), pgstat_fetch_stat_funcentry(), pgstat_fetch_stat_tabentry(), pgstat_get_db_entry(), pgstat_get_tab_entry(), pgstat_init_function_usage(), pgstat_read_db_statsfile(), pgstat_read_statsfiles(), pgstat_recv_dropdb(), pgstat_recv_funcpurge(), pgstat_recv_funcstat(), pgstat_recv_resetsinglecounter(), pgstat_recv_tabpurge(), pgstat_recv_tabstat(), pgstat_vacuum_stat(), plperl_spi_exec_prepared(), plperl_spi_freeplan(), plperl_spi_prepare(), plperl_spi_query_prepared(), plpgsql_HashTableDelete(), plpgsql_HashTableInsert(), plpgsql_HashTableLookup(), pltcl_fetch_interp(), PLy_generate_spi_exceptions(), PLy_procedure_get(), PLy_spi_subtransaction_abort(), populate_recordset_object_end(), populate_recordset_object_field_end(), predicatelock_twophase_recover(), PredicateLockExists(), PredicateLockTwoPhaseFinish(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), record_C_func(), RegisterPredicateLockingXid(), ReleaseOneSerializableXact(), RememberFsyncRequest(), RemoveLocalLock(), ResetUnloggedRelationsInDbspaceDir(), rewrite_heap_dead_tuple(), rewrite_heap_tuple(), ri_FetchPreparedPlan(), ri_HashCompareOp(), ri_HashPreparedPlan(), ri_LoadConstraintInfo(), select_perl_context(), ShmemInitStruct(), smgrclose(), smgrclosenode(), smgropen(), StorePreparedStatement(), table_recheck_autovac(), tbm_create_pagetable(), tbm_find_pageentry(), tbm_get_pageentry(), tbm_intersect(), tbm_mark_page_lossy(), and tbm_page_is_lossy().

{
    return hash_search_with_hash_value(hashp,
                                       keyPtr,
                                       hashp->hash(keyPtr, hashp->keysize),
                                       action,
                                       foundPtr);
}
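
Continuing the hypothetical my_table/MyEntry sketch from hash_create(), and assuming lookup_oid holds the key of interest, typical call patterns look like this:

    MyEntry    *entry;
    bool        found;

    /* find-or-create; the caller must initialize non-key fields itself */
    entry = (MyEntry *) hash_search(my_table, &lookup_oid, HASH_ENTER, &found);
    if (!found)
        entry->count = 0;
    entry->count++;

    /* plain lookup; returns NULL if the key is absent */
    entry = (MyEntry *) hash_search(my_table, &lookup_oid, HASH_FIND, NULL);

    /* removal; the returned (now-recycled) entry must not be kept */
    (void) hash_search(my_table, &lookup_oid, HASH_REMOVE, NULL);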

void* hash_search_with_hash_value ( HTAB *  hashp, const void *  keyPtr, uint32  hashvalue, HASHACTION  action, bool *  foundPtr )

Definition at line 819 of file dynahash.c.

References HTAB::alloc, Assert, calc_bucket(), HTAB::dir, DynaHashAlloc(), ELEMENTKEY, elog, ereport, errcode(), errmsg(), ERROR, expand_table(), HASHHDR::ffactor, HASHHDR::freeList, HTAB::frozen, get_hash_entry(), has_seq_scans(), hash_corrupted(), HASH_ENTER, HASH_ENTER_NULL, HASH_FIND, HASH_REMOVE, HASHELEMENT::hashvalue, HTAB::hctl, IS_PARTITIONED, HTAB::isshared, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, HASHHDR::max_bucket, MOD, HASHHDR::mutex, HASHHDR::nentries, NULL, SpinLockAcquire, SpinLockRelease, HTAB::sshift, HTAB::ssize, and HTAB::tabname.

Referenced by BufTableDelete(), BufTableInsert(), BufTableLookup(), CheckTargetForConflictsIn(), CleanUpLock(), ClearOldPredicateLocks(), CreatePredicateLock(), DecrementParentLocks(), DeleteChildTargetLocks(), DeleteLockTarget(), DropAllPredicateLocksFromTable(), FastPathGetRelationLockEntry(), GetLockConflicts(), hash_search(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockRelease(), PageIsPredicateLocked(), PredicateLockAcquire(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetupLockInTable(), and TransferPredicateLocksToNewTarget().

{
    HASHHDR    *hctl = hashp->hctl;
    Size        keysize;
    uint32      bucket;
    long        segment_num;
    long        segment_ndx;
    HASHSEGMENT segp;
    HASHBUCKET  currBucket;
    HASHBUCKET *prevBucketPtr;
    HashCompareFunc match;

#if HASH_STATISTICS
    hash_accesses++;
    hctl->accesses++;
#endif

    /*
     * If inserting, check if it is time to split a bucket.
     *
     * NOTE: failure to expand table is not a fatal error, it just means we
     * have to run at higher fill factor than we wanted.  However, if we're
     * using the palloc allocator then it will throw error anyway on
     * out-of-memory, so we must do this before modifying the table.
     */
    if (action == HASH_ENTER || action == HASH_ENTER_NULL)
    {
        /*
         * Can't split if running in partitioned mode, nor if frozen, nor if
         * table is the subject of any active hash_seq_search scans.  Strange
         * order of these tests is to try to check cheaper conditions first.
         */
        if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
            hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
            !has_seq_scans(hashp))
            (void) expand_table(hashp);
    }

    /*
     * Do the initial lookup
     */
    bucket = calc_bucket(hctl, hashvalue);

    segment_num = bucket >> hashp->sshift;
    segment_ndx = MOD(bucket, hashp->ssize);

    segp = hashp->dir[segment_num];

    if (segp == NULL)
        hash_corrupted(hashp);

    prevBucketPtr = &segp[segment_ndx];
    currBucket = *prevBucketPtr;

    /*
     * Follow collision chain looking for matching key
     */
    match = hashp->match;       /* save one fetch in inner loop */
    keysize = hashp->keysize;   /* ditto */

    while (currBucket != NULL)
    {
        if (currBucket->hashvalue == hashvalue &&
            match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0)
            break;
        prevBucketPtr = &(currBucket->link);
        currBucket = *prevBucketPtr;
#if HASH_STATISTICS
        hash_collisions++;
        hctl->collisions++;
#endif
    }

    if (foundPtr)
        *foundPtr = (bool) (currBucket != NULL);

    /*
     * OK, now what?
     */
    switch (action)
    {
        case HASH_FIND:
            if (currBucket != NULL)
                return (void *) ELEMENTKEY(currBucket);
            return NULL;

        case HASH_REMOVE:
            if (currBucket != NULL)
            {
                /* use volatile pointer to prevent code rearrangement */
                volatile HASHHDR *hctlv = hctl;

                /* if partitioned, must lock to touch nentries and freeList */
                if (IS_PARTITIONED(hctlv))
                    SpinLockAcquire(&hctlv->mutex);

                Assert(hctlv->nentries > 0);
                hctlv->nentries--;

                /* remove record from hash bucket's chain. */
                *prevBucketPtr = currBucket->link;

                /* add the record to the freelist for this table.  */
                currBucket->link = hctlv->freeList;
                hctlv->freeList = currBucket;

                if (IS_PARTITIONED(hctlv))
                    SpinLockRelease(&hctlv->mutex);

                /*
                 * better hope the caller is synchronizing access to this
                 * element, because someone else is going to reuse it the next
                 * time something is added to the table
                 */
                return (void *) ELEMENTKEY(currBucket);
            }
            return NULL;

        case HASH_ENTER_NULL:
            /* ENTER_NULL does not work with palloc-based allocator */
            Assert(hashp->alloc != DynaHashAlloc);
            /* FALL THRU */

        case HASH_ENTER:
            /* Return existing element if found, else create one */
            if (currBucket != NULL)
                return (void *) ELEMENTKEY(currBucket);

            /* disallow inserts if frozen */
            if (hashp->frozen)
                elog(ERROR, "cannot insert into frozen hashtable \"%s\"",
                     hashp->tabname);

            currBucket = get_hash_entry(hashp);
            if (currBucket == NULL)
            {
                /* out of memory */
                if (action == HASH_ENTER_NULL)
                    return NULL;
                /* report a generic message */
                if (hashp->isshared)
                    ereport(ERROR,
                            (errcode(ERRCODE_OUT_OF_MEMORY),
                             errmsg("out of shared memory")));
                else
                    ereport(ERROR,
                            (errcode(ERRCODE_OUT_OF_MEMORY),
                             errmsg("out of memory")));
            }

            /* link into hashbucket chain */
            *prevBucketPtr = currBucket;
            currBucket->link = NULL;

            /* copy key into record */
            currBucket->hashvalue = hashvalue;
            hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize);

            /*
             * Caller is expected to fill the data field on return.  DO NOT
             * insert any code that could possibly throw error here, as doing
             * so would leave the table entry incomplete and hence corrupt the
             * caller's data structure.
             */

            return (void *) ELEMENTKEY(currBucket);
    }

    elog(ERROR, "unrecognized hash action code: %d", (int) action);

    return NULL;                /* keep compiler quiet */
}

long hash_select_dirsize ( long  num_entries  ) 

Definition at line 679 of file dynahash.c.

References DEF_FFACTOR, DEF_SEGSIZE, and next_pow2_long().

Referenced by ShmemInitHash().

{
    long        nBuckets,
                nSegments,
                nDirEntries;

    /* estimate number of buckets wanted */
    nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
    /* # of segments needed for nBuckets */
    nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
    /* directory entries */
    nDirEntries = DEF_DIRSIZE;
    while (nDirEntries < nSegments)
        nDirEntries <<= 1;      /* dir_alloc doubles dsize at each call */

    return nDirEntries;
}

void hash_seq_init ( HASH_SEQ_STATUS *  status, HTAB *  hashp )

Definition at line 1227 of file dynahash.c.

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::frozen, HASH_SEQ_STATUS::hashp, and register_seq_scan().

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), CheckTableForSerializableConflictIn(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), forget_invalid_pages(), forget_invalid_pages_db(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), InvalidateAttoptCacheCallback(), InvalidateConstraintCacheCallBack(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), mdsync(), packGraph(), pg_cursor(), pg_prepared_statement(), pg_stat_statements(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), pgstat_send_funcstats(), pgstat_vacuum_stat(), pgstat_write_db_statsfile(), pgstat_write_statsfiles(), plperl_fini(), PortalHashTableDeleteAll(), PostPrepare_Locks(), PreCommit_Portals(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RememberFsyncRequest(), selectColorTrigrams(), smgrcloseall(), tbm_begin_iterate(), tbm_intersect(), tbm_lossify(), tbm_union(), ThereAreNoReadyPortals(), TypeCacheRelCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().

{
    status->hashp = hashp;
    status->curBucket = 0;
    status->curEntry = NULL;
    if (!hashp->frozen)
        register_seq_scan(hashp);
}

void* hash_seq_search ( HASH_SEQ_STATUS *  status )

Definition at line 1237 of file dynahash.c.

References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::dir, ELEMENTKEY, hash_seq_term(), HASH_SEQ_STATUS::hashp, HTAB::hctl, HASHELEMENT::link, HASHHDR::max_bucket, MOD, NULL, HTAB::sshift, and HTAB::ssize.

Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), CheckTableForSerializableConflictIn(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), forget_invalid_pages(), forget_invalid_pages_db(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), InvalidateAttoptCacheCallback(), InvalidateConstraintCacheCallBack(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), mdsync(), packGraph(), pg_cursor(), pg_prepared_statement(), pg_stat_statements(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), pgstat_send_funcstats(), pgstat_vacuum_stat(), pgstat_write_db_statsfile(), pgstat_write_statsfiles(), plperl_fini(), PortalHashTableDeleteAll(), PostPrepare_Locks(), PreCommit_Portals(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RememberFsyncRequest(), selectColorTrigrams(), smgrcloseall(), tbm_begin_iterate(), tbm_intersect(), tbm_lossify(), tbm_union(), ThereAreNoReadyPortals(), TypeCacheRelCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().

{
    HTAB       *hashp;
    HASHHDR    *hctl;
    uint32      max_bucket;
    long        ssize;
    long        segment_num;
    long        segment_ndx;
    HASHSEGMENT segp;
    uint32      curBucket;
    HASHELEMENT *curElem;

    if ((curElem = status->curEntry) != NULL)
    {
        /* Continuing scan of curBucket... */
        status->curEntry = curElem->link;
        if (status->curEntry == NULL)   /* end of this bucket */
            ++status->curBucket;
        return (void *) ELEMENTKEY(curElem);
    }

    /*
     * Search for next nonempty bucket starting at curBucket.
     */
    curBucket = status->curBucket;
    hashp = status->hashp;
    hctl = hashp->hctl;
    ssize = hashp->ssize;
    max_bucket = hctl->max_bucket;

    if (curBucket > max_bucket)
    {
        hash_seq_term(status);
        return NULL;            /* search is done */
    }

    /*
     * first find the right segment in the table directory.
     */
    segment_num = curBucket >> hashp->sshift;
    segment_ndx = MOD(curBucket, ssize);

    segp = hashp->dir[segment_num];

    /*
     * Pick up the first item in this bucket's chain.  If chain is not empty
     * we can begin searching it.  Otherwise we have to advance to find the
     * next nonempty bucket.  We try to optimize that case since searching a
     * near-empty hashtable has to iterate this loop a lot.
     */
    while ((curElem = segp[segment_ndx]) == NULL)
    {
        /* empty bucket, advance to next */
        if (++curBucket > max_bucket)
        {
            status->curBucket = curBucket;
            hash_seq_term(status);
            return NULL;        /* search is done */
        }
        if (++segment_ndx >= ssize)
        {
            segment_num++;
            segment_ndx = 0;
            segp = hashp->dir[segment_num];
        }
    }

    /* Begin scan of curBucket... */
    status->curEntry = curElem->link;
    if (status->curEntry == NULL)       /* end of this bucket */
        ++curBucket;
    status->curBucket = curBucket;
    return (void *) ELEMENTKEY(curElem);
}
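
A complete scan over the hypothetical my_table from the earlier examples then follows the usual pattern:

    HASH_SEQ_STATUS status;
    MyEntry    *entry;

    hash_seq_init(&status, my_table);
    while ((entry = (MyEntry *) hash_seq_search(&status)) != NULL)
    {
        /*
         * Process entry here.  Deleting the entry just returned (via
         * HASH_REMOVE) is allowed; deleting any other entry during the
         * scan is undefined, and entries added meanwhile may or may not
         * be visited.
         */
    }
    /* the scan ran to completion, so hash_seq_term() is not required */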

void hash_seq_term ( HASH_SEQ_STATUS *  status )

void hash_stats ( const char *  where, HTAB *  hashp )

Definition at line 733 of file dynahash.c.

References HTAB::hctl, HASHHDR::keysize, HASHHDR::max_bucket, HASHHDR::nentries, and HASHHDR::nsegs.

Referenced by hash_destroy().

{
#if HASH_STATISTICS
    fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
            where, hashp->hctl->accesses, hashp->hctl->collisions);

    fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
            hashp->hctl->nentries, (long) hashp->hctl->keysize,
            hashp->hctl->max_bucket, hashp->hctl->nsegs);
    fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
            where, hash_accesses, hash_collisions);
    fprintf(stderr, "hash_stats: total expansions %ld\n",
            hash_expansions);
#endif
}

bool hash_update_hash_key ( HTAB *  hashp, void *  existingEntry, const void *  newKeyPtr )

Definition at line 1016 of file dynahash.c.

References calc_bucket(), HTAB::dir, ELEMENT_FROM_KEY, ELEMENTKEY, elog, ERROR, HTAB::frozen, HTAB::hash, hash_corrupted(), HASHELEMENT::hashvalue, HTAB::hctl, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, MOD, NULL, HTAB::sshift, HTAB::ssize, and HTAB::tabname.

Referenced by PostPrepare_Locks().

{
    HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry);
    HASHHDR    *hctl = hashp->hctl;
    uint32      newhashvalue;
    Size        keysize;
    uint32      bucket;
    uint32      newbucket;
    long        segment_num;
    long        segment_ndx;
    HASHSEGMENT segp;
    HASHBUCKET  currBucket;
    HASHBUCKET *prevBucketPtr;
    HASHBUCKET *oldPrevPtr;
    HashCompareFunc match;

#if HASH_STATISTICS
    hash_accesses++;
    hctl->accesses++;
#endif

    /* disallow updates if frozen */
    if (hashp->frozen)
        elog(ERROR, "cannot update in frozen hashtable \"%s\"",
             hashp->tabname);

    /*
     * Lookup the existing element using its saved hash value.  We need to
     * do this to be able to unlink it from its hash chain, but as a side
     * benefit we can verify the validity of the passed existingEntry pointer.
     */
    bucket = calc_bucket(hctl, existingElement->hashvalue);

    segment_num = bucket >> hashp->sshift;
    segment_ndx = MOD(bucket, hashp->ssize);

    segp = hashp->dir[segment_num];

    if (segp == NULL)
        hash_corrupted(hashp);

    prevBucketPtr = &segp[segment_ndx];
    currBucket = *prevBucketPtr;

    while (currBucket != NULL)
    {
        if (currBucket == existingElement)
            break;
        prevBucketPtr = &(currBucket->link);
        currBucket = *prevBucketPtr;
    }

    if (currBucket == NULL)
        elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"",
             hashp->tabname);

    oldPrevPtr = prevBucketPtr;

    /*
     * Now perform the equivalent of a HASH_ENTER operation to locate the
     * hash chain we want to put the entry into.
     */
    newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);

    newbucket = calc_bucket(hctl, newhashvalue);

    segment_num = newbucket >> hashp->sshift;
    segment_ndx = MOD(newbucket, hashp->ssize);

    segp = hashp->dir[segment_num];

    if (segp == NULL)
        hash_corrupted(hashp);

    prevBucketPtr = &segp[segment_ndx];
    currBucket = *prevBucketPtr;

    /*
     * Follow collision chain looking for matching key
     */
    match = hashp->match;       /* save one fetch in inner loop */
    keysize = hashp->keysize;   /* ditto */

    while (currBucket != NULL)
    {
        if (currBucket->hashvalue == newhashvalue &&
            match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0)
            break;
        prevBucketPtr = &(currBucket->link);
        currBucket = *prevBucketPtr;
#if HASH_STATISTICS
        hash_collisions++;
        hctl->collisions++;
#endif
    }

    if (currBucket != NULL)
        return false;           /* collision with an existing entry */

    currBucket = existingElement;

    /*
     * If old and new hash values belong to the same bucket, we need not
     * change any chain links, and indeed should not since this simplistic
     * update will corrupt the list if currBucket is the last element.  (We
     * cannot fall out earlier, however, since we need to scan the bucket to
     * check for duplicate keys.)
     */
    if (bucket != newbucket)
    {
        /* OK to remove record from old hash bucket's chain. */
        *oldPrevPtr = currBucket->link;

        /* link into new hashbucket chain */
        *prevBucketPtr = currBucket;
        currBucket->link = NULL;
    }

    /* copy new key into record */
    currBucket->hashvalue = newhashvalue;
    hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize);

    /* rest of record is untouched */

    return true;
}
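
A minimal usage sketch, reusing the hypothetical my_table and entry names from the earlier examples and assuming new_oid holds the replacement key:

    if (!hash_update_hash_key(my_table, entry, &new_oid))
        elog(ERROR, "could not re-key entry: new key already exists");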

static void hdefault ( HTAB *  hashp ) [static]

Definition at line 468 of file dynahash.c.

References HASHHDR::dsize, HASHHDR::entrysize, HASHHDR::ffactor, HASHHDR::freeList, HTAB::hctl, HASHHDR::keysize, HASHHDR::max_dsize, MemSet, HASHHDR::nentries, HASHHDR::nsegs, HASHHDR::num_partitions, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

{
    HASHHDR    *hctl = hashp->hctl;

    MemSet(hctl, 0, sizeof(HASHHDR));

    hctl->nentries = 0;
    hctl->freeList = NULL;

    hctl->dsize = DEF_DIRSIZE;
    hctl->nsegs = 0;

    /* rather pointless defaults for key & entry size */
    hctl->keysize = sizeof(char *);
    hctl->entrysize = 2 * sizeof(char *);

    hctl->num_partitions = 0;   /* not partitioned */

    hctl->ffactor = DEF_FFACTOR;

    /* table has no fixed maximum size */
    hctl->max_dsize = NO_MAX_DSIZE;

    hctl->ssize = DEF_SEGSIZE;
    hctl->sshift = DEF_SEGSIZE_SHIFT;

#ifdef HASH_STATISTICS
    hctl->accesses = hctl->collisions = 0;
#endif
}

static bool init_htab ( HTAB *  hashp, long  nelem ) [static]

Definition at line 537 of file dynahash.c.

References HTAB::alloc, choose_nelem_alloc(), HTAB::dir, HASHHDR::dsize, HASHHDR::entrysize, HASHHDR::ffactor, HTAB::hctl, HTAB::hcxt, HASHHDR::high_mask, IS_PARTITIONED, HASHHDR::low_mask, HASHHDR::max_bucket, HASHHDR::mutex, HASHHDR::nelem_alloc, HASHHDR::nentries, next_pow2_int(), HASHHDR::nsegs, NULL, seg_alloc(), SpinLockInit, HASHHDR::sshift, and HASHHDR::ssize.

Referenced by hash_create().

{
    HASHHDR    *hctl = hashp->hctl;
    HASHSEGMENT *segp;
    int         nbuckets;
    int         nsegs;

    /*
     * initialize mutex if it's a partitioned table
     */
    if (IS_PARTITIONED(hctl))
        SpinLockInit(&hctl->mutex);

    /*
     * Divide number of elements by the fill factor to determine a desired
     * number of buckets.  Allocate space for the next greater power of two
     * number of buckets
     */
    nbuckets = next_pow2_int((nelem - 1) / hctl->ffactor + 1);

    /*
     * In a partitioned table, nbuckets must be at least equal to
     * num_partitions; were it less, keys with apparently different partition
     * numbers would map to the same bucket, breaking partition independence.
     * (Normally nbuckets will be much bigger; this is just a safety check.)
     */
    while (nbuckets < hctl->num_partitions)
        nbuckets <<= 1;

    hctl->max_bucket = hctl->low_mask = nbuckets - 1;
    hctl->high_mask = (nbuckets << 1) - 1;

    /*
     * Figure number of directory segments needed, round up to a power of 2
     */
    nsegs = (nbuckets - 1) / hctl->ssize + 1;
    nsegs = next_pow2_int(nsegs);

    /*
     * Make sure directory is big enough. If pre-allocated directory is too
     * small, choke (caller screwed up).
     */
    if (nsegs > hctl->dsize)
    {
        if (!(hashp->dir))
            hctl->dsize = nsegs;
        else
            return false;
    }

    /* Allocate a directory */
    if (!(hashp->dir))
    {
        CurrentDynaHashCxt = hashp->hcxt;
        hashp->dir = (HASHSEGMENT *)
            hashp->alloc(hctl->dsize * sizeof(HASHSEGMENT));
        if (!hashp->dir)
            return false;
    }

    /* Allocate initial segments */
    for (segp = hashp->dir; hctl->nsegs < nsegs; hctl->nsegs++, segp++)
    {
        *segp = seg_alloc(hashp);
        if (*segp == NULL)
            return false;
    }

    /* Choose number of entries to allocate at a time */
    hctl->nelem_alloc = choose_nelem_alloc(hctl->entrysize);

#if HASH_DEBUG
    fprintf(stderr, "init_htab:\n%s%p\n%s%ld\n%s%ld\n%s%d\n%s%ld\n%s%u\n%s%x\n%s%x\n%s%ld\n%s%ld\n",
            "TABLE POINTER   ", hashp,
            "DIRECTORY SIZE  ", hctl->dsize,
            "SEGMENT SIZE    ", hctl->ssize,
            "SEGMENT SHIFT   ", hctl->sshift,
            "FILL FACTOR     ", hctl->ffactor,
            "MAX BUCKET      ", hctl->max_bucket,
            "HIGH MASK       ", hctl->high_mask,
            "LOW  MASK       ", hctl->low_mask,
            "NSEGS           ", hctl->nsegs,
            "NENTRIES        ", hctl->nentries);
#endif
    return true;
}

int my_log2 ( long  num  ) 

Definition at line 1567 of file dynahash.c.

References i.

Referenced by ExecHashTableCreate(), hash_create(), next_pow2_int(), and next_pow2_long().

{
    int         i;
    long        limit;

    /* guard against too-large input, which would put us into infinite loop */
    if (num > LONG_MAX / 2)
        num = LONG_MAX / 2;

    for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
        ;
    return i;
}
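
For example (worked values):

    my_log2(1)    == 0
    my_log2(1000) == 10      /* 1 << 10 = 1024 is the first power of two >= 1000 */
    my_log2(1024) == 10
    next_pow2_long(1000) == 1024
    next_pow2_long(1024) == 1024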

static int next_pow2_int ( long  num  )  [static]

Definition at line 1591 of file dynahash.c.

References my_log2().

Referenced by hash_create(), and init_htab().

{
    if (num > INT_MAX / 2)
        num = INT_MAX / 2;
    return 1 << my_log2(num);
}

static long next_pow2_long ( long  num  )  [static]

Definition at line 1583 of file dynahash.c.

References my_log2().

Referenced by hash_estimate_size(), and hash_select_dirsize().

{
    /* my_log2's internal range check is sufficient */
    return 1L << my_log2(num);
}

static void register_seq_scan ( HTAB *  hashp ) [static]

Definition at line 1636 of file dynahash.c.

References elog, ERROR, GetCurrentTransactionNestLevel(), MAX_SEQ_SCANS, num_seq_scans, seq_scan_level, and HTAB::tabname.

Referenced by hash_seq_init().

{
    if (num_seq_scans >= MAX_SEQ_SCANS)
        elog(ERROR, "too many active hash_seq_search scans, cannot start one on \"%s\"",
             hashp->tabname);
    seq_scan_tables[num_seq_scans] = hashp;
    seq_scan_level[num_seq_scans] = GetCurrentTransactionNestLevel();
    num_seq_scans++;
}

static HASHSEGMENT seg_alloc ( HTAB *  hashp ) [static]

Definition at line 1486 of file dynahash.c.

References HTAB::alloc, HTAB::hcxt, MemSet, and HTAB::ssize.

Referenced by expand_table(), and init_htab().

{
    HASHSEGMENT segp;

    CurrentDynaHashCxt = hashp->hcxt;
    segp = (HASHSEGMENT) hashp->alloc(sizeof(HASHBUCKET) * hashp->ssize);

    if (!segp)
        return NULL;

    MemSet(segp, 0, sizeof(HASHBUCKET) * hashp->ssize);

    return segp;
}

static int string_compare ( const char *  key1, const char *  key2, Size  keysize ) [static]

Definition at line 244 of file dynahash.c.

Referenced by hash_create().

{
    return strncmp(key1, key2, keysize - 1);
}


Variable Documentation

MemoryContext CurrentDynaHashCxt = NULL [static]

Definition at line 226 of file dynahash.c.

int num_seq_scans = 0 [static]

int seq_scan_level[MAX_SEQ_SCANS] [static]

Definition at line 1630 of file dynahash.c.

Referenced by AtEOSubXact_HashTables(), deregister_seq_scan(), and register_seq_scan().

HTAB* seq_scan_tables[MAX_SEQ_SCANS] [static]

Definition at line 1629 of file dynahash.c.