Data Structures
    struct HASHELEMENT
    struct HASHCTL
    struct HASH_SEQ_STATUS

Defines
    #define HASH_PARTITION   0x001
    #define HASH_SEGMENT     0x002
    #define HASH_DIRSIZE     0x004
    #define HASH_FFACTOR     0x008
    #define HASH_FUNCTION    0x010
    #define HASH_ELEM        0x020
    #define HASH_SHARED_MEM  0x040
    #define HASH_ATTACH      0x080
    #define HASH_ALLOC       0x100
    #define HASH_CONTEXT     0x200
    #define HASH_COMPARE     0x400
    #define HASH_KEYCOPY     0x800
    #define HASH_FIXED_SIZE  0x1000
    #define NO_MAX_DSIZE     (-1)

Typedefs
    typedef uint32 (*HashValueFunc) (const void *key, Size keysize)
    typedef int (*HashCompareFunc) (const void *key1, const void *key2, Size keysize)
    typedef void *(*HashCopyFunc) (void *dest, const void *src, Size keysize)
    typedef void *(*HashAllocFunc) (Size request)
    typedef struct HASHELEMENT HASHELEMENT
    typedef struct HASHHDR HASHHDR
    typedef struct HTAB HTAB
    typedef struct HASHCTL HASHCTL

Enumerations
    enum HASHACTION { HASH_FIND, HASH_ENTER, HASH_REMOVE, HASH_ENTER_NULL }

Functions
    HTAB *hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
    void hash_destroy(HTAB *hashp)
    void hash_stats(const char *where, HTAB *hashp)
    void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
    uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
    void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
    bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
    long hash_get_num_entries(HTAB *hashp)
    void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
    void *hash_seq_search(HASH_SEQ_STATUS *status)
    void hash_seq_term(HASH_SEQ_STATUS *status)
    void hash_freeze(HTAB *hashp)
    Size hash_estimate_size(long num_entries, Size entrysize)
    long hash_select_dirsize(long num_entries)
    Size hash_get_shared_size(HASHCTL *info, int flags)
    void AtEOXact_HashTables(bool isCommit)
    void AtEOSubXact_HashTables(bool isCommit, int nestDepth)
    uint32 string_hash(const void *key, Size keysize)
    uint32 tag_hash(const void *key, Size keysize)
    uint32 oid_hash(const void *key, Size keysize)
    uint32 bitmap_hash(const void *key, Size keysize)
    int bitmap_match(const void *key1, const void *key2, Size keysize)
#define HASH_ALLOC 0x100
Definition at line 91 of file hsearch.h.
Referenced by hash_create(), and ShmemInitHash().
#define HASH_ATTACH 0x080
Definition at line 90 of file hsearch.h.
Referenced by hash_create().
#define HASH_COMPARE 0x400
Definition at line 93 of file hsearch.h.
Referenced by build_join_rel_hash(), BuildTupleHashTable(), compute_array_stats(), compute_tsvector_stats(), gistInitBuildBuffers(), hash_create(), and pgss_shmem_startup().
#define HASH_CONTEXT 0x200
Definition at line 92 of file hsearch.h.
Referenced by begin_heap_rewrite(), build_join_rel_hash(), BuildEventTriggerCache(), BuildTupleHashTable(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), gistInitBuildBuffers(), gistInitParentMap(), hash_create(), load_categories_hash(), mdinit(), pgstat_collect_oids(), pgstat_read_statsfiles(), populate_recordset_object_start(), rebuild_database_list(), tbm_create_pagetable(), and transformGraph().
#define HASH_DIRSIZE 0x004
Definition at line 85 of file hsearch.h.
Referenced by hash_create(), and hash_get_shared_size().
#define HASH_ELEM 0x020
Definition at line 88 of file hsearch.h.
Referenced by _PG_init(), assign_record_type_typmod(), begin_heap_rewrite(), build_join_rel_hash(), BuildEventTriggerCache(), BuildTupleHashTable(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), createConnHash(), do_autovacuum(), EnablePortalManager(), find_oper_cache_entry(), find_rendezvous_variable(), get_btree_test_op(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), GetSerializableTransactionSnapshotInt(), gistInitBuildBuffers(), gistInitParentMap(), hash_create(), init_procedure_caches(), init_timezone_hashtable(), init_ts_config_cache(), InitBufTable(), InitializeAttoptCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLocks(), InitPredicateLocks(), InitQueryHashTable(), load_categories_hash(), log_invalid_page(), lookup_collation_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), mdinit(), pgss_shmem_startup(), pgstat_collect_oids(), pgstat_init_function_usage(), pgstat_read_statsfiles(), plpgsql_HashTableInit(), PLy_add_exceptions(), populate_recordset_object_start(), rebuild_database_list(), record_C_func(), RelationCacheInitialize(), reset_dbentry_counters(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), smgropen(), tbm_create_pagetable(), and transformGraph().
#define HASH_FFACTOR 0x008
Definition at line 86 of file hsearch.h.
Referenced by hash_create().
#define HASH_FIXED_SIZE 0x1000
Definition at line 95 of file hsearch.h.
Referenced by hash_create().
#define HASH_FUNCTION 0x010
Definition at line 87 of file hsearch.h.
Referenced by _PG_init(), assign_record_type_typmod(), begin_heap_rewrite(), build_join_rel_hash(), BuildEventTriggerCache(), BuildTupleHashTable(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), do_autovacuum(), find_oper_cache_entry(), get_btree_test_op(), GetComboCommandId(), GetConnection(), GetSerializableTransactionSnapshotInt(), gistInitBuildBuffers(), gistInitParentMap(), hash_create(), init_procedure_caches(), init_ts_config_cache(), InitBufTable(), InitializeAttoptCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLocks(), InitPredicateLocks(), log_invalid_page(), lookup_collation_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), mdinit(), pgss_shmem_startup(), pgstat_collect_oids(), pgstat_init_function_usage(), pgstat_read_statsfiles(), plpgsql_HashTableInit(), PLy_add_exceptions(), rebuild_database_list(), record_C_func(), RelationCacheInitialize(), reset_dbentry_counters(), ri_InitHashTables(), smgropen(), tbm_create_pagetable(), and transformGraph().
#define HASH_KEYCOPY 0x800
Definition at line 94 of file hsearch.h.
Referenced by hash_create().
#define HASH_PARTITION 0x001
Definition at line 83 of file hsearch.h.
Referenced by hash_create(), InitBufTable(), and InitPredicateLocks().
#define HASH_SEGMENT 0x002
Definition at line 84 of file hsearch.h.
Referenced by hash_create().
#define HASH_SHARED_MEM 0x040
Definition at line 89 of file hsearch.h.
Referenced by hash_create(), and ShmemInitHash().
#define NO_MAX_DSIZE (-1)
Definition at line 99 of file hsearch.h.
Referenced by dir_realloc().
typedef void*(* HashAllocFunc)(Size request) |
typedef int(* HashCompareFunc)(const void *key1, const void *key2, Size keysize) |
typedef void*(* HashCopyFunc)(void *dest, const void *src, Size keysize) |
typedef struct HASHELEMENT HASHELEMENT |
typedef uint32(* HashValueFunc)(const void *key, Size keysize) |
enum HASHACTION |
Definition at line 102 of file hsearch.h.
{
    HASH_FIND,
    HASH_ENTER,
    HASH_REMOVE,
    HASH_ENTER_NULL
} HASHACTION;
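The action code tells hash_search() what to do when the key is or is not present: HASH_FIND only looks up, HASH_ENTER inserts (or returns the existing entry), HASH_REMOVE unlinks and returns the removed entry, and HASH_ENTER_NULL behaves like HASH_ENTER but returns NULL instead of raising an out-of-memory error (shared tables only). Below is a minimal sketch of the FIND/REMOVE cases, assuming the usual postgres.h and utils/hsearch.h includes; MyEntry and the Oid key are hypothetical, and the table is assumed to have been built with hash_create() as shown later on this page.

/* Hypothetical entry type: the hash key must be the first field. */
typedef struct MyEntry
{
    Oid         key;            /* hash key */
    int         counter;        /* payload */
} MyEntry;

static int
my_lookup_and_forget(HTAB *mytab, Oid key)
{
    MyEntry    *entry;

    /* HASH_FIND: returns the entry, or NULL if the key is absent */
    entry = (MyEntry *) hash_search(mytab, &key, HASH_FIND, NULL);
    if (entry == NULL)
        return -1;

    /*
     * HASH_REMOVE: unlinks the entry and returns a pointer to it; the
     * storage is recycled at the next insertion, so use it immediately.
     */
    entry = (MyEntry *) hash_search(mytab, &key, HASH_REMOVE, NULL);
    return entry->counter;
}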
void AtEOSubXact_HashTables(bool isCommit, int nestDepth)
Definition at line 1709 of file dynahash.c.
References elog, i, num_seq_scans, seq_scan_level, and WARNING.
Referenced by AbortSubTransaction(), and CommitSubTransaction().
{
    int         i;

    /*
     * Search backward to make cleanup easy.  Note we must check all entries,
     * not only those at the end of the array, because deletion technique
     * doesn't keep them in order.
     */
    for (i = num_seq_scans - 1; i >= 0; i--)
    {
        if (seq_scan_level[i] >= nestDepth)
        {
            if (isCommit)
                elog(WARNING, "leaked hash_seq_search scan for hash table %p",
                     seq_scan_tables[i]);
            seq_scan_tables[i] = seq_scan_tables[num_seq_scans - 1];
            seq_scan_level[i] = seq_scan_level[num_seq_scans - 1];
            num_seq_scans--;
        }
    }
}
void AtEOXact_HashTables(bool isCommit)
Definition at line 1683 of file dynahash.c.
References elog, i, num_seq_scans, and WARNING.
Referenced by AbortTransaction(), BackgroundWriterMain(), CheckpointerMain(), CommitTransaction(), PrepareTransaction(), and WalWriterMain().
{
    /*
     * During abort cleanup, open scans are expected; just silently clean 'em
     * out.  An open scan at commit means someone forgot a hash_seq_term()
     * call, so complain.
     *
     * Note: it's tempting to try to print the tabname here, but refrain for
     * fear of touching deallocated memory.  This isn't a user-facing message
     * anyway, so it needn't be pretty.
     */
    if (isCommit)
    {
        int         i;

        for (i = 0; i < num_seq_scans; i++)
        {
            elog(WARNING, "leaked hash_seq_search scan for hash table %p",
                 seq_scan_tables[i]);
        }
    }
    num_seq_scans = 0;
}
uint32 bitmap_hash(const void *key, Size keysize)
Definition at line 75 of file hashfn.c.
References Assert, and bms_hash_value().
{
    Assert(keysize == sizeof(Bitmapset *));
    return bms_hash_value(*((const Bitmapset *const *) key));
}
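bitmap_hash() is meant to be paired with bitmap_match() (below) for tables keyed by Bitmapset pointers, as build_join_rel_hash() does. The following is a hedged sketch of the corresponding HASHCTL setup; JoinRelEntry, the table name, and the size hint are made-up for illustration.

/* Hypothetical entry keyed by a Bitmapset pointer */
typedef struct JoinRelEntry
{
    Bitmapset  *key;            /* hash key: the Bitmapset pointer itself */
    void       *payload;
} JoinRelEntry;

static HTAB *
make_bitmapset_table(void)
{
    HASHCTL     hash_ctl;

    MemSet(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(Bitmapset *);     /* bitmap_hash asserts this */
    hash_ctl.entrysize = sizeof(JoinRelEntry);
    hash_ctl.hash = bitmap_hash;
    hash_ctl.match = bitmap_match;
    hash_ctl.hcxt = CurrentMemoryContext;

    return hash_create("join rel cache", 256, &hash_ctl,
                       HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
}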
int bitmap_match(const void *key1, const void *key2, Size keysize)
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition at line 760 of file dynahash.c.
References HTAB::hash, and HTAB::keysize.
Referenced by BufTableHashCode(), and LockTagHashCode().
HTAB *hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition at line 267 of file dynahash.c.
References HASHCTL::alloc, HTAB::alloc, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, ALLOCSET_DEFAULT_MINSIZE, AllocSetContextCreate(), Assert, HTAB::dir, HASHCTL::dsize, HASHHDR::dsize, DynaHashAlloc(), element_alloc(), elog, HASHHDR::entrysize, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASHCTL::ffactor, HASHHDR::ffactor, HTAB::frozen, HASHCTL::hash, HTAB::hash, HASH_ALLOC, HASH_ATTACH, HASH_COMPARE, HASH_CONTEXT, HASH_DIRSIZE, HASH_ELEM, HASH_FFACTOR, HASH_FIXED_SIZE, HASH_FUNCTION, HASH_KEYCOPY, HASH_PARTITION, HASH_SEGMENT, HASH_SHARED_MEM, HASHCTL::hctl, HTAB::hctl, HTAB::hcxt, HASHCTL::hcxt, hdefault(), init_htab(), HTAB::isfixed, HTAB::isshared, HASHCTL::keycopy, HTAB::keycopy, HASHCTL::keysize, HASHHDR::keysize, HTAB::keysize, HASHCTL::match, HTAB::match, HASHCTL::max_dsize, HASHHDR::max_dsize, MemSet, my_log2(), next_pow2_int(), HASHHDR::num_partitions, HASHCTL::num_partitions, HASHHDR::sshift, HTAB::sshift, HASHCTL::ssize, HASHHDR::ssize, HTAB::ssize, string_compare(), string_hash(), strlcpy(), HTAB::tabname, and TopMemoryContext.
Referenced by _PG_init(), assign_record_type_typmod(), begin_heap_rewrite(), build_join_rel_hash(), BuildEventTriggerCache(), BuildTupleHashTable(), CompactCheckpointerRequestQueue(), compute_array_stats(), compute_tsvector_stats(), createConnHash(), do_autovacuum(), EnablePortalManager(), find_oper_cache_entry(), find_rendezvous_variable(), get_btree_test_op(), get_json_object_as_hash(), GetComboCommandId(), GetConnection(), GetSerializableTransactionSnapshotInt(), gistInitBuildBuffers(), gistInitParentMap(), init_procedure_caches(), init_timezone_hashtable(), init_ts_config_cache(), InitializeAttoptCache(), InitializeTableSpaceCache(), InitLocalBuffers(), InitLocks(), InitQueryHashTable(), load_categories_hash(), log_invalid_page(), lookup_collation_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), mdinit(), pgstat_collect_oids(), pgstat_init_function_usage(), pgstat_read_statsfiles(), plpgsql_HashTableInit(), PLy_add_exceptions(), populate_recordset_object_start(), rebuild_database_list(), record_C_func(), RelationCacheInitialize(), reset_dbentry_counters(), ResetUnloggedRelationsInDbspaceDir(), ri_InitHashTables(), select_perl_context(), ShmemInitHash(), smgropen(), tbm_create_pagetable(), and transformGraph().
{ HTAB *hashp; HASHHDR *hctl; /* * For shared hash tables, we have a local hash header (HTAB struct) that * we allocate in TopMemoryContext; all else is in shared memory. * * For non-shared hash tables, everything including the hash header is in * a memory context created specially for the hash table --- this makes * hash_destroy very simple. The memory context is made a child of either * a context specified by the caller, or TopMemoryContext if nothing is * specified. */ if (flags & HASH_SHARED_MEM) { /* Set up to allocate the hash header */ CurrentDynaHashCxt = TopMemoryContext; } else { /* Create the hash table's private memory context */ if (flags & HASH_CONTEXT) CurrentDynaHashCxt = info->hcxt; else CurrentDynaHashCxt = TopMemoryContext; CurrentDynaHashCxt = AllocSetContextCreate(CurrentDynaHashCxt, tabname, ALLOCSET_DEFAULT_MINSIZE, ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE); } /* Initialize the hash header, plus a copy of the table name */ hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1); MemSet(hashp, 0, sizeof(HTAB)); hashp->tabname = (char *) (hashp + 1); strcpy(hashp->tabname, tabname); if (flags & HASH_FUNCTION) hashp->hash = info->hash; else hashp->hash = string_hash; /* default hash function */ /* * If you don't specify a match function, it defaults to string_compare if * you used string_hash (either explicitly or by default) and to memcmp * otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.) */ if (flags & HASH_COMPARE) hashp->match = info->match; else if (hashp->hash == string_hash) hashp->match = (HashCompareFunc) string_compare; else hashp->match = memcmp; /* * Similarly, the key-copying function defaults to strlcpy or memcpy. */ if (flags & HASH_KEYCOPY) hashp->keycopy = info->keycopy; else if (hashp->hash == string_hash) hashp->keycopy = (HashCopyFunc) strlcpy; else hashp->keycopy = memcpy; if (flags & HASH_ALLOC) hashp->alloc = info->alloc; else hashp->alloc = DynaHashAlloc; if (flags & HASH_SHARED_MEM) { /* * ctl structure and directory are preallocated for shared memory * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as * well. */ hashp->hctl = info->hctl; hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR)); hashp->hcxt = NULL; hashp->isshared = true; /* hash table already exists, we're just attaching to it */ if (flags & HASH_ATTACH) { /* make local copies of some heavily-used values */ hctl = hashp->hctl; hashp->keysize = hctl->keysize; hashp->ssize = hctl->ssize; hashp->sshift = hctl->sshift; return hashp; } } else { /* setup hash table defaults */ hashp->hctl = NULL; hashp->dir = NULL; hashp->hcxt = CurrentDynaHashCxt; hashp->isshared = false; } if (!hashp->hctl) { hashp->hctl = (HASHHDR *) hashp->alloc(sizeof(HASHHDR)); if (!hashp->hctl) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } hashp->frozen = false; hdefault(hashp); hctl = hashp->hctl; if (flags & HASH_PARTITION) { /* Doesn't make sense to partition a local hash table */ Assert(flags & HASH_SHARED_MEM); /* * The number of partitions had better be a power of 2. Also, it must * be less than INT_MAX (see init_htab()), so call the int version of * next_pow2. 
*/ Assert(info->num_partitions == next_pow2_int(info->num_partitions)); hctl->num_partitions = info->num_partitions; } if (flags & HASH_SEGMENT) { hctl->ssize = info->ssize; hctl->sshift = my_log2(info->ssize); /* ssize had better be a power of 2 */ Assert(hctl->ssize == (1L << hctl->sshift)); } if (flags & HASH_FFACTOR) hctl->ffactor = info->ffactor; /* * SHM hash tables have fixed directory size passed by the caller. */ if (flags & HASH_DIRSIZE) { hctl->max_dsize = info->max_dsize; hctl->dsize = info->dsize; } /* * hash table now allocates space for key and data but you have to say how * much space to allocate */ if (flags & HASH_ELEM) { Assert(info->entrysize >= info->keysize); hctl->keysize = info->keysize; hctl->entrysize = info->entrysize; } /* make local copies of heavily-used constant fields */ hashp->keysize = hctl->keysize; hashp->ssize = hctl->ssize; hashp->sshift = hctl->sshift; /* Build the hash directory structure */ if (!init_htab(hashp, nelem)) elog(ERROR, "failed to initialize hash table \"%s\"", hashp->tabname); /* * For a shared hash table, preallocate the requested number of elements. * This reduces problems with run-time out-of-shared-memory conditions. * * For a non-shared hash table, preallocate the requested number of * elements if it's less than our chosen nelem_alloc. This avoids wasting * space if the caller correctly estimates a small table size. */ if ((flags & HASH_SHARED_MEM) || nelem < hctl->nelem_alloc) { if (!element_alloc(hashp, (int) nelem)) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } if (flags & HASH_FIXED_SIZE) hashp->isfixed = true; return hashp; }
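A minimal sketch of the common creation pattern for a private (non-shared) table: zero the HASHCTL, set only the fields whose flags you pass, and let everything else default. MyKey, MyEntry, the table name, and the 128-entry size hint are hypothetical; tag_hash fits here because the key is fixed-size binary data.

typedef struct MyKey
{
    Oid         relid;
    int         attno;
} MyKey;

typedef struct MyEntry
{
    MyKey       key;            /* hash key; must be the first field */
    double      stats;          /* payload */
} MyEntry;

static HTAB *
make_my_table(MemoryContext cxt)
{
    HASHCTL     ctl;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(MyKey);        /* read because of HASH_ELEM */
    ctl.entrysize = sizeof(MyEntry);
    ctl.hash = tag_hash;                /* read because of HASH_FUNCTION */
    ctl.hcxt = cxt;                     /* read because of HASH_CONTEXT */

    return hash_create("my entry cache",    /* name used in error messages */
                       128,                 /* initial size estimate */
                       &ctl,
                       HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
}

Because hash_create() puts everything for a private table into a memory context of its own, hash_destroy() (below) can free the whole table by deleting that context.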
void hash_destroy(HTAB *hashp)
Definition at line 714 of file dynahash.c.
References HTAB::alloc, Assert, DynaHashAlloc(), hash_stats(), HTAB::hcxt, MemoryContextDelete(), and NULL.
Referenced by clear_external_function_hash(), CompactCheckpointerRequestQueue(), InitLocks(), pgstat_recv_dropdb(), pgstat_recv_resetcounter(), pgstat_vacuum_stat(), populate_recordset_object_end(), PostPrepare_PredicateLocks(), ReleasePredicateLocks(), ResetUnloggedRelationsInDbspaceDir(), SetForwardFsyncRequests(), tbm_free(), and XLogCheckInvalidPages().
{
    if (hashp != NULL)
    {
        /* allocation method must be one we know how to free, too */
        Assert(hashp->alloc == DynaHashAlloc);
        /* so this hashtable must have it's own context */
        Assert(hashp->hcxt != NULL);

        hash_stats("destroy", hashp);

        /*
         * Free everything by destroying the hash table's memory context.
         */
        MemoryContextDelete(hashp->hcxt);
    }
}
Size hash_estimate_size(long num_entries, Size entrysize)
Definition at line 632 of file dynahash.c.
References add_size(), choose_nelem_alloc(), DEF_FFACTOR, DEF_SEGSIZE, MAXALIGN, mul_size(), and next_pow2_long().
Referenced by BufTableShmemSize(), CreateSharedMemoryAndSemaphores(), LockShmemSize(), pgss_memsize(), and PredicateLockShmemSize().
{
    Size        size;
    long        nBuckets,
                nSegments,
                nDirEntries,
                nElementAllocs,
                elementSize,
                elementAllocCnt;

    /* estimate number of buckets wanted */
    nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
    /* # of segments needed for nBuckets */
    nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
    /* directory entries */
    nDirEntries = DEF_DIRSIZE;
    while (nDirEntries < nSegments)
        nDirEntries <<= 1;      /* dir_alloc doubles dsize at each call */

    /* fixed control info */
    size = MAXALIGN(sizeof(HASHHDR));   /* but not HTAB, per above */
    /* directory */
    size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
    /* segments */
    size = add_size(size, mul_size(nSegments,
                                   MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
    /* elements --- allocated in groups of choose_nelem_alloc() entries */
    elementAllocCnt = choose_nelem_alloc(entrysize);
    nElementAllocs = (num_entries - 1) / elementAllocCnt + 1;
    elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
    size = add_size(size,
                    mul_size(nElementAllocs,
                             mul_size(elementAllocCnt, elementSize)));

    return size;
}
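A hedged sketch of how a shared-memory consumer might use this at sizing time; ShmemEntry and the 4096-entry budget are made-up values for illustration.

typedef struct ShmemEntry
{
    uint32      key;            /* hash key */
    uint32      value;
} ShmemEntry;

/* Shared-memory space request for a table of at most 4096 such entries */
static Size
my_table_shmem_size(void)
{
    return hash_estimate_size(4096, sizeof(ShmemEntry));
}

As the body's comment notes, the result covers the HASHHDR, directory, segments, and element pool, but not the per-backend HTAB struct, which lives in local memory.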
void hash_freeze(HTAB *hashp)
Definition at line 1333 of file dynahash.c.
References elog, ERROR, HTAB::frozen, has_seq_scans(), HTAB::isshared, and HTAB::tabname.
long hash_get_num_entries(HTAB *hashp)
Definition at line 1191 of file dynahash.c.
References HTAB::hctl, and HASHHDR::nentries.
Referenced by compute_array_stats(), compute_tsvector_stats(), entry_alloc(), entry_dealloc(), get_crosstab_tuplestore(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), json_populate_record(), pgss_shmem_shutdown(), pgstat_vacuum_stat(), ResetUnloggedRelationsInDbspaceDir(), transformGraph(), and XLogHaveInvalidPages().
Size hash_get_shared_size(HASHCTL *info, int flags)
Definition at line 703 of file dynahash.c.
References Assert, HASHCTL::dsize, HASH_DIRSIZE, and HASHCTL::max_dsize.
Referenced by ShmemInitHash().
{
    Assert(flags & HASH_DIRSIZE);
    Assert(info->dsize == info->max_dsize);
    return sizeof(HASHHDR) + info->dsize * sizeof(HASHSEGMENT);
}
void *hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition at line 806 of file dynahash.c.
References HTAB::hash, hash_search_with_hash_value(), and HTAB::keysize.
Referenced by assign_record_type_typmod(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), build_join_rel(), build_join_rel_hash(), BuildEventTriggerCache(), CheckAndPromotePredicateLockRequest(), CheckForSerializableConflictOut(), CompactCheckpointerRequestQueue(), compile_plperl_function(), compile_pltcl_function(), compute_array_stats(), compute_tsvector_stats(), createNewConnection(), deleteConnection(), do_autovacuum(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), DropPreparedStatement(), DropRelFileNodeAllLocalBuffers(), DropRelFileNodeLocalBuffers(), entry_alloc(), entry_dealloc(), entry_reset(), EventCacheLookup(), FetchPreparedStatement(), find_funcstat_entry(), find_join_rel(), find_oper_cache_entry(), find_rendezvous_variable(), FindTupleHashEntry(), forget_invalid_pages(), forget_invalid_pages_db(), get_attribute_options(), get_btree_test_op(), get_pgstat_tabentry_relid(), get_tablespace(), GetComboCommandId(), GetConnection(), getConnectionByName(), getState(), gistGetNodeBuffer(), gistGetParent(), gistMemorizeParent(), gistRelocateBuildBuffersOnSplit(), hash_object_field_end(), InitPredicateLocks(), InvalidateAttoptCacheCallback(), InvalidateOprCacheCallBack(), InvalidateTableSpaceCacheCallback(), json_populate_record(), LocalBufferAlloc(), LocalPrefetchBuffer(), LockAcquireExtended(), LockHasWaiters(), LockRelease(), log_invalid_page(), lookup_C_func(), lookup_collation_cache(), lookup_ts_config_cache(), lookup_ts_dictionary_cache(), lookup_ts_parser_cache(), lookup_type_cache(), LookupOpclassInfo(), LookupTupleHashEntry(), make_oper_cache_entry(), mdsync(), pg_tzset(), pgss_store(), pgstat_collect_oids(), pgstat_fetch_stat_dbentry(), pgstat_fetch_stat_funcentry(), pgstat_fetch_stat_tabentry(), pgstat_get_db_entry(), pgstat_get_tab_entry(), pgstat_init_function_usage(), pgstat_read_db_statsfile(), pgstat_read_statsfiles(), pgstat_recv_dropdb(), pgstat_recv_funcpurge(), pgstat_recv_funcstat(), pgstat_recv_resetsinglecounter(), pgstat_recv_tabpurge(), pgstat_recv_tabstat(), pgstat_vacuum_stat(), plperl_spi_exec_prepared(), plperl_spi_freeplan(), plperl_spi_prepare(), plperl_spi_query_prepared(), plpgsql_HashTableDelete(), plpgsql_HashTableInsert(), plpgsql_HashTableLookup(), pltcl_fetch_interp(), PLy_generate_spi_exceptions(), PLy_procedure_get(), PLy_spi_subtransaction_abort(), populate_recordset_object_end(), populate_recordset_object_field_end(), predicatelock_twophase_recover(), PredicateLockExists(), PredicateLockTwoPhaseFinish(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), record_C_func(), RegisterPredicateLockingXid(), ReleaseOneSerializableXact(), RememberFsyncRequest(), RemoveLocalLock(), ResetUnloggedRelationsInDbspaceDir(), rewrite_heap_dead_tuple(), rewrite_heap_tuple(), ri_FetchPreparedPlan(), ri_HashCompareOp(), ri_HashPreparedPlan(), ri_LoadConstraintInfo(), select_perl_context(), ShmemInitStruct(), smgrclose(), smgrclosenode(), smgropen(), StorePreparedStatement(), table_recheck_autovac(), tbm_create_pagetable(), tbm_find_pageentry(), tbm_get_pageentry(), tbm_intersect(), tbm_mark_page_lossy(), and tbm_page_is_lossy().
{
    return hash_search_with_hash_value(hashp,
                                       keyPtr,
                                       hashp->hash(keyPtr, hashp->keysize),
                                       action,
                                       foundPtr);
}
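The usual insert-or-find idiom: on HASH_ENTER the key portion of a new entry is filled in by hash_search() itself (via the table's keycopy function), and the caller initializes the rest only when *foundPtr comes back false. A minimal sketch; CounterEntry and its Oid key are hypothetical.

typedef struct CounterEntry
{
    Oid         key;            /* hash key; must be the first field */
    long        count;          /* payload, filled in by the caller */
} CounterEntry;

static void
bump_counter(HTAB *counts, Oid key)
{
    CounterEntry *entry;
    bool        found;

    entry = (CounterEntry *) hash_search(counts, &key, HASH_ENTER, &found);
    if (!found)
        entry->count = 0;       /* brand-new entry: initialize the payload */
    entry->count++;
}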
void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition at line 819 of file dynahash.c.
References HTAB::alloc, Assert, calc_bucket(), HTAB::dir, DynaHashAlloc(), ELEMENTKEY, elog, ereport, errcode(), errmsg(), ERROR, expand_table(), HASHHDR::ffactor, HASHHDR::freeList, HTAB::frozen, get_hash_entry(), has_seq_scans(), hash_corrupted(), HASH_ENTER, HASH_ENTER_NULL, HASH_FIND, HASH_REMOVE, HASHELEMENT::hashvalue, HTAB::hctl, IS_PARTITIONED, HTAB::isshared, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, HASHHDR::max_bucket, MOD, HASHHDR::mutex, HASHHDR::nentries, NULL, SpinLockAcquire, SpinLockRelease, HTAB::sshift, HTAB::ssize, and HTAB::tabname.
Referenced by BufTableDelete(), BufTableInsert(), BufTableLookup(), CheckTargetForConflictsIn(), CleanUpLock(), ClearOldPredicateLocks(), CreatePredicateLock(), DecrementParentLocks(), DeleteChildTargetLocks(), DeleteLockTarget(), DropAllPredicateLocksFromTable(), FastPathGetRelationLockEntry(), GetLockConflicts(), hash_search(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockRelease(), PageIsPredicateLocked(), PredicateLockAcquire(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetupLockInTable(), and TransferPredicateLocksToNewTarget().
{ HASHHDR *hctl = hashp->hctl; Size keysize; uint32 bucket; long segment_num; long segment_ndx; HASHSEGMENT segp; HASHBUCKET currBucket; HASHBUCKET *prevBucketPtr; HashCompareFunc match; #if HASH_STATISTICS hash_accesses++; hctl->accesses++; #endif /* * If inserting, check if it is time to split a bucket. * * NOTE: failure to expand table is not a fatal error, it just means we * have to run at higher fill factor than we wanted. However, if we're * using the palloc allocator then it will throw error anyway on * out-of-memory, so we must do this before modifying the table. */ if (action == HASH_ENTER || action == HASH_ENTER_NULL) { /* * Can't split if running in partitioned mode, nor if frozen, nor if * table is the subject of any active hash_seq_search scans. Strange * order of these tests is to try to check cheaper conditions first. */ if (!IS_PARTITIONED(hctl) && !hashp->frozen && hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor && !has_seq_scans(hashp)) (void) expand_table(hashp); } /* * Do the initial lookup */ bucket = calc_bucket(hctl, hashvalue); segment_num = bucket >> hashp->sshift; segment_ndx = MOD(bucket, hashp->ssize); segp = hashp->dir[segment_num]; if (segp == NULL) hash_corrupted(hashp); prevBucketPtr = &segp[segment_ndx]; currBucket = *prevBucketPtr; /* * Follow collision chain looking for matching key */ match = hashp->match; /* save one fetch in inner loop */ keysize = hashp->keysize; /* ditto */ while (currBucket != NULL) { if (currBucket->hashvalue == hashvalue && match(ELEMENTKEY(currBucket), keyPtr, keysize) == 0) break; prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; #if HASH_STATISTICS hash_collisions++; hctl->collisions++; #endif } if (foundPtr) *foundPtr = (bool) (currBucket != NULL); /* * OK, now what? */ switch (action) { case HASH_FIND: if (currBucket != NULL) return (void *) ELEMENTKEY(currBucket); return NULL; case HASH_REMOVE: if (currBucket != NULL) { /* use volatile pointer to prevent code rearrangement */ volatile HASHHDR *hctlv = hctl; /* if partitioned, must lock to touch nentries and freeList */ if (IS_PARTITIONED(hctlv)) SpinLockAcquire(&hctlv->mutex); Assert(hctlv->nentries > 0); hctlv->nentries--; /* remove record from hash bucket's chain. */ *prevBucketPtr = currBucket->link; /* add the record to the freelist for this table. 
*/ currBucket->link = hctlv->freeList; hctlv->freeList = currBucket; if (IS_PARTITIONED(hctlv)) SpinLockRelease(&hctlv->mutex); /* * better hope the caller is synchronizing access to this * element, because someone else is going to reuse it the next * time something is added to the table */ return (void *) ELEMENTKEY(currBucket); } return NULL; case HASH_ENTER_NULL: /* ENTER_NULL does not work with palloc-based allocator */ Assert(hashp->alloc != DynaHashAlloc); /* FALL THRU */ case HASH_ENTER: /* Return existing element if found, else create one */ if (currBucket != NULL) return (void *) ELEMENTKEY(currBucket); /* disallow inserts if frozen */ if (hashp->frozen) elog(ERROR, "cannot insert into frozen hashtable \"%s\"", hashp->tabname); currBucket = get_hash_entry(hashp); if (currBucket == NULL) { /* out of memory */ if (action == HASH_ENTER_NULL) return NULL; /* report a generic message */ if (hashp->isshared) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"))); else ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"))); } /* link into hashbucket chain */ *prevBucketPtr = currBucket; currBucket->link = NULL; /* copy key into record */ currBucket->hashvalue = hashvalue; hashp->keycopy(ELEMENTKEY(currBucket), keyPtr, keysize); /* * Caller is expected to fill the data field on return. DO NOT * insert any code that could possibly throw error here, as doing * so would leave the table entry incomplete and hence corrupt the * caller's data structure. */ return (void *) ELEMENTKEY(currBucket); } elog(ERROR, "unrecognized hash action code: %d", (int) action); return NULL; /* keep compiler quiet */ }
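Splitting the hash computation out with get_hash_value() is the pattern used for partitioned shared tables, where the hash value is needed first to pick a partition lock (as BufTableLookup() and the lock manager do). A hedged sketch follows; SharedEntry is hypothetical and the partition locking is elided to comments, since the lock scheme belongs to the caller.

typedef struct SharedEntry
{
    uint32      key;            /* hash key */
    int         payload;
} SharedEntry;

static int
lookup_shared_payload(HTAB *shared_tab, uint32 key)
{
    uint32      hashcode;
    SharedEntry *entry;
    int         result = -1;

    /* Compute the hash once; the same value selects the partition lock. */
    hashcode = get_hash_value(shared_tab, &key);

    /* ... acquire the partition lock this hashcode maps to ... */

    entry = (SharedEntry *) hash_search_with_hash_value(shared_tab, &key,
                                                        hashcode, HASH_FIND,
                                                        NULL);
    if (entry != NULL)
        result = entry->payload;

    /* ... release the partition lock ... */

    return result;
}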
long hash_select_dirsize(long num_entries)
Definition at line 679 of file dynahash.c.
References DEF_FFACTOR, DEF_SEGSIZE, and next_pow2_long().
Referenced by ShmemInitHash().
{
    long        nBuckets,
                nSegments,
                nDirEntries;

    /* estimate number of buckets wanted */
    nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
    /* # of segments needed for nBuckets */
    nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
    /* directory entries */
    nDirEntries = DEF_DIRSIZE;
    while (nDirEntries < nSegments)
        nDirEntries <<= 1;      /* dir_alloc doubles dsize at each call */

    return nDirEntries;
}
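For a shared table, hash_select_dirsize() and hash_get_shared_size() are typically used together at sizing time, in the spirit of what ShmemInitHash() does. A hedged sketch, with a made-up entry budget; the element space itself would be requested separately, e.g. via hash_estimate_size() above.

#define MY_SHARED_ENTRIES 8192  /* hypothetical table size */

/* Sketch: size the shared header plus fixed directory for such a table. */
static Size
my_shared_header_size(HASHCTL *info)
{
    /* hash_get_shared_size requires HASH_DIRSIZE and dsize == max_dsize */
    info->dsize = info->max_dsize = hash_select_dirsize(MY_SHARED_ENTRIES);

    return hash_get_shared_size(info, HASH_DIRSIZE);
}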
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition at line 1227 of file dynahash.c.
References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::frozen, HASH_SEQ_STATUS::hashp, and register_seq_scan().
Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), CheckTableForSerializableConflictIn(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), forget_invalid_pages(), forget_invalid_pages_db(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), InvalidateAttoptCacheCallback(), InvalidateConstraintCacheCallBack(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), mdsync(), packGraph(), pg_cursor(), pg_prepared_statement(), pg_stat_statements(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), pgstat_send_funcstats(), pgstat_vacuum_stat(), pgstat_write_db_statsfile(), pgstat_write_statsfiles(), plperl_fini(), PortalHashTableDeleteAll(), PostPrepare_Locks(), PreCommit_Portals(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RememberFsyncRequest(), selectColorTrigrams(), smgrcloseall(), tbm_begin_iterate(), tbm_intersect(), tbm_lossify(), tbm_union(), ThereAreNoReadyPortals(), TypeCacheRelCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().
{
    status->hashp = hashp;
    status->curBucket = 0;
    status->curEntry = NULL;
    if (!hashp->frozen)
        register_seq_scan(hashp);
}
void *hash_seq_search(HASH_SEQ_STATUS *status)
Definition at line 1237 of file dynahash.c.
References HASH_SEQ_STATUS::curBucket, HASH_SEQ_STATUS::curEntry, HTAB::dir, ELEMENTKEY, hash_seq_term(), HASH_SEQ_STATUS::hashp, HTAB::hctl, HASHELEMENT::link, HASHHDR::max_bucket, MOD, NULL, HTAB::sshift, and HTAB::ssize.
Referenced by AtAbort_Portals(), AtCleanup_Portals(), AtEOSubXact_RelationCache(), AtEOXact_RelationCache(), AtPrepare_Locks(), AtSubAbort_Portals(), AtSubCleanup_Portals(), AtSubCommit_Portals(), CheckTableForSerializableConflictIn(), compute_array_stats(), compute_tsvector_stats(), dblink_get_connections(), DropAllPredicateLocksFromTable(), DropAllPreparedStatements(), end_heap_rewrite(), entry_dealloc(), entry_reset(), forget_invalid_pages(), forget_invalid_pages_db(), GetLockStatusData(), GetPredicateLockStatusData(), GetRunningTransactionLocks(), InvalidateAttoptCacheCallback(), InvalidateConstraintCacheCallBack(), InvalidateOprCacheCallBack(), InvalidateOprProofCacheCallBack(), InvalidateTableSpaceCacheCallback(), InvalidateTSCacheCallBack(), LockReassignCurrentOwner(), LockReleaseAll(), LockReleaseCurrentOwner(), LockReleaseSession(), mdsync(), packGraph(), pg_cursor(), pg_prepared_statement(), pg_stat_statements(), pgfdw_subxact_callback(), pgfdw_xact_callback(), pgss_shmem_shutdown(), pgstat_send_funcstats(), pgstat_vacuum_stat(), pgstat_write_db_statsfile(), pgstat_write_statsfiles(), plperl_fini(), PortalHashTableDeleteAll(), PostPrepare_Locks(), PreCommit_Portals(), prune_element_hashtable(), prune_lexemes_hashtable(), rebuild_database_list(), RelationCacheInitializePhase3(), RelationCacheInvalidate(), RememberFsyncRequest(), selectColorTrigrams(), smgrcloseall(), tbm_begin_iterate(), tbm_intersect(), tbm_lossify(), tbm_union(), ThereAreNoReadyPortals(), TypeCacheRelCallback(), write_relcache_init_file(), and XLogCheckInvalidPages().
{
    HTAB       *hashp;
    HASHHDR    *hctl;
    uint32      max_bucket;
    long        ssize;
    long        segment_num;
    long        segment_ndx;
    HASHSEGMENT segp;
    uint32      curBucket;
    HASHELEMENT *curElem;

    if ((curElem = status->curEntry) != NULL)
    {
        /* Continuing scan of curBucket... */
        status->curEntry = curElem->link;
        if (status->curEntry == NULL)   /* end of this bucket */
            ++status->curBucket;
        return (void *) ELEMENTKEY(curElem);
    }

    /*
     * Search for next nonempty bucket starting at curBucket.
     */
    curBucket = status->curBucket;
    hashp = status->hashp;
    hctl = hashp->hctl;
    ssize = hashp->ssize;
    max_bucket = hctl->max_bucket;

    if (curBucket > max_bucket)
    {
        hash_seq_term(status);
        return NULL;            /* search is done */
    }

    /*
     * first find the right segment in the table directory.
     */
    segment_num = curBucket >> hashp->sshift;
    segment_ndx = MOD(curBucket, ssize);

    segp = hashp->dir[segment_num];

    /*
     * Pick up the first item in this bucket's chain.  If chain is not empty
     * we can begin searching it.  Otherwise we have to advance to find the
     * next nonempty bucket.  We try to optimize that case since searching a
     * near-empty hashtable has to iterate this loop a lot.
     */
    while ((curElem = segp[segment_ndx]) == NULL)
    {
        /* empty bucket, advance to next */
        if (++curBucket > max_bucket)
        {
            status->curBucket = curBucket;
            hash_seq_term(status);
            return NULL;        /* search is done */
        }
        if (++segment_ndx >= ssize)
        {
            segment_num++;
            segment_ndx = 0;
            segp = hashp->dir[segment_num];
        }
    }

    /* Begin scan of curBucket... */
    status->curEntry = curElem->link;
    if (status->curEntry == NULL)       /* end of this bucket */
        ++curBucket;
    status->curBucket = curBucket;
    return (void *) ELEMENTKEY(curElem);
}
void hash_seq_term(HASH_SEQ_STATUS *status)
Definition at line 1313 of file dynahash.c.
References deregister_seq_scan(), HTAB::frozen, and HASH_SEQ_STATUS::hashp.
Referenced by hash_seq_search(), PortalHashTableDeleteAll(), PreCommit_Portals(), RelationCacheInitializePhase3(), and tbm_lossify().
{
    if (!status->hashp->frozen)
        deregister_seq_scan(status->hashp);
}
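Putting the three sequential-scan calls together: hash_seq_term() is only needed when the scan is abandoned before hash_seq_search() returns NULL, since a completed scan terminates itself (see the body above). A minimal sketch over the hypothetical CounterEntry type introduced earlier on this page.

static long
sum_counters(HTAB *counts, long stop_after)
{
    HASH_SEQ_STATUS status;
    CounterEntry *entry;
    long        total = 0;

    hash_seq_init(&status, counts);
    while ((entry = (CounterEntry *) hash_seq_search(&status)) != NULL)
    {
        total += entry->count;
        if (total >= stop_after)
        {
            /* bailing out early: must release the scan registration */
            hash_seq_term(&status);
            break;
        }
    }
    return total;
}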
void hash_stats(const char *where, HTAB *hashp)
Definition at line 733 of file dynahash.c.
References HTAB::hctl, HASHHDR::keysize, HASHHDR::max_bucket, HASHHDR::nentries, and HASHHDR::nsegs.
Referenced by hash_destroy().
{
#if HASH_STATISTICS
    fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
            where, hashp->hctl->accesses, hashp->hctl->collisions);
    fprintf(stderr,
            "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
            hashp->hctl->nentries, (long) hashp->hctl->keysize,
            hashp->hctl->max_bucket, hashp->hctl->nsegs);
    fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
            where, hash_accesses, hash_collisions);
    fprintf(stderr, "hash_stats: total expansions %ld\n",
            hash_expansions);
#endif
}
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition at line 1016 of file dynahash.c.
References calc_bucket(), HTAB::dir, ELEMENT_FROM_KEY, ELEMENTKEY, elog, ERROR, HTAB::frozen, HTAB::hash, hash_corrupted(), HASHELEMENT::hashvalue, HTAB::hctl, HTAB::keycopy, HTAB::keysize, HASHELEMENT::link, HTAB::match, MOD, NULL, HTAB::sshift, HTAB::ssize, and HTAB::tabname.
Referenced by PostPrepare_Locks().
{ HASHELEMENT *existingElement = ELEMENT_FROM_KEY(existingEntry); HASHHDR *hctl = hashp->hctl; uint32 newhashvalue; Size keysize; uint32 bucket; uint32 newbucket; long segment_num; long segment_ndx; HASHSEGMENT segp; HASHBUCKET currBucket; HASHBUCKET *prevBucketPtr; HASHBUCKET *oldPrevPtr; HashCompareFunc match; #if HASH_STATISTICS hash_accesses++; hctl->accesses++; #endif /* disallow updates if frozen */ if (hashp->frozen) elog(ERROR, "cannot update in frozen hashtable \"%s\"", hashp->tabname); /* * Lookup the existing element using its saved hash value. We need to * do this to be able to unlink it from its hash chain, but as a side * benefit we can verify the validity of the passed existingEntry pointer. */ bucket = calc_bucket(hctl, existingElement->hashvalue); segment_num = bucket >> hashp->sshift; segment_ndx = MOD(bucket, hashp->ssize); segp = hashp->dir[segment_num]; if (segp == NULL) hash_corrupted(hashp); prevBucketPtr = &segp[segment_ndx]; currBucket = *prevBucketPtr; while (currBucket != NULL) { if (currBucket == existingElement) break; prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; } if (currBucket == NULL) elog(ERROR, "hash_update_hash_key argument is not in hashtable \"%s\"", hashp->tabname); oldPrevPtr = prevBucketPtr; /* * Now perform the equivalent of a HASH_ENTER operation to locate the * hash chain we want to put the entry into. */ newhashvalue = hashp->hash(newKeyPtr, hashp->keysize); newbucket = calc_bucket(hctl, newhashvalue); segment_num = newbucket >> hashp->sshift; segment_ndx = MOD(newbucket, hashp->ssize); segp = hashp->dir[segment_num]; if (segp == NULL) hash_corrupted(hashp); prevBucketPtr = &segp[segment_ndx]; currBucket = *prevBucketPtr; /* * Follow collision chain looking for matching key */ match = hashp->match; /* save one fetch in inner loop */ keysize = hashp->keysize; /* ditto */ while (currBucket != NULL) { if (currBucket->hashvalue == newhashvalue && match(ELEMENTKEY(currBucket), newKeyPtr, keysize) == 0) break; prevBucketPtr = &(currBucket->link); currBucket = *prevBucketPtr; #if HASH_STATISTICS hash_collisions++; hctl->collisions++; #endif } if (currBucket != NULL) return false; /* collision with an existing entry */ currBucket = existingElement; /* * If old and new hash values belong to the same bucket, we need not * change any chain links, and indeed should not since this simplistic * update will corrupt the list if currBucket is the last element. (We * cannot fall out earlier, however, since we need to scan the bucket to * check for duplicate keys.) */ if (bucket != newbucket) { /* OK to remove record from old hash bucket's chain. */ *oldPrevPtr = currBucket->link; /* link into new hashbucket chain */ *prevBucketPtr = currBucket; currBucket->link = NULL; } /* copy new key into record */ currBucket->hashvalue = newhashvalue; hashp->keycopy(ELEMENTKEY(currBucket), newKeyPtr, keysize); /* rest of record is untouched */ return true; }
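A hedged sketch of re-keying an entry in place, in the spirit of PostPrepare_Locks(): look the entry up first, then ask for its key to be replaced; a false result means the new key already exists and nothing was changed. CounterEntry is the same hypothetical type used in the earlier examples.

static bool
rename_counter(HTAB *counts, Oid oldkey, Oid newkey)
{
    CounterEntry *entry;

    entry = (CounterEntry *) hash_search(counts, &oldkey, HASH_FIND, NULL);
    if (entry == NULL)
        return false;           /* nothing to rename */

    /* moves the entry to the chain for newkey, unless newkey is taken */
    return hash_update_hash_key(counts, entry, &newkey);
}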
uint32 oid_hash(const void *key, Size keysize)
Definition at line 63 of file hashfn.c.
References Assert, DatumGetUInt32, and hash_uint32().
{
    Assert(keysize == sizeof(Oid));
    return DatumGetUInt32(hash_uint32((uint32) *((const Oid *) key)));
}
uint32 string_hash(const void *key, Size keysize)
Definition at line 33 of file hashfn.c.
References DatumGetUInt32, hash_any(), and Min.
Referenced by hash_create().
{
    /*
     * If the string exceeds keysize-1 bytes, we want to hash only that many,
     * because when it is copied into the hash table it will be truncated at
     * that length.
     */
    Size        s_len = strlen((const char *) key);

    s_len = Min(s_len, keysize - 1);
    return DatumGetUInt32(hash_any((const unsigned char *) key,
                                   (int) s_len));
}
uint32 tag_hash(const void *key, Size keysize)
Definition at line 51 of file hashfn.c.
References DatumGetUInt32, and hash_any().
{
    return DatumGetUInt32(hash_any((const unsigned char *) key,
                                   (int) keysize));
}
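As the implementations above show, the choice of built-in hash function follows the key representation: string_hash (the default) for NUL-terminated strings truncated to keysize-1, tag_hash for fixed-size binary keys, and oid_hash for single-Oid keys. A hedged sketch of an Oid-keyed setup, much like what pgstat_collect_oids() needs; OidEntry, the table name, and the size hint are hypothetical.

typedef struct OidEntry
{
    Oid         key;            /* single-Oid hash key */
    bool        seen;           /* payload */
} OidEntry;

static HTAB *
make_oid_set(void)
{
    HASHCTL     ctl;

    MemSet(&ctl, 0, sizeof(ctl));
    ctl.keysize = sizeof(Oid);          /* oid_hash asserts exactly this */
    ctl.entrysize = sizeof(OidEntry);
    ctl.hash = oid_hash;                /* must also pass HASH_FUNCTION */

    return hash_create("oid set", 64, &ctl, HASH_ELEM | HASH_FUNCTION);
}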