#include "storage/backendid.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
#define AccessExclusiveLock 8
Definition at line 154 of file lock.h.
Referenced by AcquireDeletionLock(), AlterObjectNamespace_oid(), AlterTableCreateToastTable(), AlterTableNamespace(), AlterTableNamespaceInternal(), AlterTypeOwner(), ATAddForeignKeyConstraint(), BootstrapToastTable(), cluster(), cluster_rel(), copy_heap_data(), count_nondeletable_pages(), CreateTrigger(), dbase_redo(), DefineQueryRewrite(), DefineRelation(), DefineRule(), DefineSequence(), DefineVirtualRelation(), dropdb(), DropRole(), ExecAlterObjectSchemaStmt(), ExecAlterOwnerStmt(), ExecRefreshMatView(), ExecRenameStmt(), ExecuteTruncate(), GetRunningTransactionLocks(), heap_drop_with_catalog(), heap_truncate(), heap_truncate_one_rel(), index_create(), index_drop(), intorel_startup(), lazy_truncate_heap(), lock_twophase_standby_recover(), LockAcquireExtended(), make_new_heap(), movedb(), PreCommit_Notify(), reindex_index(), ReindexIndex(), RelationTruncateIndexes(), ReleaseDeletionLock(), RemoveAttrDefaultById(), RemoveAttributeById(), RemoveConstraintById(), RemoveObjects(), RemoveRewriteRuleById(), RemoveTriggerById(), rename_constraint_internal(), renameatt(), renameatt_internal(), RenameConstraint(), RenameDatabase(), RenameRelation(), RenameRelationInternal(), RenameRewriteRule(), renametrig(), ResolveRecoveryConflictWithLock(), shdepReassignOwned(), StandbyAcquireAccessExclusiveLock(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), transformRuleStmt(), and vacuum_rel().
#define AccessShareLock 1
Definition at line 144 of file lock.h.
Referenced by acquire_inherited_sample_rows(), AfterTriggerSetState(), AlterDatabaseSet(), AlterDomainValidateConstraint(), AlterExtensionNamespace(), AlterSeqNamespaces(), AlterSequence(), AssignTypeArrayOid(), ATExecAddIndexConstraint(), ATExecAddInherit(), ATExecAddOf(), ATExecAlterColumnGenericOptions(), ATExecDropInherit(), AttrDefaultFetch(), bt_metap(), bt_page_items(), bt_page_stats(), build_row_from_class(), BuildEventTriggerCache(), calculate_indexes_size(), calculate_toast_table_size(), CatalogCacheInitializeCache(), change_owner_recurse_to_sequences(), check_db_file_conflict(), check_functional_grouping(), check_of_type(), check_selective_binary_conversion(), CheckAttributeType(), CheckConstraintFetch(), CheckIndexCompatible(), checkSharedDependencies(), ChooseConstraintName(), ConstraintNameIsUsed(), createdb(), CreateTrigger(), currtid_byrelname(), currtid_byreloid(), currtid_for_view(), database_to_xmlschema_internal(), dblink_build_sql_delete(), dblink_build_sql_insert(), dblink_build_sql_update(), dblink_get_pkey(), do_analyze_rel(), do_autovacuum(), DoCopy(), enum_endpoint(), enum_range_internal(), EventTriggerSQLDropAddObject(), exec_object_restorecon(), ExecAlterExtensionStmt(), ExecInitBitmapIndexScan(), ExecInitIndexOnlyScan(), ExecInitIndexScan(), expandRelation(), find_composite_type_dependencies(), find_inheritance_children(), find_language_template(), find_typed_table_dependencies(), finish_heap_swap(), get_actual_variable_range(), get_constraint_index(), get_database_list(), get_database_oid(), get_db_info(), get_domain_constraint_oid(), get_extension_name(), get_extension_oid(), get_extension_schema(), get_file_fdw_attribute_options(), get_index_constraint(), get_object_address_relobject(), get_pkey_attnames(), get_raw_page_internal(), get_rel_oids(), get_relation_constraint_oid(), get_rels_with_domain(), get_rewrite_oid_without_relid(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), get_trigger_oid(), GetComment(), getConstraintTypeDescription(), GetDatabaseTuple(), GetDatabaseTupleByOid(), GetDefaultOpClass(), GetDomainConstraints(), getExtensionOfObject(), getObjectDescription(), getObjectIdentity(), getOwnedSequences(), getRelationsInNamespace(), GetSecurityLabel(), GetSharedSecurityLabel(), gincostestimate(), GrantRole(), heap_sync(), heap_truncate_find_FKs(), InitCatCachePhase2(), InitPlan(), isQueryUsingTempRelation_walker(), LargeObjectExists(), load_critical_index(), load_enum_cache_data(), load_typcache_tupdesc(), LockTableAclCheck(), lookup_ts_config_cache(), LookupOpclassInfo(), make_viewdef(), makeArrayTypeName(), myLargeObjectExists(), objectsInSchemaToOids(), open_share_lock(), pg_extension_ownercheck(), pg_freespace(), pg_get_serial_sequence(), pg_get_triggerdef_worker(), pg_identify_object(), pg_indexes_size(), pg_largeobject_aclmask_snapshot(), pg_largeobject_ownercheck(), pg_relation_is_scannable(), pg_relation_size(), pg_relpages(), pg_table_size(), pg_total_relation_size(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), pgstat_index(), pgstatginindex(), pgstatindex(), pgstattuple(), pgstattuplebyid(), pltcl_init_load_unknown(), process_owned_by(), process_settings(), RangeVarGetAndCheckCreationNamespace(), regclassin(), regoperin(), regprocin(), regtypein(), ReindexDatabase(), relation_is_updatable(), RelationBuildRuleLock(), RelationBuildTriggers(), RelationBuildTupleDesc(), RelationGetExclusionInfo(), RelationGetIndexAttrBitmap(), RelationGetIndexList(), RelationNameGetTupleDesc(), remove_dbtablespaces(), 
RemoveRoleFromObjectACL(), ScanPgRelation(), schema_to_xmlschema_internal(), SearchCatCache(), SearchCatCacheList(), sepgsql_attribute_post_create(), sepgsql_database_post_create(), sepgsql_index_modify(), sepgsql_proc_post_create(), sepgsql_proc_setattr(), sepgsql_relation_post_create(), sepgsql_relation_setattr(), sepgsql_schema_post_create(), sequenceIsOwned(), set_relation_column_names(), shdepLockAndCheckObject(), systable_beginscan(), systable_endscan(), table_to_xml_and_xmlschema(), table_to_xmlschema(), ThereIsAtLeastOneRole(), toast_fetch_datum(), toast_fetch_datum_slice(), toastid_valueid_exists(), transformIndexConstraint(), transformTableLikeClause(), typeInheritsFrom(), UpdateRangeTableOfViewParse(), vac_truncate_clog(), and vac_update_datfrozenxid().
#define DEFAULT_LOCKMETHOD 1
Definition at line 133 of file lock.h.
Referenced by LockReleaseAll(), ProcReleaseLocks(), VirtualXactLock(), and VirtualXactLockTableCleanup().
#define ExclusiveLock 7
Definition at line 152 of file lock.h.
Referenced by _bt_getbuf(), AddEnumLabel(), btvacuumscan(), fsm_extend(), GetLockStatusData(), GinNewBuffer(), ginvacuumcleanup(), gistNewBuffer(), gistvacuumcleanup(), lazy_scan_heap(), pg_advisory_lock_int4(), pg_advisory_lock_int8(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_xact_lock_int4(), pg_advisory_xact_lock_int8(), pg_try_advisory_lock_int4(), pg_try_advisory_lock_int8(), pg_try_advisory_xact_lock_int4(), pg_try_advisory_xact_lock_int8(), pgstat_index(), RelationGetBufferForTuple(), SpGistNewBuffer(), spgvacuumscan(), VirtualXactLock(), VirtualXactLockTableCleanup(), vm_extend(), XactLockTableDelete(), and XactLockTableInsert().
#define GET_VXID_FROM_PGPROC(vxid, proc) \
    ((vxid).backendId = (proc).backendId, \
     (vxid).localTransactionId = (proc).lxid)
Definition at line 76 of file lock.h.
Referenced by CancelDBBackends(), CancelVirtualTransaction(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLockConflicts(), GetSerializableTransactionSnapshotInt(), GetVirtualXIDsDelayingChkpt(), and HaveVirtualXIDsDelayingChkpt().
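The callers above all follow the same pattern: copy the backend ID and local transaction ID out of a PGPROC and then test the result. A minimal backend-internal sketch of that idiom (assumes storage/proc.h for PGPROC; the helper name is illustrative, not part of lock.h):

#include "postgres.h"
#include "storage/lock.h"
#include "storage/proc.h"

/* Return true if the given backend currently has a live virtual xact. */
static bool
proc_has_valid_vxid(PGPROC *proc)
{
    VirtualTransactionId vxid;

    GET_VXID_FROM_PGPROC(vxid, *proc); /* copies backendId and lxid */
    return VirtualTransactionIdIsValid(vxid);
}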
#define InvalidLocalTransactionId 0
Definition at line 65 of file lock.h.
Referenced by VirtualXactLockTableInsert().
#define LOCALLOCK_LOCKMETHOD(llock) ((llock).tag.lock.locktag_lockmethodid)
Definition at line 417 of file lock.h.
Referenced by LockReleaseAll(), LockReleaseSession(), and WaitOnLock().
#define LocalTransactionIdIsValid(lxid) ((lxid) != InvalidLocalTransactionId)
Definition at line 66 of file lock.h.
Referenced by GetNextLocalTransactionId(), and VirtualXactLockTableCleanup().
#define LOCK_LOCKMETHOD(lock) ((LOCKMETHODID) (lock).tag.locktag_lockmethodid)
Definition at line 315 of file lock.h.
Referenced by GetLocksMethodTable(), LockReleaseAll(), and RemoveFromWaitQueue().
#define LOCKBIT_OFF(lockmode) (~(1 << (lockmode)))
Definition at line 93 of file lock.h.
Referenced by GrantLock(), pg_lock_status(), RemoveFromWaitQueue(), and UnGrantLock().
#define LOCKBIT_ON(lockmode) (1 << (lockmode))
Definition at line 92 of file lock.h.
Referenced by DoLockModesConflict(), FindLockCycleRecurse(), GetLockStatusData(), GetRunningTransactionLocks(), GrantLock(), lock_twophase_recover(), LockAcquireExtended(), LockCheckConflicts(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), pg_lock_status(), PostPrepare_Locks(), ProcLockWakeup(), ProcSleep(), and SetupLockInTable().
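Both macros manipulate a LOCKMASK with one bit per lock mode; GrantLock() and UnGrantLock() apply exactly this pattern to LOCK->grantMask, LOCK->waitMask, and PROCLOCK->holdMask. A sketch of the idiom (the helper name is illustrative, not part of lock.h):

#include "postgres.h"
#include "storage/lock.h"

/* Set or clear the bit for one lock mode in a holder/grant bitmask. */
static void
set_mode_bit(LOCKMASK *mask, LOCKMODE lockmode, bool held)
{
    if (held)
        *mask |= LOCKBIT_ON(lockmode);  /* mark the mode as granted/held */
    else
        *mask &= LOCKBIT_OFF(lockmode); /* drop the mode from the mask */
}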
#define LockHashPartition(hashcode) ((hashcode) % NUM_LOCK_PARTITIONS)
Definition at line 469 of file lock.h.
Referenced by lock_twophase_recover(), and SetupLockInTable().
#define LockHashPartitionLock(hashcode) ((LWLockId) (FirstLockMgrLock + LockHashPartition(hashcode)))
Definition at line 471 of file lock.h.
Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockErrorCleanup(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), ProcSleep(), VirtualXactLock(), and WaitOnLock().
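A lock tag's hash determines both its partition and the LWLock protecting that partition. The sketch below shows the hashcode-to-partition-lock chain used by the callers above; it assumes LockTagHashCode() from lock.h and that the code runs inside the backend, and the function name is illustrative:

#include "postgres.h"
#include "storage/lock.h"
#include "storage/lwlock.h"

/* Briefly take, in shared mode, the partition LWLock that covers a tag. */
static void
inspect_partition_for_tag(const LOCKTAG *locktag)
{
    uint32      hashcode = LockTagHashCode(locktag);
    LWLockId    partitionLock = LockHashPartitionLock(hashcode);

    LWLockAcquire(partitionLock, LW_SHARED);
    /* ... look up the LOCK entry for this tag in the shared hash ... */
    LWLockRelease(partitionLock);
}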
#define LOCKTAG_LAST_TYPE LOCKTAG_ADVISORY
Definition at line 191 of file lock.h.
Referenced by pg_lock_status().
#define MAX_LOCKMODES 10
Definition at line 90 of file lock.h.
Referenced by index_close(), lock_twophase_recover(), relation_close(), relation_open(), SetupLockInTable(), and try_relation_open().
#define NoLock 0
Definition at line 142 of file lock.h.
Referenced by acquire_inherited_sample_rows(), AcquireRewriteLocks(), addRangeTableEntry(), AddRoleMems(), afterTriggerInvokeEvents(), AlterDatabase(), AlterDatabaseOwner(), AlterDomainDefault(), AlterDomainDropConstraint(), AlterDomainNotNull(), AlterFunction(), AlterObjectNamespace_oid(), AlterRole(), AlterSeqNamespaces(), AlterSequence(), AlterSetting(), AlterTable(), AlterTableCreateToastTable(), AlterTableNamespace(), AlterTableSpaceOptions(), analyze_rel(), ATAddCheckConstraint(), ATAddForeignKeyConstraint(), ATController(), ATExecAddColumn(), ATExecAddIndex(), ATExecAddIndexConstraint(), ATExecAddInherit(), ATExecChangeOwner(), ATExecDropColumn(), ATExecDropConstraint(), ATExecDropInherit(), ATExecSetRelOptions(), ATExecSetTableSpace(), ATExecValidateConstraint(), ATPostAlterTypeParse(), ATPrepAlterColumnType(), ATRewriteCatalogs(), ATRewriteTable(), ATRewriteTables(), ATSimpleRecursion(), ATTypedTableRecursion(), boot_openrel(), BootstrapToastTable(), build_indices(), build_physical_tlist(), change_owner_recurse_to_sequences(), check_index_is_clusterable(), check_of_type(), CheckIndexCompatible(), CheckRelationOwnership(), close_lo_relation(), closerel(), cluster(), CommentObject(), convert_table_name(), copy_heap_data(), create_toast_table(), CreateComments(), createdb(), CreateRole(), CreateSharedComments(), CreateTableSpace(), CreateTrigger(), currval_oid(), database_to_xmlschema_internal(), DefineCompositeType(), DefineIndex(), DefineQueryRewrite(), DefineRelation(), DefineSequence(), DefineVirtualRelation(), DelRoleMems(), deparseSelectSql(), do_analyze_rel(), do_setval(), DoCopy(), dropdb(), DropRole(), DropTableSpace(), EvalPlanQualEnd(), exec_object_restorecon(), ExecAlterExtensionContentsStmt(), ExecCloseScanRelation(), ExecEndBitmapIndexScan(), ExecEndIndexOnlyScan(), ExecEndIndexScan(), ExecEndPlan(), ExecGetTriggerResultRel(), ExecInitBitmapIndexScan(), ExecInitIndexOnlyScan(), ExecInitIndexScan(), ExecRefreshMatView(), ExecSecLabelStmt(), ExecuteTruncate(), expand_inherited_rtentry(), expand_targetlist(), find_inheritance_children(), finish_heap_swap(), fireRIRrules(), free_parsestate(), get_actual_variable_range(), get_db_info(), get_object_address(), get_rel_oids(), get_relation_constraints(), get_relation_data_width(), get_relation_info(), get_rels_with_domain(), gettype(), GrantRole(), heap_create_with_catalog(), heap_drop_with_catalog(), heap_truncate(), heap_truncate_one_rel(), index_close(), index_create(), index_drop(), intorel_shutdown(), lastval(), lazy_vacuum_rel(), LockTableRecurse(), LookupTypeName(), make_new_heap(), MergeAttributes(), movedb(), nextval(), nextval_internal(), objectNamesToOids(), open_share_lock(), pg_get_serial_sequence(), pg_get_viewdef_name(), pg_get_viewdef_name_ext(), pg_lock_status(), pg_sequence_parameters(), plpgsql_parse_cwordrowtype(), plpgsql_parse_cwordtype(), postgresPlanForeignModify(), process_owned_by(), RangeVarGetAndCheckCreationNamespace(), RangeVarGetRelidExtended(), rebuild_relation(), regclassin(), reindex_index(), reindex_relation(), relation_close(), relation_open(), relation_openrv(), relation_openrv_extended(), RelationTruncateIndexes(), RemoveAttrDefaultById(), RemoveAttributeById(), RemoveConstraintById(), RemoveObjects(), RemoveRewriteRuleById(), RemoveTriggerById(), rename_constraint_internal(), renameatt_internal(), RenameConstraint(), RenameDatabase(), RenameRelationInternal(), RenameRewriteRule(), RenameRole(), RenameSchema(), RenameTableSpace(), renametrig(), ResetSequence(), RewriteQuery(), 
rewriteTargetView(), schema_to_xmlschema_internal(), searchRangeTableForRel(), sepgsql_dml_privileges(), setTargetTable(), shdepReassignOwned(), table_to_xml_and_xmlschema(), table_to_xmlschema(), text_regclass(), transformAlterTableStmt(), transformCreateStmt(), transformIndexConstraint(), transformIndexStmt(), transformRuleStmt(), transformTableLikeClause(), transientrel_shutdown(), transientrel_startup(), try_relation_open(), TryReuseIndex(), vac_open_indexes(), vacuum_rel(), validate_index(), and validateDomainConstraint().
#define PROCLOCK_LOCKMETHOD(proclock) LOCK_LOCKMETHOD(*((proclock).tag.myLock))
#define RowExclusiveLock 3
Definition at line 146 of file lock.h.
Referenced by AcquireExecutorLocks(), AddEnumLabel(), AddNewAttributeTuples(), AddRoleMems(), AggregateCreate(), AlterConstraintNamespaces(), AlterDatabase(), AlterDatabaseOwner(), AlterDomainAddConstraint(), AlterDomainDefault(), AlterDomainDropConstraint(), AlterDomainNotNull(), AlterDomainValidateConstraint(), AlterEventTrigger(), AlterEventTriggerOwner(), AlterEventTriggerOwner_oid(), AlterExtensionNamespace(), AlterForeignDataWrapper(), AlterForeignDataWrapperOwner(), AlterForeignDataWrapperOwner_oid(), AlterForeignServer(), AlterForeignServerOwner(), AlterForeignServerOwner_oid(), AlterFunction(), AlterObjectNamespace_oid(), AlterRole(), AlterSchemaOwner(), AlterSchemaOwner_oid(), AlterSetting(), AlterTableNamespaceInternal(), AlterTableSpaceOptions(), AlterTSConfiguration(), AlterTSDictionary(), AlterTypeNamespaceInternal(), AlterTypeOwner(), AlterTypeOwnerInternal(), AlterUserMapping(), AppendAttributeTuples(), ApplyExtensionUpdates(), ATExecAddColumn(), ATExecAddInherit(), ATExecAddOf(), ATExecAlterColumnGenericOptions(), ATExecAlterColumnType(), ATExecChangeOwner(), ATExecDropColumn(), ATExecDropConstraint(), ATExecDropInherit(), ATExecDropNotNull(), ATExecDropOf(), ATExecGenericOptions(), ATExecSetNotNull(), ATExecSetOptions(), ATExecSetRelOptions(), ATExecSetStatistics(), ATExecSetStorage(), ATExecSetTableSpace(), ATExecValidateConstraint(), change_owner_fix_column_acls(), changeDependencyFor(), changeDependencyOnOwner(), CollationCreate(), ConversionCreate(), copyTemplateDependencies(), create_proc_lang(), create_toast_table(), CreateCast(), CreateComments(), CreateConstraintEntry(), createdb(), CreateForeignDataWrapper(), CreateForeignServer(), CreateForeignTable(), CreateOpFamily(), CreateRole(), CreateSharedComments(), CreateTableSpace(), CreateTrigger(), CreateUserMapping(), DefineIndex(), DefineOpClass(), DefineQueryRewrite(), DefineTSConfiguration(), DefineTSDictionary(), DefineTSParser(), DefineTSTemplate(), DeleteAttributeTuples(), DeleteComments(), deleteDependencyRecordsFor(), deleteDependencyRecordsForClass(), deleteOneObject(), DeleteRelationTuple(), DeleteSecurityLabel(), DeleteSharedComments(), deleteSharedDependencyRecordsFor(), DeleteSharedSecurityLabel(), DeleteSystemAttributeTuples(), deleteWhatDependsOn(), DelRoleMems(), DoCopy(), drop_parent_dependency(), DropCastById(), dropDatabaseDependencies(), dropdb(), DropProceduralLanguageById(), DropRole(), DropSetting(), DropTableSpace(), EnableDisableRule(), EnableDisableTrigger(), EnumValuesCreate(), EnumValuesDelete(), ExecAlterObjectSchemaStmt(), ExecAlterOwnerStmt(), ExecCloseIndices(), ExecGrant_Database(), ExecGrant_Fdw(), ExecGrant_ForeignServer(), ExecGrant_Function(), ExecGrant_Language(), ExecGrant_Largeobject(), ExecGrant_Namespace(), ExecGrant_Relation(), ExecGrant_Tablespace(), ExecGrant_Type(), ExecOpenIndices(), ExecRenameStmt(), extension_config_remove(), heap_create_with_catalog(), heap_drop_with_catalog(), index_build(), index_constraint_create(), index_create(), index_drop(), index_set_state_flags(), index_update_stats(), InitPlan(), InitPostgres(), insert_event_trigger_tuple(), InsertExtensionTuple(), InsertRule(), LargeObjectCreate(), LargeObjectDrop(), lazy_vacuum_rel(), LockAcquireExtended(), LockTableCommand(), mark_index_clustered(), MergeAttributesIntoExisting(), MergeConstraintsIntoExisting(), MergeWithExistingConstraint(), movedb(), NamespaceCreate(), open_lo_relation(), OperatorCreate(), OperatorShellMake(), OperatorUpd(), performDeletion(), performMultipleDeletions(), 
pg_extension_config_dump(), ProcedureCreate(), RangeCreate(), RangeDelete(), recordMultipleDependencies(), recordSharedDependencyOn(), reindex_index(), RelationRemoveInheritance(), RelationSetNewRelfilenode(), RemoveAmOpEntryById(), RemoveAmProcEntryById(), RemoveAttrDefault(), RemoveAttrDefaultById(), RemoveAttributeById(), RemoveCollationById(), RemoveConstraintById(), RemoveConversionById(), RemoveDefaultACLById(), RemoveEventTriggerById(), RemoveExtensionById(), RemoveForeignDataWrapperById(), RemoveForeignServerById(), RemoveFunctionById(), RemoveOpClassById(), RemoveOperatorById(), RemoveOpFamilyById(), RemoveRewriteRuleById(), RemoveSchemaById(), RemoveStatistics(), RemoveTriggerById(), RemoveTSConfigurationById(), RemoveTSDictionaryById(), RemoveTSParserById(), RemoveTSTemplateById(), RemoveTypeById(), RemoveUserMappingById(), renameatt_internal(), RenameConstraint(), RenameConstraintById(), RenameDatabase(), RenameRelationInternal(), RenameRewriteRule(), RenameRole(), RenameSchema(), RenameTableSpace(), renametrig(), RenameType(), RenameTypeInternal(), rewriteTargetView(), RI_FKey_cascade_del(), RI_FKey_cascade_upd(), RI_FKey_setdefault_del(), RI_FKey_setdefault_upd(), RI_FKey_setnull_del(), RI_FKey_setnull_upd(), SetDefaultACL(), SetFunctionArgType(), SetFunctionReturnType(), SetRelationHasSubclass(), SetRelationNumChecks(), SetRelationRuleStatus(), SetSecurityLabel(), SetSharedSecurityLabel(), setTargetTable(), shdepDropOwned(), shdepReassignOwned(), StoreAttrDefault(), StoreCatalogInheritance(), storeOperators(), storeProcedures(), swap_relation_files(), toast_delete_datum(), toast_save_datum(), TypeCreate(), TypeShellMake(), update_attstats(), updateAclDependencies(), UpdateIndexRelation(), vac_update_datfrozenxid(), vac_update_relstats(), and validate_index().
#define RowShareLock 2
Definition at line 145 of file lock.h.
Referenced by AcquireExecutorLocks(), AcquireRewriteLocks(), addRangeTableEntry(), ATExecValidateConstraint(), ATRewriteTables(), InitPlan(), RI_FKey_check(), ri_restrict_del(), ri_restrict_upd(), and ScanQueryForLocks().
#define SET_LOCKTAG_ADVISORY(locktag, id1, id2, id3, id4) \
    ((locktag).locktag_field1 = (id1), \
     (locktag).locktag_field2 = (id2), \
     (locktag).locktag_field3 = (id3), \
     (locktag).locktag_field4 = (id4), \
     (locktag).locktag_type = LOCKTAG_ADVISORY, \
     (locktag).locktag_lockmethodid = USER_LOCKMETHOD)
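Note that this is the one tag builder that stamps USER_LOCKMETHOD rather than DEFAULT_LOCKMETHOD. A sketch of filling an advisory tag scoped to the current database; MyDatabaseId from miscadmin.h is an assumption here, and the value placed in the last field is only an illustration of how a key encoding can be distinguished:

#include "postgres.h"
#include "miscadmin.h"
#include "storage/lock.h"

/* Build an advisory-lock tag for a caller-chosen two-part key. */
static void
make_advisory_tag(LOCKTAG *tag, uint32 key_hi, uint32 key_lo)
{
    /* field4 = 2 is assumed here to mark a two-int4 key encoding */
    SET_LOCKTAG_ADVISORY(*tag, MyDatabaseId, key_hi, key_lo, 2);
}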
#define SET_LOCKTAG_OBJECT(locktag, dboid, classoid, objoid, objsubid) \
    ((locktag).locktag_field1 = (dboid), \
     (locktag).locktag_field2 = (classoid), \
     (locktag).locktag_field3 = (objoid), \
     (locktag).locktag_field4 = (objsubid), \
     (locktag).locktag_type = LOCKTAG_OBJECT, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 264 of file lock.h.
Referenced by LockDatabaseObject(), LockSharedObject(), LockSharedObjectForSession(), UnlockDatabaseObject(), UnlockSharedObject(), and UnlockSharedObjectForSession().
#define SET_LOCKTAG_PAGE(locktag, dboid, reloid, blocknum) \
    ((locktag).locktag_field1 = (dboid), \
     (locktag).locktag_field2 = (reloid), \
     (locktag).locktag_field3 = (blocknum), \
     (locktag).locktag_field4 = 0, \
     (locktag).locktag_type = LOCKTAG_PAGE, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 232 of file lock.h.
Referenced by ConditionalLockPage(), LockPage(), and UnlockPage().
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid) \
    ((locktag).locktag_field1 = (dboid), \
     (locktag).locktag_field2 = (reloid), \
     (locktag).locktag_field3 = 0, \
     (locktag).locktag_field4 = 0, \
     (locktag).locktag_type = LOCKTAG_RELATION, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 216 of file lock.h.
Referenced by ConditionalLockRelation(), DefineIndex(), GetLockStatusData(), index_drop(), LockHasWaitersRelation(), LockRelation(), LockRelationIdForSession(), ResolveRecoveryConflictWithLock(), SetLocktagRelationOid(), StandbyAcquireAccessExclusiveLock(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), UnlockRelation(), UnlockRelationId(), and UnlockRelationIdForSession().
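The callers above build the tag and immediately hand it to the lock manager. A hedged sketch of the conditional-acquire idiom, assuming LockAcquire() from lock.h with its usual (locktag, lockmode, sessionLock, dontWait) argument order; the helper name is illustrative:

#include "postgres.h"
#include "storage/lock.h"

/* Try to lock a relation by OID without blocking; true on success. */
static bool
try_lock_relation_oid(Oid dboid, Oid reloid, LOCKMODE lockmode)
{
    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, dboid, reloid);
    return LockAcquire(&tag, lockmode, false, true) != LOCKACQUIRE_NOT_AVAIL;
}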
#define SET_LOCKTAG_RELATION_EXTEND(locktag, dboid, reloid) \
    ((locktag).locktag_field1 = (dboid), \
     (locktag).locktag_field2 = (reloid), \
     (locktag).locktag_field3 = 0, \
     (locktag).locktag_field4 = 0, \
     (locktag).locktag_type = LOCKTAG_RELATION_EXTEND, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 224 of file lock.h.
Referenced by LockRelationForExtension(), and UnlockRelationForExtension().
#define SET_LOCKTAG_TRANSACTION(locktag, xid) \
    ((locktag).locktag_field1 = (xid), \
     (locktag).locktag_field2 = 0, \
     (locktag).locktag_field3 = 0, \
     (locktag).locktag_field4 = 0, \
     (locktag).locktag_type = LOCKTAG_TRANSACTION, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 248 of file lock.h.
Referenced by ConditionalXactLockTableWait(), XactLockTableDelete(), XactLockTableInsert(), and XactLockTableWait().
#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum) \
    ((locktag).locktag_field1 = (dboid), \
     (locktag).locktag_field2 = (reloid), \
     (locktag).locktag_field3 = (blocknum), \
     (locktag).locktag_field4 = (offnum), \
     (locktag).locktag_type = LOCKTAG_TUPLE, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 240 of file lock.h.
Referenced by ConditionalLockTuple(), LockTuple(), and UnlockTuple().
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid) \
    ((locktag).locktag_field1 = (vxid).backendId, \
     (locktag).locktag_field2 = (vxid).localTransactionId, \
     (locktag).locktag_field3 = 0, \
     (locktag).locktag_field4 = 0, \
     (locktag).locktag_type = LOCKTAG_VIRTUALTRANSACTION, \
     (locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
Definition at line 256 of file lock.h.
Referenced by GetLockStatusData(), VirtualXactLock(), and VirtualXactLockTableCleanup().
#define SetInvalidVirtualTransactionId(vxid) \
    ((vxid).backendId = InvalidBackendId, \
     (vxid).localTransactionId = InvalidLocalTransactionId)
Definition at line 73 of file lock.h.
Referenced by DefineIndex(), and InitPredicateLocks().
#define ShareLock 5
Definition at line 149 of file lock.h.
Referenced by AlterDomainNotNull(), ConditionalXactLockTableWait(), create_toast_table(), createdb(), createdb_failure_callback(), DefineIndex(), pg_advisory_lock_shared_int4(), pg_advisory_lock_shared_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), pg_advisory_xact_lock_shared_int4(), pg_advisory_xact_lock_shared_int8(), pg_try_advisory_lock_shared_int4(), pg_try_advisory_lock_shared_int8(), pg_try_advisory_xact_lock_shared_int4(), pg_try_advisory_xact_lock_shared_int8(), RangeVarCallbackForReindexIndex(), reindex_index(), reindex_relation(), ReindexTable(), transformIndexStmt(), validateDomainConstraint(), VirtualXactLock(), and XactLockTableWait().
#define ShareUpdateExclusiveLock 4
Definition at line 147 of file lock.h.
Referenced by AcquireDeletionLock(), analyze_rel(), ATExecAddInherit(), CommentObject(), DefineIndex(), ExecAlterExtensionContentsStmt(), ExecSecLabelStmt(), index_drop(), MergeAttributes(), RangeVarCallbackForDropRelation(), transformIndexStmt(), vacuum_rel(), and validate_index().
#define USER_LOCKMETHOD 2
Definition at line 134 of file lock.h.
Referenced by DiscardAll(), pg_advisory_unlock_all(), ProcReleaseLocks(), and ShutdownPostgres().
#define VirtualTransactionIdEquals(vxid1, vxid2) \
    ((vxid1).backendId == (vxid2).backendId && \
     (vxid1).localTransactionId == (vxid2).localTransactionId)
Definition at line 70 of file lock.h.
Referenced by DefineIndex(), GetLockConflicts(), and HaveVirtualXIDsDelayingChkpt().
#define VirtualTransactionIdIsValid(vxid) \
    (((vxid).backendId != InvalidBackendId) && \
     LocalTransactionIdIsValid((vxid).localTransactionId))
Definition at line 67 of file lock.h.
Referenced by DefineIndex(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLockConflicts(), GetVirtualXIDsDelayingChkpt(), HaveVirtualXIDsDelayingChkpt(), index_drop(), ResolveRecoveryConflictWithVirtualXIDs(), VirtualXactLock(), and VirtualXactLockTableInsert().
typedef struct LOCALLOCKOWNER LOCALLOCKOWNER
typedef struct LOCALLOCKTAG LOCALLOCKTAG
typedef struct LockInstanceData LockInstanceData
typedef const LockMethodData* LockMethod
typedef struct LockMethodData LockMethodData
typedef uint16 LOCKMETHODID
typedef enum LockTagType LockTagType
typedef struct PROC_QUEUE PROC_QUEUE
typedef struct PROCLOCKTAG PROCLOCKTAG
typedef struct xl_standby_lock xl_standby_lock
enum DeadLockState |
DS_NOT_YET_CHECKED          no deadlock check has run yet
DS_NO_DEADLOCK              no deadlock detected
DS_SOFT_DEADLOCK            deadlock avoided by queue rearrangement
DS_HARD_DEADLOCK            deadlock, no way out but ERROR
DS_BLOCKED_BY_AUTOVACUUM    no deadlock; queue blocked by autovacuum worker
Definition at line 452 of file lock.h.
{
    DS_NOT_YET_CHECKED,         /* no deadlock check has run yet */
    DS_NO_DEADLOCK,             /* no deadlock detected */
    DS_SOFT_DEADLOCK,           /* deadlock avoided by queue rearrangement */
    DS_HARD_DEADLOCK,           /* deadlock, no way out but ERROR */
    DS_BLOCKED_BY_AUTOVACUUM    /* no deadlock; queue blocked by autovacuum
                                 * worker */
} DeadLockState;
enum LockAcquireResult |
Definition at line 444 of file lock.h.
{
    LOCKACQUIRE_NOT_AVAIL,      /* lock not available, and dontWait=true */
    LOCKACQUIRE_OK,             /* lock successfully acquired */
    LOCKACQUIRE_ALREADY_HELD    /* incremented count for lock already held */
} LockAcquireResult;
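When LockAcquire() is called with dontWait set to true, the caller has to distinguish all three outcomes explicitly. A sketch of that handling, assuming the usual (locktag, lockmode, sessionLock, dontWait) LockAcquire() signature; the helper name is illustrative:

#include "postgres.h"
#include "storage/lock.h"

/* Report whether the lock ended up held after a no-wait attempt. */
static bool
acquired_or_already_held(const LOCKTAG *tag, LOCKMODE lockmode)
{
    switch (LockAcquire(tag, lockmode, false, true))
    {
        case LOCKACQUIRE_NOT_AVAIL:
            return false;       /* conflicting lock held elsewhere */
        case LOCKACQUIRE_OK:
        case LOCKACQUIRE_ALREADY_HELD:
            return true;        /* held now; ALREADY_HELD bumped the count */
    }
    return false;               /* not reached */
}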
enum LockTagType |
LOCKTAG_RELATION              whole relation
LOCKTAG_RELATION_EXTEND       the right to extend a relation
LOCKTAG_PAGE                  one page of a relation
LOCKTAG_TUPLE                 one physical tuple
LOCKTAG_TRANSACTION           transaction (for waiting for xact done)
LOCKTAG_VIRTUALTRANSACTION    virtual transaction (ditto)
LOCKTAG_OBJECT                non-relation database object
LOCKTAG_USERLOCK              reserved for old contrib/userlock code
LOCKTAG_ADVISORY              advisory user locks
Definition at line 165 of file lock.h.
{
    LOCKTAG_RELATION,           /* whole relation */
    /* ID info for a relation is DB OID + REL OID; DB OID = 0 if shared */
    LOCKTAG_RELATION_EXTEND,    /* the right to extend a relation */
    /* same ID info as RELATION */
    LOCKTAG_PAGE,               /* one page of a relation */
    /* ID info for a page is RELATION info + BlockNumber */
    LOCKTAG_TUPLE,              /* one physical tuple */
    /* ID info for a tuple is PAGE info + OffsetNumber */
    LOCKTAG_TRANSACTION,        /* transaction (for waiting for xact done) */
    /* ID info for a transaction is its TransactionId */
    LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
    /* ID info for a virtual transaction is its VirtualTransactionId */
    LOCKTAG_OBJECT,             /* non-relation database object */
    /* ID info for an object is DB OID + CLASS OID + OBJECT OID + SUBID */

    /*
     * Note: object ID has same representation as in pg_depend and
     * pg_description, but notice that we are constraining SUBID to 16 bits.
     * Also, we use DB OID = 0 for shared objects such as tablespaces.
     */
    LOCKTAG_USERLOCK,           /* reserved for old contrib/userlock code */
    LOCKTAG_ADVISORY            /* advisory user locks */
} LockTagType;
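Consumers such as pg_lock_status() and DescribeLockTag() switch on locktag_type to know how to read the four ID fields. A reduced sketch of that dispatch, with field interpretations taken from the comments above (the helper name is illustrative):

#include "postgres.h"
#include "storage/lock.h"

/* Return the database OID a tag belongs to, or InvalidOid if not per-DB. */
static Oid
locktag_database(const LOCKTAG *tag)
{
    switch ((LockTagType) tag->locktag_type)
    {
        case LOCKTAG_RELATION:
        case LOCKTAG_RELATION_EXTEND:
        case LOCKTAG_PAGE:
        case LOCKTAG_TUPLE:
        case LOCKTAG_OBJECT:
        case LOCKTAG_ADVISORY:
            return tag->locktag_field1; /* field1 holds the DB OID */
        default:
            return InvalidOid;          /* xact/VXID tags are not per-DB */
    }
}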
void AbortStrongLockAcquire(void)
Definition at line 1524 of file lock.c.
References Assert, FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, NULL, SpinLockAcquire, SpinLockRelease, and TRUE.
Referenced by LockAcquireExtended(), and LockErrorCleanup().
{
    uint32      fasthashcode;
    LOCALLOCK  *locallock = StrongLockInProgress;

    if (locallock == NULL)
        return;

    fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    Assert(locallock->holdsStrongLockCount == TRUE);
    SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    FastPathStrongRelationLocks->count[fasthashcode]--;
    locallock->holdsStrongLockCount = FALSE;
    StrongLockInProgress = NULL;
    SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
void AtPrepare_Locks(void)
Definition at line 2926 of file lock.c.
References ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCK::lock, LOCALLOCKTAG::lock, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, NULL, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.
Referenced by PrepareTransaction().
{ HASH_SEQ_STATUS status; LOCALLOCK *locallock; /* * For the most part, we don't need to touch shared memory for this --- * all the necessary state information is in the locallock table. * Fast-path locks are an exception, however: we move any such locks to * the main table before allowing PREPARE TRANSACTION to succeed. */ hash_seq_init(&status, LockMethodLocalHash); while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) { TwoPhaseLockRecord record; LOCALLOCKOWNER *lockOwners = locallock->lockOwners; bool haveSessionLock; bool haveXactLock; int i; /* * Ignore VXID locks. We don't want those to be held by prepared * transactions, since they aren't meaningful after a restart. */ if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION) continue; /* Ignore it if we don't actually hold the lock */ if (locallock->nLocks <= 0) continue; /* Scan to see whether we hold it at session or transaction level */ haveSessionLock = haveXactLock = false; for (i = locallock->numLockOwners - 1; i >= 0; i--) { if (lockOwners[i].owner == NULL) haveSessionLock = true; else haveXactLock = true; } /* Ignore it if we have only session lock */ if (!haveXactLock) continue; /* * If we have both session- and transaction-level locks, fail. This * should never happen with regular locks, since we only take those at * session level in some special operations like VACUUM. It's * possible to hit this with advisory locks, though. * * It would be nice if we could keep the session hold and give away * the transactional hold to the prepared xact. However, that would * require two PROCLOCK objects, and we cannot be sure that another * PROCLOCK will be available when it comes time for PostPrepare_Locks * to do the deed. So for now, we error out while we can still do so * safely. */ if (haveSessionLock) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object"))); /* * If the local lock was taken via the fast-path, we need to move it * to the primary lock table, or just get a pointer to the existing * primary lock table entry if by chance it's already been * transferred. */ if (locallock->proclock == NULL) { locallock->proclock = FastPathGetRelationLockEntry(locallock); locallock->lock = locallock->proclock->tag.myLock; } /* * Arrange to not release any strong lock count held by this lock * entry. We must retain the count until the prepared transaction is * committed or rolled back. */ locallock->holdsStrongLockCount = FALSE; /* * Create a 2PC record. */ memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG)); record.lockmode = locallock->tag.mode; RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0, &record, sizeof(TwoPhaseLockRecord)); } }
DeadLockState DeadLockCheck(PGPROC *proc)
Definition at line 204 of file deadlock.c.
References Assert, DeadLockCheckRecurse(), elog, FATAL, FindLockCycle(), GetLocksMethodTable(), i, PGPROC::links, PROC_QUEUE::links, WAIT_ORDER::lock, nCurConstraints, nPossibleConstraints, WAIT_ORDER::nProcs, NULL, nWaitOrders, ProcLockWakeup(), ProcQueueInit(), WAIT_ORDER::procs, SHMQueueInsertBefore(), PROC_QUEUE::size, and LOCK::waitProcs.
Referenced by CheckDeadLock().
{
    int         i,
                j;

    /* Initialize to "no constraints" */
    nCurConstraints = 0;
    nPossibleConstraints = 0;
    nWaitOrders = 0;

    /* Initialize to not blocked by an autovacuum worker */
    blocking_autovacuum_proc = NULL;

    /* Search for deadlocks and possible fixes */
    if (DeadLockCheckRecurse(proc))
    {
        /*
         * Call FindLockCycle one more time, to record the correct
         * deadlockDetails[] for the basic state with no rearrangements.
         */
        int         nSoftEdges;

        TRACE_POSTGRESQL_DEADLOCK_FOUND();

        nWaitOrders = 0;
        if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
            elog(FATAL, "deadlock seems to have disappeared");

        return DS_HARD_DEADLOCK;    /* cannot find a non-deadlocked state */
    }

    /* Apply any needed rearrangements of wait queues */
    for (i = 0; i < nWaitOrders; i++)
    {
        LOCK       *lock = waitOrders[i].lock;
        PGPROC    **procs = waitOrders[i].procs;
        int         nProcs = waitOrders[i].nProcs;
        PROC_QUEUE *waitQueue = &(lock->waitProcs);

        Assert(nProcs == waitQueue->size);

#ifdef DEBUG_DEADLOCK
        PrintLockQueue(lock, "DeadLockCheck:");
#endif

        /* Reset the queue and re-add procs in the desired order */
        ProcQueueInit(waitQueue);
        for (j = 0; j < nProcs; j++)
        {
            SHMQueueInsertBefore(&(waitQueue->links), &(procs[j]->links));
            waitQueue->size++;
        }

#ifdef DEBUG_DEADLOCK
        PrintLockQueue(lock, "rearranged to:");
#endif

        /* See if any waiters for the lock can be woken up now */
        ProcLockWakeup(GetLocksMethodTable(lock), lock);
    }

    /* Return code tells caller if we had to escape a deadlock or not */
    if (nWaitOrders > 0)
        return DS_SOFT_DEADLOCK;
    else if (blocking_autovacuum_proc != NULL)
        return DS_BLOCKED_BY_AUTOVACUUM;
    else
        return DS_NO_DEADLOCK;
}
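The return value drives what the caller does next; below is a simplified sketch of that branching in the spirit of CheckDeadLock(), with MyProc assumed from storage/proc.h and each reaction abbreviated to a comment:

#include "postgres.h"
#include "storage/lock.h"
#include "storage/proc.h"

/* Run the deadlock checker for our own backend and react to its verdict. */
static void
check_for_deadlock(void)
{
    switch (DeadLockCheck(MyProc))
    {
        case DS_HARD_DEADLOCK:
            DeadLockReport();   /* reports ERROR; does not return */
            break;
        case DS_BLOCKED_BY_AUTOVACUUM:
            /* the caller may choose to cancel the autovacuum worker */
            break;
        case DS_SOFT_DEADLOCK:  /* queues were rearranged; keep waiting */
        case DS_NO_DEADLOCK:
        case DS_NOT_YET_CHECKED:
            break;
    }
}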
void DeadLockReport(void)
Definition at line 893 of file deadlock.c.
References _, appendStringInfo(), appendStringInfoChar(), appendStringInfoString(), StringInfoData::data, DescribeLockTag(), ereport, errcode(), errdetail_internal(), errdetail_log(), errhint(), errmsg(), ERROR, GetLockmodeName(), initStringInfo(), DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG::locktag_lockmethodid, nDeadlockDetails, pgstat_get_backend_current_activity(), pgstat_report_deadlock(), DEADLOCK_INFO::pid, and resetStringInfo().
Referenced by WaitOnLock().
{
    StringInfoData clientbuf;   /* errdetail for client */
    StringInfoData logbuf;      /* errdetail for server log */
    StringInfoData locktagbuf;
    int         i;

    initStringInfo(&clientbuf);
    initStringInfo(&logbuf);
    initStringInfo(&locktagbuf);

    /* Generate the "waits for" lines sent to the client */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];
        int         nextpid;

        /* The last proc waits for the first one... */
        if (i < nDeadlockDetails - 1)
            nextpid = info[1].pid;
        else
            nextpid = deadlockDetails[0].pid;

        /* reset locktagbuf to hold next object description */
        resetStringInfo(&locktagbuf);

        DescribeLockTag(&locktagbuf, &info->locktag);

        if (i > 0)
            appendStringInfoChar(&clientbuf, '\n');

        appendStringInfo(&clientbuf,
                         _("Process %d waits for %s on %s; blocked by process %d."),
                         info->pid,
                         GetLockmodeName(info->locktag.locktag_lockmethodid,
                                         info->lockmode),
                         locktagbuf.data,
                         nextpid);
    }

    /* Duplicate all the above for the server ... */
    appendStringInfoString(&logbuf, clientbuf.data);

    /* ... and add info about query strings */
    for (i = 0; i < nDeadlockDetails; i++)
    {
        DEADLOCK_INFO *info = &deadlockDetails[i];

        appendStringInfoChar(&logbuf, '\n');

        appendStringInfo(&logbuf,
                         _("Process %d: %s"),
                         info->pid,
                         pgstat_get_backend_current_activity(info->pid, false));
    }

    pgstat_report_deadlock();

    ereport(ERROR,
            (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
             errmsg("deadlock detected"),
             errdetail_internal("%s", clientbuf.data),
             errdetail_log("%s", logbuf.data),
             errhint("See server log for query details.")));
}
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition at line 545 of file lock.c.
References LockMethodData::conflictTab, and LOCKBIT_ON.
Referenced by Do_MultiXactIdWait().
{
    LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];

    if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
        return true;

    return false;
}
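A quick usage check against the default conflict table: AccessShareLock coexists with RowExclusiveLock, while AccessExclusiveLock conflicts with every other mode. A small sketch (the function name is illustrative):

#include "postgres.h"
#include "storage/lock.h"

/* Sanity-check two well-known rows of the default conflict table. */
static void
conflict_table_examples(void)
{
    Assert(!DoLockModesConflict(AccessShareLock, RowExclusiveLock));
    Assert(DoLockModesConflict(AccessExclusiveLock, AccessShareLock));
}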
PGPROC *GetBlockingAutoVacuumPgproc(void)
Definition at line 280 of file deadlock.c.
Referenced by ProcSleep().
{
    PGPROC     *ptr;

    ptr = blocking_autovacuum_proc;
    blocking_autovacuum_proc = NULL;

    return ptr;
}
VirtualTransactionId *GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
Definition at line 2619 of file lock.c.
References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::backendLock, ConflictsWithRelationFastPath, LockMethodData::conflictTab, PGPROC::databaseId, elog, ERROR, FAST_PATH_GET_BITS, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, lengthof, LockHashPartitionLock, PROCLOCK::lockLink, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, MemoryContextAlloc(), PROCLOCKTAG::myProc, MyProc, NULL, LockMethodData::numLockModes, offsetof, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, SHMQueueNext(), PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.
Referenced by DefineIndex(), index_drop(), and ResolveRecoveryConflictWithLock().
{ static VirtualTransactionId *vxids; LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; LockMethod lockMethodTable; LOCK *lock; LOCKMASK conflictMask; SHM_QUEUE *procLocks; PROCLOCK *proclock; uint32 hashcode; LWLockId partitionLock; int count = 0; int fast_count = 0; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes) elog(ERROR, "unrecognized lock mode: %d", lockmode); /* * Allocate memory to store results, and fill with InvalidVXID. We only * need enough space for MaxBackends + a terminator, since prepared xacts * don't count. InHotStandby allocate once in TopMemoryContext. */ if (InHotStandby) { if (vxids == NULL) vxids = (VirtualTransactionId *) MemoryContextAlloc(TopMemoryContext, sizeof(VirtualTransactionId) * (MaxBackends + 1)); } else vxids = (VirtualTransactionId *) palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1)); /* Compute hash code and partiton lock, and look up conflicting modes. */ hashcode = LockTagHashCode(locktag); partitionLock = LockHashPartitionLock(hashcode); conflictMask = lockMethodTable->conflictTab[lockmode]; /* * Fast path locks might not have been entered in the primary lock table. * If the lock we're dealing with could conflict with such a lock, we must * examine each backend's fast-path array for conflicts. */ if (ConflictsWithRelationFastPath(locktag, lockmode)) { int i; Oid relid = locktag->locktag_field2; VirtualTransactionId vxid; /* * Iterate over relevant PGPROCs. Anything held by a prepared * transaction will have been transferred to the primary lock table, * so we need not worry about those. This is all a bit fuzzy, because * new locks could be taken after we've visited a particular * partition, but the callers had better be prepared to deal with that * anyway, since the locks could equally well be taken between the * time we return the value and the time the caller does something * with it. */ for (i = 0; i < ProcGlobal->allProcCount; i++) { PGPROC *proc = &ProcGlobal->allProcs[i]; uint32 f; /* A backend never blocks itself */ if (proc == MyProc) continue; LWLockAcquire(proc->backendLock, LW_SHARED); /* * If the target backend isn't referencing the same database as the * lock, then we needn't examine the individual relation IDs at * all; none of them can be relevant. * * See FastPathTransferLocks() for discussion of why we do this * test after acquiring the lock. */ if (proc->databaseId != locktag->locktag_field1) { LWLockRelease(proc->backendLock); continue; } for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++) { uint32 lockmask; /* Look for an allocated slot matching the given relid. */ if (relid != proc->fpRelId[f]) continue; lockmask = FAST_PATH_GET_BITS(proc, f); if (!lockmask) continue; lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET; /* * There can only be one entry per relation, so if we found it * and it doesn't conflict, we can skip the rest of the slots. */ if ((lockmask & conflictMask) == 0) break; /* Conflict! */ GET_VXID_FROM_PGPROC(vxid, *proc); /* * If we see an invalid VXID, then either the xact has already * committed (or aborted), or it's a prepared xact. In either * case we may ignore it. */ if (VirtualTransactionIdIsValid(vxid)) vxids[count++] = vxid; break; } LWLockRelease(proc->backendLock); } } /* Remember how many fast-path conflicts we found. */ fast_count = count; /* * Look up the lock object matching the tag. 
*/ LWLockAcquire(partitionLock, LW_SHARED); lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash, (const void *) locktag, hashcode, HASH_FIND, NULL); if (!lock) { /* * If the lock object doesn't exist, there is nothing holding a lock * on this lockable object. */ LWLockRelease(partitionLock); return vxids; } /* * Examine each existing holder (or awaiter) of the lock. */ procLocks = &(lock->procLocks); proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink)); while (proclock) { if (conflictMask & proclock->holdMask) { PGPROC *proc = proclock->tag.myProc; /* A backend never blocks itself */ if (proc != MyProc) { VirtualTransactionId vxid; GET_VXID_FROM_PGPROC(vxid, *proc); /* * If we see an invalid VXID, then either the xact has already * committed (or aborted), or it's a prepared xact. In either * case we may ignore it. */ if (VirtualTransactionIdIsValid(vxid)) { int i; /* Avoid duplicate entries. */ for (i = 0; i < fast_count; ++i) if (VirtualTransactionIdEquals(vxids[i], vxid)) break; if (i >= fast_count) vxids[count++] = vxid; } } } proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink, offsetof(PROCLOCK, lockLink)); } LWLockRelease(partitionLock); if (count > MaxBackends) /* should never happen */ elog(PANIC, "too many conflicting locks found"); return vxids; }
const char *GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition at line 3486 of file lock.c.
References Assert, lengthof, and LockMethodData::lockModeNames.
Referenced by DeadLockReport(), pg_lock_status(), and ProcSleep().
{
    Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
    Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
    return LockMethods[lockmethodid]->lockModeNames[mode];
}
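A usage sketch in the style of the callers above, turning a numeric mode into its display name for a log message (the helper name is illustrative):

#include "postgres.h"
#include "storage/lock.h"

/* Log the human-readable name of a default-method lock mode. */
static void
log_lockmode(LOCKMODE mode)
{
    elog(DEBUG1, "lock mode %d is %s",
         mode, GetLockmodeName(DEFAULT_LOCKMETHOD, mode));
}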
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition at line 461 of file lock.c.
References Assert, lengthof, and LOCK_LOCKMETHOD.
Referenced by DeadLockCheck(), and FindLockCycleRecurse().
{
    LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);

    Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    return LockMethods[lockmethodid];
}
LockData *GetLockStatusData(void)
Definition at line 3246 of file lock.c.
References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert, LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, PGPROC::backendLock, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, LockInstanceData::fastpath, FirstLockMgrLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockData::locks, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, LockInstanceData::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, LockData::nelements, palloc(), PGPROC::pid, LockInstanceData::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, PGPROC::waitLockMode, and LockInstanceData::waitLockMode.
Referenced by pg_lock_status().
{ LockData *data; PROCLOCK *proclock; HASH_SEQ_STATUS seqstat; int els; int el; int i; data = (LockData *) palloc(sizeof(LockData)); /* Guess how much space we'll need. */ els = MaxBackends; el = 0; data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els); /* * First, we iterate through the per-backend fast-path arrays, locking * them one at a time. This might produce an inconsistent picture of the * system state, but taking all of those LWLocks at the same time seems * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't * matter too much, because none of these locks can be involved in lock * conflicts anyway - anything that might must be present in the main lock * table. */ for (i = 0; i < ProcGlobal->allProcCount; ++i) { PGPROC *proc = &ProcGlobal->allProcs[i]; uint32 f; LWLockAcquire(proc->backendLock, LW_SHARED); for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f) { LockInstanceData *instance; uint32 lockbits = FAST_PATH_GET_BITS(proc, f); /* Skip unallocated slots. */ if (!lockbits) continue; if (el >= els) { els += MaxBackends; data->locks = (LockInstanceData *) repalloc(data->locks, sizeof(LockInstanceData) * els); } instance = &data->locks[el]; SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId, proc->fpRelId[f]); instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET; instance->waitLockMode = NoLock; instance->backend = proc->backendId; instance->lxid = proc->lxid; instance->pid = proc->pid; instance->fastpath = true; el++; } if (proc->fpVXIDLock) { VirtualTransactionId vxid; LockInstanceData *instance; if (el >= els) { els += MaxBackends; data->locks = (LockInstanceData *) repalloc(data->locks, sizeof(LockInstanceData) * els); } vxid.backendId = proc->backendId; vxid.localTransactionId = proc->fpLocalTransactionId; instance = &data->locks[el]; SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid); instance->holdMask = LOCKBIT_ON(ExclusiveLock); instance->waitLockMode = NoLock; instance->backend = proc->backendId; instance->lxid = proc->lxid; instance->pid = proc->pid; instance->fastpath = true; el++; } LWLockRelease(proc->backendLock); } /* * Next, acquire lock on the entire shared lock data structure. We do * this so that, at least for locks in the primary lock table, the state * will be self-consistent. * * Since this is a read-only operation, we take shared instead of * exclusive lock. There's not a whole lot of point to this, because all * the normal operations require exclusive lock, but it doesn't hurt * anything either. It will at least allow two backends to do * GetLockStatusData in parallel. * * Must grab LWLocks in partition-number order to avoid LWLock deadlock. 
*/ for (i = 0; i < NUM_LOCK_PARTITIONS; i++) LWLockAcquire(FirstLockMgrLock + i, LW_SHARED); /* Now we can safely count the number of proclocks */ data->nelements = el + hash_get_num_entries(LockMethodProcLockHash); if (data->nelements > els) { els = data->nelements; data->locks = (LockInstanceData *) repalloc(data->locks, sizeof(LockInstanceData) * els); } /* Now scan the tables to copy the data */ hash_seq_init(&seqstat, LockMethodProcLockHash); while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat))) { PGPROC *proc = proclock->tag.myProc; LOCK *lock = proclock->tag.myLock; LockInstanceData *instance = &data->locks[el]; memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG)); instance->holdMask = proclock->holdMask; if (proc->waitLock == proclock->tag.myLock) instance->waitLockMode = proc->waitLockMode; else instance->waitLockMode = NoLock; instance->backend = proc->backendId; instance->lxid = proc->lxid; instance->pid = proc->pid; instance->fastpath = false; el++; } /* * And release locks. We do this in reverse order for two reasons: (1) * Anyone else who needs more than one of the locks will be trying to lock * them in increasing order; we don't want to release the other process * until it can get all the locks it needs. (2) This avoids O(N^2) * behavior inside LWLockRelease. */ for (i = NUM_LOCK_PARTITIONS; --i >= 0;) LWLockRelease(FirstLockMgrLock + i); Assert(el == data->nelements); return data; }
xl_standby_lock *GetRunningTransactionLocks(int *nlocks)
Definition at line 3405 of file lock.c.
References AccessExclusiveLock, PROC_HDR::allPgXact, xl_standby_lock::dbOid, FirstLockMgrLock, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, palloc(), PGPROC::pgprocno, ProcGlobal, xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGXACT::xid.
Referenced by LogStandbySnapshot().
{ PROCLOCK *proclock; HASH_SEQ_STATUS seqstat; int i; int index; int els; xl_standby_lock *accessExclusiveLocks; /* * Acquire lock on the entire shared lock data structure. * * Must grab LWLocks in partition-number order to avoid LWLock deadlock. */ for (i = 0; i < NUM_LOCK_PARTITIONS; i++) LWLockAcquire(FirstLockMgrLock + i, LW_SHARED); /* Now we can safely count the number of proclocks */ els = hash_get_num_entries(LockMethodProcLockHash); /* * Allocating enough space for all locks in the lock table is overkill, * but it's more convenient and faster than having to enlarge the array. */ accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock)); /* Now scan the tables to copy the data */ hash_seq_init(&seqstat, LockMethodProcLockHash); /* * If lock is a currently granted AccessExclusiveLock then it will have * just one proclock holder, so locks are never accessed twice in this * particular case. Don't copy this code for use elsewhere because in the * general case this will give you duplicate locks when looking at * non-exclusive lock types. */ index = 0; while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat))) { /* make sure this definition matches the one used in LockAcquire */ if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) && proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION) { PGPROC *proc = proclock->tag.myProc; PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno]; LOCK *lock = proclock->tag.myLock; TransactionId xid = pgxact->xid; /* * Don't record locks for transactions if we know they have * already issued their WAL record for commit but not yet released * lock. It is still possible that we see locks held by already * complete transactions, if they haven't yet zeroed their xids. */ if (!TransactionIdIsValid(xid)) continue; accessExclusiveLocks[index].xid = xid; accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1; accessExclusiveLocks[index].relOid = lock->tag.locktag_field2; index++; } } /* * And release locks. We do this in reverse order for two reasons: (1) * Anyone else who needs more than one of the locks will be trying to lock * them in increasing order; we don't want to release the other process * until it can get all the locks it needs. (2) This avoids O(N^2) * behavior inside LWLockRelease. */ for (i = NUM_LOCK_PARTITIONS; --i >= 0;) LWLockRelease(FirstLockMgrLock + i); *nlocks = index; return accessExclusiveLocks; }
void GrantAwaitedLock(void)
Definition at line 1552 of file lock.c.
References GrantLockLocal().
Referenced by LockErrorCleanup(), and ProcSleep().
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition at line 1325 of file lock.c.
References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.
Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().
{
    lock->nGranted++;
    lock->granted[lockmode]++;
    lock->grantMask |= LOCKBIT_ON(lockmode);
    if (lock->granted[lockmode] == lock->requested[lockmode])
        lock->waitMask &= LOCKBIT_OFF(lockmode);
    proclock->holdMask |= LOCKBIT_ON(lockmode);
    LOCK_PRINT("GrantLock", lock, lockmode);
    Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    Assert(lock->nGranted <= lock->nRequested);
}
void InitDeadLockChecking(void)
Definition at line 130 of file deadlock.c.
References afterConstraints, beforeConstraints, MaxBackends, maxCurConstraints, maxPossibleConstraints, MemoryContextSwitchTo(), palloc(), and TopMemoryContext.
Referenced by InitProcess().
{ MemoryContext oldcxt; /* Make sure allocations are permanent */ oldcxt = MemoryContextSwitchTo(TopMemoryContext); /* * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and * deadlockDetails[]. */ visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *)); deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO)); /* * TopoSort needs to consider at most MaxBackends wait-queue entries, and * it needn't run concurrently with FindLockCycle. */ topoProcs = visitedProcs; /* re-use this space */ beforeConstraints = (int *) palloc(MaxBackends * sizeof(int)); afterConstraints = (int *) palloc(MaxBackends * sizeof(int)); /* * We need to consider rearranging at most MaxBackends/2 wait queues * (since it takes at least two waiters in a queue to create a soft edge), * and the expanded form of the wait queues can't involve more than * MaxBackends total waiters. */ waitOrders = (WAIT_ORDER *) palloc((MaxBackends / 2) * sizeof(WAIT_ORDER)); waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *)); /* * Allow at most MaxBackends distinct constraints in a configuration. (Is * this enough? In practice it seems it should be, but I don't quite see * how to prove it. If we run out, we might fail to find a workable wait * queue rearrangement even though one exists.) NOTE that this number * limits the maximum recursion depth of DeadLockCheckRecurse. Making it * really big might potentially allow a stack-overflow problem. */ maxCurConstraints = MaxBackends; curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE)); /* * Allow up to 3*MaxBackends constraints to be saved without having to * re-run TestConfiguration. (This is probably more than enough, but we * can survive if we run low on space by doing excess runs of * TestConfiguration to re-compute constraint lists each time needed.) The * last MaxBackends entries in possibleConstraints[] are reserved as * output workspace for FindLockCycle. */ maxPossibleConstraints = MaxBackends * 4; possibleConstraints = (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE)); MemoryContextSwitchTo(oldcxt); }
void InitLocks(void)
Definition at line 372 of file lock.c.
References HASHCTL::entrysize, HASHCTL::hash, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASHCTL::keysize, MemSet, FastPathStrongRelationLockData::mutex, NLOCKENTS, HASHCTL::num_partitions, ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.
Referenced by CreateSharedMemoryAndSemaphores().
{ HASHCTL info; int hash_flags; long init_table_size, max_table_size; bool found; /* * Compute init/max size to request for lock hashtables. Note these * calculations must agree with LockShmemSize! */ max_table_size = NLOCKENTS(); init_table_size = max_table_size / 2; /* * Allocate hash table for LOCK structs. This stores per-locked-object * information. */ MemSet(&info, 0, sizeof(info)); info.keysize = sizeof(LOCKTAG); info.entrysize = sizeof(LOCK); info.hash = tag_hash; info.num_partitions = NUM_LOCK_PARTITIONS; hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); LockMethodLockHash = ShmemInitHash("LOCK hash", init_table_size, max_table_size, &info, hash_flags); /* Assume an average of 2 holders per lock */ max_table_size *= 2; init_table_size *= 2; /* * Allocate hash table for PROCLOCK structs. This stores * per-lock-per-holder information. */ info.keysize = sizeof(PROCLOCKTAG); info.entrysize = sizeof(PROCLOCK); info.hash = proclock_hash; info.num_partitions = NUM_LOCK_PARTITIONS; hash_flags = (HASH_ELEM | HASH_FUNCTION | HASH_PARTITION); LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash", init_table_size, max_table_size, &info, hash_flags); /* * Allocate fast-path structures. */ FastPathStrongRelationLocks = ShmemInitStruct("Fast Path Strong Relation Lock Data", sizeof(FastPathStrongRelationLockData), &found); if (!found) SpinLockInit(&FastPathStrongRelationLocks->mutex); /* * Allocate non-shared hash table for LOCALLOCK structs. This stores lock * counts and resource owner information. * * The non-shared table could already exist in this process (this occurs * when the postmaster is recreating shared memory after a backend crash). * If so, delete and recreate it. (We could simply leave it, since it * ought to be empty in the postmaster, but for safety let's zap it.) */ if (LockMethodLocalHash) hash_destroy(LockMethodLocalHash); info.keysize = sizeof(LOCALLOCKTAG); info.entrysize = sizeof(LOCALLOCK); info.hash = tag_hash; hash_flags = (HASH_ELEM | HASH_FUNCTION); LockMethodLocalHash = hash_create("LOCALLOCK hash", 16, &info, hash_flags); }
void lock_twophase_postabort | ( | TransactionId | xid, | |
uint16 | info, | |||
void * | recdata, | |||
uint32 | len | |||
) |
Definition at line 3836 of file lock.c.
References lock_twophase_postcommit().
{
    lock_twophase_postcommit(xid, info, recdata, len);
}
void lock_twophase_postcommit | ( | TransactionId | xid, | |
uint16 | info, | |||
void * | recdata, | |||
uint32 | len | |||
) |
Definition at line 3810 of file lock.c.
References Assert, elog, ERROR, lengthof, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().
Referenced by lock_twophase_postabort().
{
    TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    PGPROC     *proc = TwoPhaseGetDummyProc(xid);
    LOCKTAG    *locktag;
    LOCKMETHODID lockmethodid;
    LockMethod  lockMethodTable;

    Assert(len == sizeof(TwoPhaseLockRecord));
    locktag = &rec->locktag;
    lockmethodid = locktag->locktag_lockmethodid;

    if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
        elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    lockMethodTable = LockMethods[lockmethodid];

    LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
}
void lock_twophase_recover | ( | TransactionId | xid, | |
uint16 | info, | |||
void * | recdata, | |||
uint32 | len | |||
) |
Definition at line 3599 of file lock.c.
References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, LOCK::granted, GrantLock(), LOCK::grantMask, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, NULL, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcQueueInit(), PROCLOCK::releaseMask, LOCK::requested, SHMQueueEmpty(), SHMQueueInit(), SHMQueueInsertBefore(), SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.
{ TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata; PGPROC *proc = TwoPhaseGetDummyProc(xid); LOCKTAG *locktag; LOCKMODE lockmode; LOCKMETHODID lockmethodid; LOCK *lock; PROCLOCK *proclock; PROCLOCKTAG proclocktag; bool found; uint32 hashcode; uint32 proclock_hashcode; int partition; LWLockId partitionLock; LockMethod lockMethodTable; Assert(len == sizeof(TwoPhaseLockRecord)); locktag = &rec->locktag; lockmode = rec->lockmode; lockmethodid = locktag->locktag_lockmethodid; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; hashcode = LockTagHashCode(locktag); partition = LockHashPartition(hashcode); partitionLock = LockHashPartitionLock(hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* * Find or create a lock with this tag. */ lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash, (void *) locktag, hashcode, HASH_ENTER_NULL, &found); if (!lock) { LWLockRelease(partitionLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("You might need to increase max_locks_per_transaction."))); } /* * if it's a new lock object, initialize it */ if (!found) { lock->grantMask = 0; lock->waitMask = 0; SHMQueueInit(&(lock->procLocks)); ProcQueueInit(&(lock->waitProcs)); lock->nRequested = 0; lock->nGranted = 0; MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES); MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES); LOCK_PRINT("lock_twophase_recover: new", lock, lockmode); } else { LOCK_PRINT("lock_twophase_recover: found", lock, lockmode); Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0)); Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0)); Assert(lock->nGranted <= lock->nRequested); } /* * Create the hash key for the proclock table. */ proclocktag.myLock = lock; proclocktag.myProc = proc; proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode); /* * Find or create a proclock entry with this tag */ proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash, (void *) &proclocktag, proclock_hashcode, HASH_ENTER_NULL, &found); if (!proclock) { /* Ooops, not enough shmem for the proclock */ if (lock->nRequested == 0) { /* * There are no other requestors of this lock, so garbage-collect * the lock object. We *must* do this to avoid a permanent leak * of shared memory, because there won't be anything to cause * anyone to release the lock object later. */ Assert(SHMQueueEmpty(&(lock->procLocks))); if (!hash_search_with_hash_value(LockMethodLockHash, (void *) &(lock->tag), hashcode, HASH_REMOVE, NULL)) elog(PANIC, "lock table corrupted"); } LWLockRelease(partitionLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("You might need to increase max_locks_per_transaction."))); } /* * If new, initialize the new entry */ if (!found) { proclock->holdMask = 0; proclock->releaseMask = 0; /* Add proclock to appropriate lists */ SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink); SHMQueueInsertBefore(&(proc->myProcLocks[partition]), &proclock->procLink); PROCLOCK_PRINT("lock_twophase_recover: new", proclock); } else { PROCLOCK_PRINT("lock_twophase_recover: found", proclock); Assert((proclock->holdMask & ~lock->grantMask) == 0); } /* * lock->nRequested and lock->requested[] count the total number of * requests, whether granted or waiting, so increment those immediately. 
*/ lock->nRequested++; lock->requested[lockmode]++; Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0)); /* * We shouldn't already hold the desired lock. */ if (proclock->holdMask & LOCKBIT_ON(lockmode)) elog(ERROR, "lock %s on object %u/%u/%u is already held", lockMethodTable->lockModeNames[lockmode], lock->tag.locktag_field1, lock->tag.locktag_field2, lock->tag.locktag_field3); /* * We ignore any possible conflicts and just grant ourselves the lock. Not * only because we don't bother, but also to avoid deadlocks when * switching from standby to normal mode. See function comment. */ GrantLock(lock, proclock, lockmode); /* * Bump strong lock count, to make sure any fast-path lock requests won't * be granted without consulting the primary lock table. */ if (ConflictsWithRelationFastPath(&lock->tag, lockmode)) { uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); SpinLockAcquire(&FastPathStrongRelationLocks->mutex); FastPathStrongRelationLocks->count[fasthashcode]++; SpinLockRelease(&FastPathStrongRelationLocks->mutex); } LWLockRelease(partitionLock); }
void lock_twophase_standby_recover | ( | TransactionId | xid, | |
uint16 | info, | |||
void * | recdata, | |||
uint32 | len | |||
) |
Definition at line 3778 of file lock.c.
References AccessExclusiveLock, Assert, elog, ERROR, lengthof, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().
{
    TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
    LOCKTAG    *locktag;
    LOCKMODE    lockmode;
    LOCKMETHODID lockmethodid;

    Assert(len == sizeof(TwoPhaseLockRecord));
    locktag = &rec->locktag;
    lockmode = rec->lockmode;
    lockmethodid = locktag->locktag_lockmethodid;

    if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
        elog(ERROR, "unrecognized lock method: %d", lockmethodid);

    if (lockmode == AccessExclusiveLock &&
        locktag->locktag_type == LOCKTAG_RELATION)
    {
        StandbyAcquireAccessExclusiveLock(xid,
                                          locktag->locktag_field1 /* dboid */ ,
                                          locktag->locktag_field2 /* reloid */ );
    }
}
LockAcquireResult LockAcquire | ( | const LOCKTAG * | locktag, | |
LOCKMODE | lockmode, | |||
bool | sessionLock, | |||
bool | dontWait | |||
) |
Definition at line 672 of file lock.c.
References LockAcquireExtended().
Referenced by ConditionalLockPage(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockDatabaseObject(), LockPage(), LockRelation(), LockRelationForExtension(), LockRelationIdForSession(), LockRelationOid(), LockSharedObject(), LockSharedObjectForSession(), LockTuple(), pg_advisory_lock_int4(), pg_advisory_lock_int8(), pg_advisory_lock_shared_int4(), pg_advisory_lock_shared_int8(), pg_advisory_xact_lock_int4(), pg_advisory_xact_lock_int8(), pg_advisory_xact_lock_shared_int4(), pg_advisory_xact_lock_shared_int8(), pg_try_advisory_lock_int4(), pg_try_advisory_lock_int8(), pg_try_advisory_lock_shared_int4(), pg_try_advisory_lock_shared_int8(), pg_try_advisory_xact_lock_int4(), pg_try_advisory_xact_lock_int8(), pg_try_advisory_xact_lock_shared_int4(), pg_try_advisory_xact_lock_shared_int8(), VirtualXactLock(), XactLockTableInsert(), and XactLockTableWait().
{
    return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait, true);
}
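A minimal sketch of a direct LockAcquire/LockRelease pair on a relation lock tag; in practice backend code normally goes through the lmgr.c wrappers listed under "Referenced by" (LockRelationOid() and friends). The helper name and the dbid/relid arguments are assumptions; SET_LOCKTAG_RELATION is the tag-setup macro from lock.h.

#include "postgres.h"
#include "storage/lock.h"

static void
touch_relation_lock(Oid dbid, Oid relid)
{
    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, dbid, relid);

    /* blocking, transaction-scoped acquire (sessionLock = false) */
    (void) LockAcquire(&tag, AccessShareLock, false, false);

    /* ... inspect the relation ... */

    /* must release with the same mode and sessionLock flag */
    LockRelease(&tag, AccessShareLock, false);
}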
LockAcquireResult LockAcquireExtended | ( | const LOCKTAG * | locktag, | |
LOCKMODE | lockmode, | |||
bool | sessionLock, | |||
bool | dontWait, | |||
bool | reportMemoryError | |||
) |
Definition at line 690 of file lock.c.
References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, PGPROC::backendLock, BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, GrantLock(), GrantLockLocal(), HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCK::lock, LOCALLOCKTAG::lock, LOCK_PRINT, LOCKBIT_ON, LockCheckConflicts(), LockHashPartitionLock, PROCLOCK::lockLink, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NULL, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), SHMQueueDelete(), STATUS_FOUND, STATUS_OK, PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.
Referenced by LockAcquire(), ResolveRecoveryConflictWithLock(), and StandbyAcquireAccessExclusiveLock().
{ LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; LockMethod lockMethodTable; LOCALLOCKTAG localtag; LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; bool found; ResourceOwner owner; uint32 hashcode; LWLockId partitionLock; int status; bool log_lock = false; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes) elog(ERROR, "unrecognized lock mode: %d", lockmode); if (RecoveryInProgress() && !InRecovery && (locktag->locktag_type == LOCKTAG_OBJECT || locktag->locktag_type == LOCKTAG_RELATION) && lockmode > RowExclusiveLock) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot acquire lock mode %s on database objects while recovery is in progress", lockMethodTable->lockModeNames[lockmode]), errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery."))); #ifdef LOCK_DEBUG if (LOCK_DEBUG_ENABLED(locktag)) elog(LOG, "LockAcquire: lock [%u,%u] %s", locktag->locktag_field1, locktag->locktag_field2, lockMethodTable->lockModeNames[lockmode]); #endif /* Identify owner for lock */ if (sessionLock) owner = NULL; else owner = CurrentResourceOwner; /* * Find or create a LOCALLOCK entry for this lock and lockmode */ MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */ localtag.lock = *locktag; localtag.mode = lockmode; locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash, (void *) &localtag, HASH_ENTER, &found); /* * if it's a new locallock object, initialize it */ if (!found) { locallock->lock = NULL; locallock->proclock = NULL; locallock->hashcode = LockTagHashCode(&(localtag.lock)); locallock->nLocks = 0; locallock->numLockOwners = 0; locallock->maxLockOwners = 8; locallock->holdsStrongLockCount = FALSE; locallock->lockOwners = NULL; locallock->lockOwners = (LOCALLOCKOWNER *) MemoryContextAlloc(TopMemoryContext, locallock->maxLockOwners * sizeof(LOCALLOCKOWNER)); } else { /* Make sure there will be room to remember the lock */ if (locallock->numLockOwners >= locallock->maxLockOwners) { int newsize = locallock->maxLockOwners * 2; locallock->lockOwners = (LOCALLOCKOWNER *) repalloc(locallock->lockOwners, newsize * sizeof(LOCALLOCKOWNER)); locallock->maxLockOwners = newsize; } } hashcode = locallock->hashcode; /* * If we already hold the lock, we can just increase the count locally. */ if (locallock->nLocks > 0) { GrantLockLocal(locallock, owner); return LOCKACQUIRE_ALREADY_HELD; } /* * Emit a WAL record if acquisition of this lock needs to be replayed in a * standby server. Only AccessExclusiveLocks can conflict with lock types * that read-only transactions can acquire in a standby server. * * Make sure this definition matches the one in * GetRunningTransactionLocks(). * * First we prepare to log, then after lock acquired we issue log record. */ if (lockmode >= AccessExclusiveLock && locktag->locktag_type == LOCKTAG_RELATION && !RecoveryInProgress() && XLogStandbyInfoActive()) { LogAccessExclusiveLockPrepare(); log_lock = true; } /* * Attempt to take lock via fast path, if eligible. But if we remember * having filled up the fast path array, we don't attempt to make any * further use of it until we release some locks. It's possible that some * other backend has transferred some of those locks to the shared hash * table, leaving space free, but it's not worth acquiring the LWLock just * to check. 
It's also possible that we're acquiring a second or third * lock type on a relation we have already locked using the fast-path, but * for now we don't worry about that case either. */ if (EligibleForRelationFastPath(locktag, lockmode) && FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND) { uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); bool acquired; /* * LWLockAcquire acts as a memory sequencing point, so it's safe to * assume that any strong locker whose increment to * FastPathStrongRelationLocks->counts becomes visible after we test * it has yet to begin to transfer fast-path locks. */ LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE); if (FastPathStrongRelationLocks->count[fasthashcode] != 0) acquired = false; else acquired = FastPathGrantRelationLock(locktag->locktag_field2, lockmode); LWLockRelease(MyProc->backendLock); if (acquired) { GrantLockLocal(locallock, owner); return LOCKACQUIRE_OK; } } /* * If this lock could potentially have been taken via the fast-path by * some other backend, we must (temporarily) disable further use of the * fast-path for this lock tag, and migrate any locks already taken via * this method to the main lock table. */ if (ConflictsWithRelationFastPath(locktag, lockmode)) { uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode); BeginStrongLockAcquire(locallock, fasthashcode); if (!FastPathTransferRelationLocks(lockMethodTable, locktag, hashcode)) { AbortStrongLockAcquire(); if (reportMemoryError) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("You might need to increase max_locks_per_transaction."))); else return LOCKACQUIRE_NOT_AVAIL; } } /* * We didn't find the lock in our LOCALLOCK table, and we didn't manage to * take it via the fast-path, either, so we've got to mess with the shared * lock table. */ partitionLock = LockHashPartitionLock(hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* * Find or create a proclock entry with this tag */ proclock = SetupLockInTable(lockMethodTable, MyProc, locktag, hashcode, lockmode); if (!proclock) { AbortStrongLockAcquire(); LWLockRelease(partitionLock); if (reportMemoryError) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("You might need to increase max_locks_per_transaction."))); else return LOCKACQUIRE_NOT_AVAIL; } locallock->proclock = proclock; lock = proclock->tag.myLock; locallock->lock = lock; /* * If lock requested conflicts with locks requested by waiters, must join * wait queue. Otherwise, check for conflict with already-held locks. * (That's last because most complex check.) */ if (lockMethodTable->conflictTab[lockmode] & lock->waitMask) status = STATUS_FOUND; else status = LockCheckConflicts(lockMethodTable, lockmode, lock, proclock, MyProc); if (status == STATUS_OK) { /* No conflict with held or previously requested locks */ GrantLock(lock, proclock, lockmode); GrantLockLocal(locallock, owner); } else { Assert(status == STATUS_FOUND); /* * We can't acquire the lock immediately. If caller specified no * blocking, remove useless table entries and return NOT_AVAIL without * waiting. 
*/ if (dontWait) { AbortStrongLockAcquire(); if (proclock->holdMask == 0) { uint32 proclock_hashcode; proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode); SHMQueueDelete(&proclock->lockLink); SHMQueueDelete(&proclock->procLink); if (!hash_search_with_hash_value(LockMethodProcLockHash, (void *) &(proclock->tag), proclock_hashcode, HASH_REMOVE, NULL)) elog(PANIC, "proclock table corrupted"); } else PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock); lock->nRequested--; lock->requested[lockmode]--; LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode); Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0)); Assert(lock->nGranted <= lock->nRequested); LWLockRelease(partitionLock); if (locallock->nLocks == 0) RemoveLocalLock(locallock); return LOCKACQUIRE_NOT_AVAIL; } /* * Set bitmask of locks this process already holds on this object. */ MyProc->heldLocks = proclock->holdMask; /* * Sleep till someone wakes me up. */ TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1, locktag->locktag_field2, locktag->locktag_field3, locktag->locktag_field4, locktag->locktag_type, lockmode); WaitOnLock(locallock, owner); TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1, locktag->locktag_field2, locktag->locktag_field3, locktag->locktag_field4, locktag->locktag_type, lockmode); /* * NOTE: do not do any material change of state between here and * return. All required changes in locktable state must have been * done when the lock was granted to us --- see notes in WaitOnLock. */ /* * Check the proclock entry status, in case something in the ipc * communication doesn't work correctly. */ if (!(proclock->holdMask & LOCKBIT_ON(lockmode))) { AbortStrongLockAcquire(); PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock); LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode); /* Should we retry ? */ LWLockRelease(partitionLock); elog(ERROR, "LockAcquire failed"); } PROCLOCK_PRINT("LockAcquire: granted", proclock); LOCK_PRINT("LockAcquire: granted", lock, lockmode); } /* * Lock state is fully up-to-date now; if we error out after this, no * special error cleanup is required. */ FinishStrongLockAcquire(); LWLockRelease(partitionLock); /* * Emit a WAL record if acquisition of this lock need to be replayed in a * standby server. */ if (log_lock) { /* * Decode the locktag back to the original values, to avoid sending * lots of empty bytes with every message. See lock.h to check how a * locktag is defined for LOCKTAG_RELATION */ LogAccessExclusiveLock(locktag->locktag_field1, locktag->locktag_field2); } return LOCKACQUIRE_OK; }
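The extra parameters are what conditional callers rely on: dontWait turns a blocked request into LOCKACQUIRE_NOT_AVAIL, and passing false for reportMemoryError converts lock-table exhaustion into the same soft failure instead of an ERROR (see the body above). A hedged sketch with a hypothetical helper; dbid/relid are assumed valid.

#include "postgres.h"
#include "storage/lock.h"

static bool
try_lock_relation(Oid dbid, Oid relid, LOCKMODE lockmode)
{
    LOCKTAG     tag;
    LockAcquireResult res;

    SET_LOCKTAG_RELATION(tag, dbid, relid);

    res = LockAcquireExtended(&tag, lockmode,
                              false,    /* transaction lock, not session */
                              true,     /* dontWait: fail instead of sleeping */
                              false);   /* no ERROR on lock-table overflow */

    return (res != LOCKACQUIRE_NOT_AVAIL);
}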
int LockCheckConflicts | ( | LockMethod | lockMethodTable, | |
LOCKMODE | lockmode, | |||
LOCK * | lock, | |||
PROCLOCK * | proclock, | |||
PGPROC * | proc | |||
) |
Definition at line 1256 of file lock.c.
References LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, i, LOCKBIT_ON, LockMethodData::numLockModes, and PROCLOCK_PRINT.
Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().
{ int numLockModes = lockMethodTable->numLockModes; LOCKMASK myLocks; LOCKMASK otherLocks; int i; /* * first check for global conflicts: If no locks conflict with my request, * then I get the lock. * * Checking for conflict: lock->grantMask represents the types of * currently held locks. conflictTable[lockmode] has a bit set for each * type of lock that conflicts with request. Bitwise compare tells if * there is a conflict. */ if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask)) { PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock); return STATUS_OK; } /* * Rats. Something conflicts. But it could still be my own lock. We have * to construct a conflict mask that does not reflect our own locks, but * only lock types held by other processes. */ myLocks = proclock->holdMask; otherLocks = 0; for (i = 1; i <= numLockModes; i++) { int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0; if (lock->granted[i] > myHolding) otherLocks |= LOCKBIT_ON(i); } /* * now check again for conflicts. 'otherLocks' describes the types of * locks held by other processes. If one of these conflicts with the kind * of lock that I want, there is a conflict and I have to sleep. */ if (!(lockMethodTable->conflictTab[lockmode] & otherLocks)) { /* no conflict. OK to get the lock */ PROCLOCK_PRINT("LockCheckConflicts: resolved", proclock); return STATUS_OK; } PROCLOCK_PRINT("LockCheckConflicts: conflicting", proclock); return STATUS_FOUND; }
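A small worked example of the mask arithmetic: the first test above is simply conflictTab[lockmode] & grantMask. The bit values below assume the standard lock-conflict matrix (ShareLock conflicts with RowExclusiveLock and stronger modes), which is not spelled out on this page; the function is an illustration only.

#include "postgres.h"
#include "storage/lock.h"

static bool
would_conflict_example(void)
{
    /* another backend holds RowExclusiveLock on the object */
    LOCKMASK    grantMask = LOCKBIT_ON(RowExclusiveLock);

    /* assumed contents of conflictTab[ShareLock] under the default method */
    LOCKMASK    conflicts = LOCKBIT_ON(RowExclusiveLock) |
                            LOCKBIT_ON(ShareUpdateExclusiveLock) |
                            LOCKBIT_ON(ShareRowExclusiveLock) |
                            LOCKBIT_ON(ExclusiveLock) |
                            LOCKBIT_ON(AccessExclusiveLock);

    /* the same bitwise test LockCheckConflicts starts with */
    return (conflicts & grantMask) != 0;    /* true: requester must wait */
}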
bool LockHasWaiters | ( | const LOCKTAG * | locktag, | |
LOCKMODE | lockmode, | |||
bool | sessionLock | |||
) |
Definition at line 560 of file lock.c.
References LockMethodData::conflictTab, elog, ERROR, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCK::lock, LOCALLOCKTAG::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.
Referenced by LockHasWaitersRelation().
{ LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; LockMethod lockMethodTable; LOCALLOCKTAG localtag; LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; LWLockId partitionLock; bool hasWaiters = false; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes) elog(ERROR, "unrecognized lock mode: %d", lockmode); #ifdef LOCK_DEBUG if (LOCK_DEBUG_ENABLED(locktag)) elog(LOG, "LockHasWaiters: lock [%u,%u] %s", locktag->locktag_field1, locktag->locktag_field2, lockMethodTable->lockModeNames[lockmode]); #endif /* * Find the LOCALLOCK entry for this lock and lockmode */ MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */ localtag.lock = *locktag; localtag.mode = lockmode; locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash, (void *) &localtag, HASH_FIND, NULL); /* * let the caller print its own error message, too. Do not ereport(ERROR). */ if (!locallock || locallock->nLocks <= 0) { elog(WARNING, "you don't own a lock of type %s", lockMethodTable->lockModeNames[lockmode]); return false; } /* * Check the shared lock table. */ partitionLock = LockHashPartitionLock(locallock->hashcode); LWLockAcquire(partitionLock, LW_SHARED); /* * We don't need to re-find the lock or proclock, since we kept their * addresses in the locallock table, and they couldn't have been removed * while we were holding a lock on them. */ lock = locallock->lock; LOCK_PRINT("LockHasWaiters: found", lock, lockmode); proclock = locallock->proclock; PROCLOCK_PRINT("LockHasWaiters: found", proclock); /* * Double-check that we are actually holding a lock of the type we want to * release. */ if (!(proclock->holdMask & LOCKBIT_ON(lockmode))) { PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock); LWLockRelease(partitionLock); elog(WARNING, "you don't own a lock of type %s", lockMethodTable->lockModeNames[lockmode]); RemoveLocalLock(locallock); return false; } /* * Do the checking. */ if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0) hasWaiters = true; LWLockRelease(partitionLock); return hasWaiters; }
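Callers normally reach this through LockHasWaitersRelation(), the one referencer above, for example to give up an AccessExclusiveLock when another backend is queued behind it. A minimal sketch, assuming 'rel' is an open relation on which this backend already holds the lock.

#include "postgres.h"
#include "storage/lmgr.h"
#include "utils/rel.h"

static bool
someone_is_waiting(Relation rel)
{
    /* returns false (with a WARNING) if we do not actually hold the lock */
    return LockHasWaitersRelation(rel, AccessExclusiveLock);
}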
void LockReassignCurrentOwner | ( | LOCALLOCK ** | locallocks, | |
int | nlocks | |||
) |
Definition at line 2298 of file lock.c.
References Assert, CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockReassignOwner(), NULL, and ResourceOwnerGetParent().
Referenced by ResourceOwnerReleaseInternal().
{
    ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);

    Assert(parent != NULL);

    if (locallocks == NULL)
    {
        HASH_SEQ_STATUS status;
        LOCALLOCK  *locallock;

        hash_seq_init(&status, LockMethodLocalHash);

        while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
            LockReassignOwner(locallock, parent);
    }
    else
    {
        int         i;

        for (i = nlocks - 1; i >= 0; i--)
            LockReassignOwner(locallocks[i], parent);
    }
}
bool LockRelease | ( | const LOCKTAG * | locktag, | |
LOCKMODE | lockmode, | |||
bool | sessionLock | |||
) |
Definition at line 1730 of file lock.c.
References Assert, PGPROC::backendLock, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCK::lock, LOCALLOCKTAG::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, MyProc, LOCALLOCK::nLocks, NULL, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.
Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), StandbyReleaseAllLocks(), StandbyReleaseLocks(), StandbyReleaseOldLocks(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockTableDelete(), and XactLockTableWait().
{ LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid; LockMethod lockMethodTable; LOCALLOCKTAG localtag; LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; LWLockId partitionLock; bool wakeupNeeded; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes) elog(ERROR, "unrecognized lock mode: %d", lockmode); #ifdef LOCK_DEBUG if (LOCK_DEBUG_ENABLED(locktag)) elog(LOG, "LockRelease: lock [%u,%u] %s", locktag->locktag_field1, locktag->locktag_field2, lockMethodTable->lockModeNames[lockmode]); #endif /* * Find the LOCALLOCK entry for this lock and lockmode */ MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */ localtag.lock = *locktag; localtag.mode = lockmode; locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash, (void *) &localtag, HASH_FIND, NULL); /* * let the caller print its own error message, too. Do not ereport(ERROR). */ if (!locallock || locallock->nLocks <= 0) { elog(WARNING, "you don't own a lock of type %s", lockMethodTable->lockModeNames[lockmode]); return FALSE; } /* * Decrease the count for the resource owner. */ { LOCALLOCKOWNER *lockOwners = locallock->lockOwners; ResourceOwner owner; int i; /* Identify owner for lock */ if (sessionLock) owner = NULL; else owner = CurrentResourceOwner; for (i = locallock->numLockOwners - 1; i >= 0; i--) { if (lockOwners[i].owner == owner) { Assert(lockOwners[i].nLocks > 0); if (--lockOwners[i].nLocks == 0) { if (owner != NULL) ResourceOwnerForgetLock(owner, locallock); /* compact out unused slot */ locallock->numLockOwners--; if (i < locallock->numLockOwners) lockOwners[i] = lockOwners[locallock->numLockOwners]; } break; } } if (i < 0) { /* don't release a lock belonging to another owner */ elog(WARNING, "you don't own a lock of type %s", lockMethodTable->lockModeNames[lockmode]); return FALSE; } } /* * Decrease the total local count. If we're still holding the lock, we're * done. */ locallock->nLocks--; if (locallock->nLocks > 0) return TRUE; /* Attempt fast release of any lock eligible for the fast path. */ if (EligibleForRelationFastPath(locktag, lockmode) && FastPathLocalUseCount > 0) { bool released; /* * We might not find the lock here, even if we originally entered it * here. Another backend may have moved it to the main table. */ LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE); released = FastPathUnGrantRelationLock(locktag->locktag_field2, lockmode); LWLockRelease(MyProc->backendLock); if (released) { RemoveLocalLock(locallock); return TRUE; } } /* * Otherwise we've got to mess with the shared lock table. */ partitionLock = LockHashPartitionLock(locallock->hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); /* * Normally, we don't need to re-find the lock or proclock, since we kept * their addresses in the locallock table, and they couldn't have been * removed while we were holding a lock on them. But it's possible that * the locks have been moved to the main hash table by another backend, in * which case we might need to go look them up after all. 
*/ lock = locallock->lock; if (!lock) { PROCLOCKTAG proclocktag; bool found; Assert(EligibleForRelationFastPath(locktag, lockmode)); lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash, (const void *) locktag, locallock->hashcode, HASH_FIND, &found); Assert(found && lock != NULL); locallock->lock = lock; proclocktag.myLock = lock; proclocktag.myProc = MyProc; locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash, (void *) &proclocktag, HASH_FIND, &found); Assert(found); } LOCK_PRINT("LockRelease: found", lock, lockmode); proclock = locallock->proclock; PROCLOCK_PRINT("LockRelease: found", proclock); /* * Double-check that we are actually holding a lock of the type we want to * release. */ if (!(proclock->holdMask & LOCKBIT_ON(lockmode))) { PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock); LWLockRelease(partitionLock); elog(WARNING, "you don't own a lock of type %s", lockMethodTable->lockModeNames[lockmode]); RemoveLocalLock(locallock); return FALSE; } /* * Do the releasing. CleanUpLock will waken any now-wakable waiters. */ wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable); CleanUpLock(lock, proclock, lockMethodTable, locallock->hashcode, wakeupNeeded); LWLockRelease(partitionLock); RemoveLocalLock(locallock); return TRUE; }
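Locks must be released with the same lockmode and sessionLock flag they were acquired with; session-level locks (sessionLock = true) are not tracked by a ResourceOwner and survive transaction end, so they need an explicit release. A hedged sketch of that pairing; the helper name and arguments are invented, compare LockRelationIdForSession()/UnlockRelationIdForSession().

#include "postgres.h"
#include "storage/lock.h"

static void
hold_for_session(Oid dbid, Oid relid)
{
    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, dbid, relid);

    (void) LockAcquire(&tag, RowExclusiveLock, true, false);    /* session lock */

    /* ... possibly spans several transactions ... */

    if (!LockRelease(&tag, RowExclusiveLock, true))
        elog(WARNING, "session lock was not held as expected");
}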
void LockReleaseAll | ( | LOCKMETHODID | lockmethodid, | |
bool | allLocks | |||
) |
Definition at line 1923 of file lock.c.
References Assert, PGPROC::backendLock, CleanUpLock(), DEFAULT_LOCKMETHOD, EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), FirstLockMgrLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, MyProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NULL, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, offsetof, LOCALLOCKOWNER::owner, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), SHMQueueNext(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().
Referenced by DiscardAll(), ProcReleaseLocks(), and ShutdownPostgres().
{ HASH_SEQ_STATUS status; LockMethod lockMethodTable; int i, numLockModes; LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; int partition; bool have_fast_path_lwlock = false; if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods)) elog(ERROR, "unrecognized lock method: %d", lockmethodid); lockMethodTable = LockMethods[lockmethodid]; #ifdef LOCK_DEBUG if (*(lockMethodTable->trace_flag)) elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid); #endif /* * Get rid of our fast-path VXID lock, if appropriate. Note that this is * the only way that the lock we hold on our own VXID can ever get * released: it is always and only released when a toplevel transaction * ends. */ if (lockmethodid == DEFAULT_LOCKMETHOD) VirtualXactLockTableCleanup(); numLockModes = lockMethodTable->numLockModes; /* * First we run through the locallock table and get rid of unwanted * entries, then we scan the process's proclocks and get rid of those. We * do this separately because we may have multiple locallock entries * pointing to the same proclock, and we daren't end up with any dangling * pointers. */ hash_seq_init(&status, LockMethodLocalHash); while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) { /* * If the LOCALLOCK entry is unused, we must've run out of shared * memory while trying to set up this lock. Just forget the local * entry. */ if (locallock->nLocks == 0) { RemoveLocalLock(locallock); continue; } /* Ignore items that are not of the lockmethod to be removed */ if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid) continue; /* * If we are asked to release all locks, we can just zap the entry. * Otherwise, must scan to see if there are session locks. We assume * there is at most one lockOwners entry for session locks. */ if (!allLocks) { LOCALLOCKOWNER *lockOwners = locallock->lockOwners; /* If session lock is above array position 0, move it down to 0 */ for (i = 0; i < locallock->numLockOwners ; i++) { if (lockOwners[i].owner == NULL) lockOwners[0] = lockOwners[i]; else ResourceOwnerForgetLock(lockOwners[i].owner, locallock); } if (locallock->numLockOwners > 0 && lockOwners[0].owner == NULL && lockOwners[0].nLocks > 0) { /* Fix the locallock to show just the session locks */ locallock->nLocks = lockOwners[0].nLocks; locallock->numLockOwners = 1; /* We aren't deleting this locallock, so done */ continue; } else locallock->numLockOwners = 0; } /* * If the lock or proclock pointers are NULL, this lock was taken via * the relation fast-path. */ if (locallock->proclock == NULL || locallock->lock == NULL) { LOCKMODE lockmode = locallock->tag.mode; Oid relid; /* Verify that a fast-path lock is what we've got. */ if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode)) elog(PANIC, "locallock table corrupted"); /* * If we don't currently hold the LWLock that protects our * fast-path data structures, we must acquire it before attempting * to release the lock via the fast-path. */ if (!have_fast_path_lwlock) { LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE); have_fast_path_lwlock = true; } /* Attempt fast-path release. */ relid = locallock->tag.lock.locktag_field2; if (FastPathUnGrantRelationLock(relid, lockmode)) { RemoveLocalLock(locallock); continue; } /* * Our lock, originally taken via the fast path, has been * transferred to the main lock table. That's going to require * some extra work, so release our fast-path lock before starting. */ LWLockRelease(MyProc->backendLock); have_fast_path_lwlock = false; /* * Now dump the lock. 
We haven't got a pointer to the LOCK or * PROCLOCK in this case, so we have to handle this a bit * differently than a normal lock release. Unfortunately, this * requires an extra LWLock acquire-and-release cycle on the * partitionLock, but hopefully it shouldn't happen often. */ LockRefindAndRelease(lockMethodTable, MyProc, &locallock->tag.lock, lockmode, false); RemoveLocalLock(locallock); continue; } /* Mark the proclock to show we need to release this lockmode */ if (locallock->nLocks > 0) locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode); /* And remove the locallock hashtable entry */ RemoveLocalLock(locallock); } if (have_fast_path_lwlock) LWLockRelease(MyProc->backendLock); /* * Now, scan each lock partition separately. */ for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++) { LWLockId partitionLock = FirstLockMgrLock + partition; SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]); proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, procLink)); if (!proclock) continue; /* needn't examine this partition */ LWLockAcquire(partitionLock, LW_EXCLUSIVE); while (proclock) { bool wakeupNeeded = false; PROCLOCK *nextplock; /* Get link first, since we may unlink/delete this proclock */ nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink, offsetof(PROCLOCK, procLink)); Assert(proclock->tag.myProc == MyProc); lock = proclock->tag.myLock; /* Ignore items that are not of the lockmethod to be removed */ if (LOCK_LOCKMETHOD(*lock) != lockmethodid) goto next_item; /* * In allLocks mode, force release of all locks even if locallock * table had problems */ if (allLocks) proclock->releaseMask = proclock->holdMask; else Assert((proclock->releaseMask & ~proclock->holdMask) == 0); /* * Ignore items that have nothing to be released, unless they have * holdMask == 0 and are therefore recyclable */ if (proclock->releaseMask == 0 && proclock->holdMask != 0) goto next_item; PROCLOCK_PRINT("LockReleaseAll", proclock); LOCK_PRINT("LockReleaseAll", lock, 0); Assert(lock->nRequested >= 0); Assert(lock->nGranted >= 0); Assert(lock->nGranted <= lock->nRequested); Assert((proclock->holdMask & ~lock->grantMask) == 0); /* * Release the previously-marked lock modes */ for (i = 1; i <= numLockModes; i++) { if (proclock->releaseMask & LOCKBIT_ON(i)) wakeupNeeded |= UnGrantLock(lock, i, proclock, lockMethodTable); } Assert((lock->nRequested >= 0) && (lock->nGranted >= 0)); Assert(lock->nGranted <= lock->nRequested); LOCK_PRINT("LockReleaseAll: updated", lock, 0); proclock->releaseMask = 0; /* CleanUpLock will wake up waiters if needed. */ CleanUpLock(lock, proclock, lockMethodTable, LockTagHashCode(&lock->tag), wakeupNeeded); next_item: proclock = nextplock; } /* loop over PROCLOCKs within this partition */ LWLockRelease(partitionLock); } /* loop over partitions */ #ifdef LOCK_DEBUG if (*(lockMethodTable->trace_flag)) elog(LOG, "LockReleaseAll done"); #endif }
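LockReleaseAll() is the workhorse behind end-of-transaction cleanup (ProcReleaseLocks(), listed above). A hedged sketch of that calling pattern, modeled loosely on ProcReleaseLocks() and assuming the DEFAULT_LOCKMETHOD/USER_LOCKMETHOD constants from lock.h; the helper itself is hypothetical.

#include "postgres.h"
#include "storage/lock.h"

static void
release_transaction_locks(bool isCommit)
{
    /* regular locks; when aborting, discard session-level locks as well */
    LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);

    /* transaction-scoped advisory locks, if any were taken */
    LockReleaseAll(USER_LOCKMETHOD, false);
}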
void LockReleaseCurrentOwner | ( | LOCALLOCK ** | locallocks, | |
int | nlocks | |||
) |
Definition at line 2203 of file lock.c.
References hash_seq_init(), hash_seq_search(), i, NULL, and ReleaseLockIfHeld().
Referenced by ResourceOwnerReleaseInternal().
{
    if (locallocks == NULL)
    {
        HASH_SEQ_STATUS status;
        LOCALLOCK  *locallock;

        hash_seq_init(&status, LockMethodLocalHash);

        while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
            ReleaseLockIfHeld(locallock, false);
    }
    else
    {
        int         i;

        for (i = nlocks - 1; i >= 0; i--)
            ReleaseLockIfHeld(locallocks[i], false);
    }
}
void LockReleaseSession | ( | LOCKMETHODID | lockmethodid | ) |
Definition at line 2173 of file lock.c.
References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, NULL, and ReleaseLockIfHeld().
Referenced by pg_advisory_unlock_all().
{
    HASH_SEQ_STATUS status;
    LOCALLOCK  *locallock;

    if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
        elog(ERROR, "unrecognized lock method: %d", lockmethodid);

    hash_seq_init(&status, LockMethodLocalHash);

    while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
    {
        /* Ignore items that are not of the specified lock method */
        if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
            continue;

        ReleaseLockIfHeld(locallock, true);
    }
}
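The only caller listed above, pg_advisory_unlock_all(), passes the user lock method. A minimal sketch of that call, assuming USER_LOCKMETHOD from lock.h; the wrapper function is hypothetical.

#include "postgres.h"
#include "storage/lock.h"

static void
drop_all_session_advisory_locks(void)
{
    /* discard every session-level advisory lock this backend holds */
    LockReleaseSession(USER_LOCKMETHOD);
}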
Size LockShmemSize | ( | void | ) |
Definition at line 3210 of file lock.c.
References add_size(), hash_estimate_size(), and NLOCKENTS.
Referenced by CreateSharedMemoryAndSemaphores().
{
    Size        size = 0;
    long        max_table_size;

    /* lock hash table */
    max_table_size = NLOCKENTS();
    size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));

    /* proclock hash table */
    max_table_size *= 2;
    size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));

    /*
     * Since NLOCKENTS is only an estimate, add 10% safety margin.
     */
    size = add_size(size, size / 10);

    return size;
}
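The same sizing idiom applies to any shared-memory consumer: estimate each hash table with hash_estimate_size(), combine with overflow-checked add_size(), and pad because the entry count is only a guess. A sketch for a hypothetical module; MYENTRY and MyModuleShmemSize are invented names.

#include "postgres.h"
#include "storage/shmem.h"
#include "utils/hsearch.h"

typedef struct MYENTRY { Oid key; int value; } MYENTRY;

static Size
MyModuleShmemSize(long my_max_entries)
{
    Size        size = 0;

    size = add_size(size, hash_estimate_size(my_max_entries, sizeof(MYENTRY)));

    /* 10% slop, mirroring LockShmemSize */
    size = add_size(size, size / 10);

    return size;
}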
uint32 LockTagHashCode | ( | const LOCKTAG * | locktag | )
Definition at line 479 of file lock.c.
References get_hash_value().
Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), proclock_hash(), and VirtualXactLock().
{
    return get_hash_value(LockMethodLockHash, (const void *) locktag);
}
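The hash code is what callers use to pick the lock partition and the LWLock protecting it, as in lock_twophase_recover() above. A minimal sketch, assuming the LockHashPartitionLock() macro from lock.h and an already-valid dbid/relid; the helper itself is hypothetical.

#include "postgres.h"
#include "storage/lock.h"

static void
examine_lock_partition(Oid dbid, Oid relid)
{
    LOCKTAG     tag;
    uint32      hashcode;
    LWLockId    partitionLock;

    SET_LOCKTAG_RELATION(tag, dbid, relid);

    hashcode = LockTagHashCode(&tag);                /* stable across backends */
    partitionLock = LockHashPartitionLock(hashcode); /* one of NUM_LOCK_PARTITIONS locks */

    LWLockAcquire(partitionLock, LW_SHARED);
    /* ... examine the shared lock table entries in this partition ... */
    LWLockRelease(partitionLock);
}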
void PostPrepare_Locks | ( | TransactionId | xid | ) |
Definition at line 3036 of file lock.c.
References Assert, elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), FirstLockMgrLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, MyProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NULL, LOCALLOCK::numLockOwners, offsetof, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), SHMQueueDelete(), SHMQueueInsertBefore(), SHMQueueNext(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().
Referenced by PrepareTransaction().
{ PGPROC *newproc = TwoPhaseGetDummyProc(xid); HASH_SEQ_STATUS status; LOCALLOCK *locallock; LOCK *lock; PROCLOCK *proclock; PROCLOCKTAG proclocktag; int partition; /* This is a critical section: any error means big trouble */ START_CRIT_SECTION(); /* * First we run through the locallock table and get rid of unwanted * entries, then we scan the process's proclocks and transfer them to the * target proc. * * We do this separately because we may have multiple locallock entries * pointing to the same proclock, and we daren't end up with any dangling * pointers. */ hash_seq_init(&status, LockMethodLocalHash); while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL) { LOCALLOCKOWNER *lockOwners = locallock->lockOwners; bool haveSessionLock; bool haveXactLock; int i; if (locallock->proclock == NULL || locallock->lock == NULL) { /* * We must've run out of shared memory while trying to set up this * lock. Just forget the local entry. */ Assert(locallock->nLocks == 0); RemoveLocalLock(locallock); continue; } /* Ignore VXID locks */ if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION) continue; /* Scan to see whether we hold it at session or transaction level */ haveSessionLock = haveXactLock = false; for (i = locallock->numLockOwners - 1; i >= 0; i--) { if (lockOwners[i].owner == NULL) haveSessionLock = true; else haveXactLock = true; } /* Ignore it if we have only session lock */ if (!haveXactLock) continue; /* This can't happen, because we already checked it */ if (haveSessionLock) ereport(PANIC, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object"))); /* Mark the proclock to show we need to release this lockmode */ if (locallock->nLocks > 0) locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode); /* And remove the locallock hashtable entry */ RemoveLocalLock(locallock); } /* * Now, scan each lock partition separately. */ for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++) { LWLockId partitionLock = FirstLockMgrLock + partition; SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]); proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, procLink)); if (!proclock) continue; /* needn't examine this partition */ LWLockAcquire(partitionLock, LW_EXCLUSIVE); while (proclock) { PROCLOCK *nextplock; /* Get link first, since we may unlink/relink this proclock */ nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink, offsetof(PROCLOCK, procLink)); Assert(proclock->tag.myProc == MyProc); lock = proclock->tag.myLock; /* Ignore VXID locks */ if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION) goto next_item; PROCLOCK_PRINT("PostPrepare_Locks", proclock); LOCK_PRINT("PostPrepare_Locks", lock, 0); Assert(lock->nRequested >= 0); Assert(lock->nGranted >= 0); Assert(lock->nGranted <= lock->nRequested); Assert((proclock->holdMask & ~lock->grantMask) == 0); /* Ignore it if nothing to release (must be a session lock) */ if (proclock->releaseMask == 0) goto next_item; /* Else we should be releasing all locks */ if (proclock->releaseMask != proclock->holdMask) elog(PANIC, "we seem to have dropped a bit somewhere"); /* * We cannot simply modify proclock->tag.myProc to reassign * ownership of the lock, because that's part of the hash key and * the proclock would then be in the wrong hash chain. Instead * use hash_update_hash_key. 
(We used to create a new hash entry, * but that risks out-of-memory failure if other processes are * busy making proclocks too.) We must unlink the proclock from * our procLink chain and put it into the new proc's chain, too. * * Note: the updated proclock hash key will still belong to the * same hash partition, cf proclock_hash(). So the partition * lock we already hold is sufficient for this. */ SHMQueueDelete(&proclock->procLink); /* * Create the new hash key for the proclock. */ proclocktag.myLock = lock; proclocktag.myProc = newproc; /* * Update the proclock. We should not find any existing entry * for the same hash key, since there can be only one entry for * any given lock with my own proc. */ if (!hash_update_hash_key(LockMethodProcLockHash, (void *) proclock, (void *) &proclocktag)) elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks"); /* Re-link into the new proc's proclock list */ SHMQueueInsertBefore(&(newproc->myProcLocks[partition]), &proclock->procLink); PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock); next_item: proclock = nextplock; } /* loop over PROCLOCKs within this partition */ LWLockRelease(partitionLock); } /* loop over partitions */ END_CRIT_SECTION(); }
void RememberSimpleDeadLock | ( | PGPROC * | proc1, | |
LOCKMODE | lockmode, | |||
LOCK * | lock, | |||
PGPROC * | proc2 | |||
) |
Definition at line 965 of file deadlock.c.
References DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, nDeadlockDetails, PGPROC::pid, DEADLOCK_INFO::pid, LOCK::tag, PGPROC::waitLock, and PGPROC::waitLockMode.
Referenced by ProcSleep().
{
    DEADLOCK_INFO *info = &deadlockDetails[0];

    info->locktag = lock->tag;
    info->lockmode = lockmode;
    info->pid = proc1->pid;
    info++;
    info->locktag = proc2->waitLock->tag;
    info->lockmode = proc2->waitLockMode;
    info->pid = proc2->pid;
    nDeadlockDetails = 2;
}
void RemoveFromWaitQueue | ( | PGPROC * | proc, | |
uint32 | hashcode | |||
) |
Definition at line 1673 of file lock.c.
References Assert, CleanUpLock(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, SHM_QUEUE::next, LOCK::nGranted, LOCK::nRequested, NULL, LOCK::requested, SHMQueueDelete(), PROC_QUEUE::size, STATUS_WAITING, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.
Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().
{ LOCK *waitLock = proc->waitLock; PROCLOCK *proclock = proc->waitProcLock; LOCKMODE lockmode = proc->waitLockMode; LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock); /* Make sure proc is waiting */ Assert(proc->waitStatus == STATUS_WAITING); Assert(proc->links.next != NULL); Assert(waitLock); Assert(waitLock->waitProcs.size > 0); Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods)); /* Remove proc from lock's wait queue */ SHMQueueDelete(&(proc->links)); waitLock->waitProcs.size--; /* Undo increments of request counts by waiting process */ Assert(waitLock->nRequested > 0); Assert(waitLock->nRequested > proc->waitLock->nGranted); waitLock->nRequested--; Assert(waitLock->requested[lockmode] > 0); waitLock->requested[lockmode]--; /* don't forget to clear waitMask bit if appropriate */ if (waitLock->granted[lockmode] == waitLock->requested[lockmode]) waitLock->waitMask &= LOCKBIT_OFF(lockmode); /* Clean up the proc's own state, and pass it the ok/fail signal */ proc->waitLock = NULL; proc->waitProcLock = NULL; proc->waitStatus = STATUS_ERROR; /* * Delete the proclock immediately if it represents no already-held locks. * (This must happen now because if the owner of the lock decides to * release it, and the requested/granted counts then go to zero, * LockRelease expects there to be no remaining proclocks.) Then see if * any other waiters for the lock can be woken up now. */ CleanUpLock(waitLock, proclock, LockMethods[lockmethodid], hashcode, true); }
void ReportLockTableError | ( | bool | report | ) |
bool VirtualXactLock | ( | VirtualTransactionId | vxid, | |
bool | wait | |||
) |
Definition at line 3930 of file lock.c.
References Assert, PGPROC::backendId, VirtualTransactionId::backendId, BackendIdGetProc(), PGPROC::backendLock, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, NULL, SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, and VirtualTransactionIdIsValid.
Referenced by DefineIndex(), index_drop(), and ResolveRecoveryConflictWithVirtualXIDs().
{ LOCKTAG tag; PGPROC *proc; Assert(VirtualTransactionIdIsValid(vxid)); SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid); /* * If a lock table entry must be made, this is the PGPROC on whose behalf * it must be done. Note that the transaction might end or the PGPROC * might be reassigned to a new backend before we get around to examining * it, but it doesn't matter. If we find upon examination that the * relevant lxid is no longer running here, that's enough to prove that * it's no longer running anywhere. */ proc = BackendIdGetProc(vxid.backendId); if (proc == NULL) return true; /* * We must acquire this lock before checking the backendId and lxid * against the ones we're waiting for. The target backend will only set * or clear lxid while holding this lock. */ LWLockAcquire(proc->backendLock, LW_EXCLUSIVE); /* If the transaction has ended, our work here is done. */ if (proc->backendId != vxid.backendId || proc->fpLocalTransactionId != vxid.localTransactionId) { LWLockRelease(proc->backendLock); return true; } /* * If we aren't asked to wait, there's no need to set up a lock table * entry. The transaction is still in progress, so just return false. */ if (!wait) { LWLockRelease(proc->backendLock); return false; } /* * OK, we're going to need to sleep on the VXID. But first, we must set * up the primary lock table entry, if needed (ie, convert the proc's * fast-path lock on its VXID to a regular lock). */ if (proc->fpVXIDLock) { PROCLOCK *proclock; uint32 hashcode; LWLockId partitionLock; hashcode = LockTagHashCode(&tag); partitionLock = LockHashPartitionLock(hashcode); LWLockAcquire(partitionLock, LW_EXCLUSIVE); proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc, &tag, hashcode, ExclusiveLock); if (!proclock) { LWLockRelease(partitionLock); ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of shared memory"), errhint("You might need to increase max_locks_per_transaction."))); } GrantLock(proclock->tag.myLock, proclock, ExclusiveLock); LWLockRelease(partitionLock); proc->fpVXIDLock = false; } /* Done with proc->fpLockBits */ LWLockRelease(proc->backendLock); /* Time to wait. */ (void) LockAcquire(&tag, ShareLock, false, false); LockRelease(&tag, ShareLock, false); return true; }
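The usual calling pattern (DefineIndex() and index_drop() above) is to collect the conflicting virtual transaction ids with GetLockConflicts() and then sleep on each one in turn. A hedged sketch; the helper name is invented and 'heaplocktag' is assumed to be an already-initialized LOCKTAG for the table of interest.

#include "postgres.h"
#include "storage/lock.h"

static void
wait_for_conflicting_vxids(const LOCKTAG *heaplocktag)
{
    VirtualTransactionId *old_lockholders;

    old_lockholders = GetLockConflicts(heaplocktag, ShareLock);

    while (VirtualTransactionIdIsValid(*old_lockholders))
    {
        /* wait = true: block until that virtual transaction ends */
        VirtualXactLock(*old_lockholders, true);
        old_lockholders++;
    }
}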
void VirtualXactLockTableCleanup | ( | void | ) |
Definition at line 3883 of file lock.c.
References Assert, VirtualTransactionId::backendId, PGPROC::backendId, PGPROC::backendLock, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidBackendId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyBackendId, MyProc, and SET_LOCKTAG_VIRTUALTRANSACTION.
Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().
{ bool fastpath; LocalTransactionId lxid; Assert(MyProc->backendId != InvalidBackendId); /* * Clean up shared memory state. */ LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE); fastpath = MyProc->fpVXIDLock; lxid = MyProc->fpLocalTransactionId; MyProc->fpVXIDLock = false; MyProc->fpLocalTransactionId = InvalidLocalTransactionId; LWLockRelease(MyProc->backendLock); /* * If fpVXIDLock has been cleared without touching fpLocalTransactionId, * that means someone transferred the lock to the main lock table. */ if (!fastpath && LocalTransactionIdIsValid(lxid)) { VirtualTransactionId vxid; LOCKTAG locktag; vxid.backendId = MyBackendId; vxid.localTransactionId = lxid; SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid); LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc, &locktag, ExclusiveLock, false); } }
void VirtualXactLockTableInsert | ( | VirtualTransactionId | vxid | ) |
Definition at line 3860 of file lock.c.
References Assert, VirtualTransactionId::backendId, PGPROC::backendId, PGPROC::backendLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, and VirtualTransactionIdIsValid.
Referenced by InitRecoveryTransactionEnvironment(), and StartTransaction().
{
    Assert(VirtualTransactionIdIsValid(vxid));

    LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);

    Assert(MyProc->backendId == vxid.backendId);
    Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
    Assert(MyProc->fpVXIDLock == false);

    MyProc->fpVXIDLock = true;
    MyProc->fpLocalTransactionId = vxid.localTransactionId;

    LWLockRelease(MyProc->backendLock);
}
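A sketch of the advertisement done at transaction start (StartTransaction() above): once MyProc's local transaction id has been assigned, publish the vxid so other backends can wait on it with VirtualXactLock(). GET_VXID_FROM_PGPROC is the helper macro from lock.h; the surrounding function is hypothetical.

#include "postgres.h"
#include "storage/lock.h"
#include "storage/proc.h"

static void
advertise_my_vxid(void)
{
    VirtualTransactionId vxid;

    GET_VXID_FROM_PGPROC(vxid, *MyProc);    /* backendId + lxid */
    VirtualXactLockTableInsert(vxid);

    /*
     * The matching teardown happens via VirtualXactLockTableCleanup(),
     * called from LockReleaseAll() at top-level transaction end.
     */
}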
Definition at line 51 of file lock.c.
Referenced by BootStrapXLOG(), CheckRequiredParameterValues(), and XLogReportParameters().