
| #define NUM_BUFFER_PARTITIONS 16 |
Definition at line 24 of file lwlock.h.
Referenced by StrategyInitialize(), and StrategyShmemSize().
| #define NUM_PREDICATELOCK_PARTITIONS (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS) |
| typedef enum LWLockMode LWLockMode |
| enum LWLockId |
Definition at line 47 of file lwlock.h.
{
BufFreelistLock,
ShmemIndexLock,
OidGenLock,
XidGenLock,
ProcArrayLock,
SInvalReadLock,
SInvalWriteLock,
WALInsertLock,
WALWriteLock,
ControlFileLock,
CheckpointLock,
CLogControlLock,
SubtransControlLock,
MultiXactGenLock,
MultiXactOffsetControlLock,
MultiXactMemberControlLock,
RelCacheInitLock,
CheckpointerCommLock,
TwoPhaseStateLock,
TablespaceCreateLock,
BtreeVacuumLock,
AddinShmemInitLock,
AutovacuumLock,
AutovacuumScheduleLock,
SyncScanLock,
RelationMappingLock,
AsyncCtlLock,
AsyncQueueLock,
SerializableXactHashLock,
SerializableFinishedListLock,
SerializablePredicateLockListLock,
OldSerXidLock,
SyncRepLock,
/* Individual lock IDs end here */
FirstBufMappingLock,
FirstLockMgrLock = FirstBufMappingLock + NUM_BUFFER_PARTITIONS,
FirstPredicateLockMgrLock = FirstLockMgrLock + NUM_LOCK_PARTITIONS,
/* must be last except for MaxDynamicLWLock: */
NumFixedLWLocks = FirstPredicateLockMgrLock + NUM_PREDICATELOCK_PARTITIONS,
MaxDynamicLWLock = 1000000000
} LWLockId;
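The three First*Lock values mark the starting points of contiguous ranges of partitioned locks (buffer mapping, lock manager, and predicate lock manager). A particular lock within a range is chosen by hashing; the sketch below mirrors the BufMappingPartitionLock() macro found in buf_internals.h:

/* Map a buffer-tag hash code onto one of the buffer-mapping partition locks. */
#define BufTableHashPartition(hashcode) \
    ((hashcode) % NUM_BUFFER_PARTITIONS)
#define BufMappingPartitionLock(hashcode) \
    ((LWLockId) (FirstBufMappingLock + BufTableHashPartition(hashcode)))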
| enum LWLockMode |
Definition at line 94 of file lwlock.h.
{
LW_EXCLUSIVE,
LW_SHARED,
LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode,
* when waiting for lock to become free. Not
* to be used as LWLockAcquire argument */
} LWLockMode;
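Callers pass only LW_SHARED or LW_EXCLUSIVE to LWLockAcquire(); LW_WAIT_UNTIL_FREE exists solely as a wait-queue marker in PGPROC->lwWaitMode. A minimal usage sketch, using a lock name from the enum above:

    /* Readers may share the lock... */
    LWLockAcquire(ProcArrayLock, LW_SHARED);
    /* ... inspect shared state here ... */
    LWLockRelease(ProcArrayLock);

    /* ... but writers must hold it exclusively. */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    /* ... modify shared state here ... */
    LWLockRelease(ProcArrayLock);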
| void CreateLWLocks | ( | void | ) |
Definition at line 261 of file lwlock.c.
References LWLock::exclusive, LWLock::head, LWLockPadded::lock, LWLOCK_PADDED_SIZE, LWLockArray, LWLockShmemSize(), LWLock::mutex, NumFixedLWLocks, NumLWLocks(), LWLock::releaseOK, LWLock::shared, ShmemAlloc(), SpinLockInit, and LWLock::tail.
Referenced by CreateSharedMemoryAndSemaphores().
{
int numLocks = NumLWLocks();
Size spaceLocks = LWLockShmemSize();
LWLockPadded *lock;
int *LWLockCounter;
char *ptr;
int id;
/* Allocate space */
ptr = (char *) ShmemAlloc(spaceLocks);
/* Leave room for dynamic allocation counter */
ptr += 2 * sizeof(int);
/* Ensure desired alignment of LWLock array */
ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
LWLockArray = (LWLockPadded *) ptr;
/*
* Initialize all LWLocks to "unlocked" state
*/
for (id = 0, lock = LWLockArray; id < numLocks; id++, lock++)
{
SpinLockInit(&lock->lock.mutex);
lock->lock.releaseOK = true;
lock->lock.exclusive = 0;
lock->lock.shared = 0;
lock->lock.head = NULL;
lock->lock.tail = NULL;
}
/*
* Initialize the dynamic-allocation counter, which is stored just before
* the first LWLock.
*/
LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
LWLockCounter[0] = (int) NumFixedLWLocks;
LWLockCounter[1] = numLocks;
}
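The shared-memory layout that results from the pointer arithmetic above can be sketched as follows; the dynamic-allocation counter occupies the two ints immediately preceding the aligned array:

    /*
     *   +---------------------------+  <- ptr returned by ShmemAlloc()
     *   | alignment padding         |
     *   +---------------------------+  <- LWLockArray - 2 * sizeof(int)
     *   | LWLockCounter[0]          |  next LWLockId to assign dynamically
     *   | LWLockCounter[1]          |  total number of LWLocks allocated
     *   +---------------------------+  <- LWLockArray (LWLOCK_PADDED_SIZE-aligned)
     *   | LWLockPadded[0 .. n-1]    |  each slot padded to LWLOCK_PADDED_SIZE
     *   +---------------------------+     to keep locks on separate cache lines
     */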
| void LWLockAcquire | ( | LWLockId lockid, LWLockMode mode | ) |
Definition at line 341 of file lwlock.c.
References Assert, elog, ERROR, LWLock::exclusive, LWLock::head, held_lwlocks, HOLD_INTERRUPTS, IsUnderPostmaster, LWLockPadded::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LWLockArray, PGPROC::lwWaiting, PGPROC::lwWaitLink, PGPROC::lwWaitMode, MAX_SIMUL_LWLOCKS, LWLock::mutex, MyProc, MyProcPid, NULL, num_held_lwlocks, PANIC, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, LWLock::releaseOK, PGPROC::sem, LWLock::shared, SpinLockAcquire, SpinLockRelease, and LWLock::tail.
Referenced by _bt_end_vacuum(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbortBufferIO(), AbsorbFsyncRequests(), AdvanceXLInsertBuffer(), analyze_rel(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AsyncShmemInit(), AtPrepare_PredicateLocks(), AutoVacLauncherMain(), AutoVacWorkerMain(), BackendIdGetProc(), BackendPidGetProc(), BackendXidGetPid(), BootStrapCLOG(), BootStrapMultiXact(), BootStrapSUBTRANS(), BufferAlloc(), CancelDBBackends(), CancelVirtualTransaction(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointTwoPhase(), CheckRecoveryConsistency(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), CleanupInvalidationState(), ClearOldPredicateLocks(), clog_redo(), CountDBBackends(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreatePredicateLock(), CreateRestartPoint(), DeleteChildTargetLocks(), DeleteLockTarget(), do_autovacuum(), do_pg_abort_backup(), do_pg_start_backup(), do_pg_stop_backup(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), entry_reset(), Exec_ListenPreCommit(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendCLOG(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FlushDatabaseBuffers(), FlushRelationBuffers(), ForceTransactionIdLimitUpdate(), ForwardFsyncRequest(), FreeWorkerInfo(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastSegSwitchTime(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestXmin(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetVirtualXIDsDelayingChkpt(), GetXLogInsertRecPtr(), HaveVirtualXIDsDelayingChkpt(), InstallXLogFileSegment(), InvalidateBuffer(), KnownAssignedXidsAdd(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OldSerXidAdd(), OldSerXidGetMinConflictCommitSeqNo(), OldSerXidSetActiveSerXmin(), PageIsPredicateLocked(), perform_relmap_update(), pg_buffercache_pages(), pg_start_backup_callback(), pg_stat_get_wal_senders(), pg_stat_statements(), pgss_shmem_startup(), pgss_store(), PostPrepare_Locks(), PostPrepare_MultiXact(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayEndTransaction(), ProcArrayInstallImportedXmin(), ProcArrayRemove(), ProcSleep(), ReadNewTransactionId(), ReadNextMultiXactId(), ReadRecord(), RecordKnownAssignedTransactionIds(), RecordNewMultiXact(), RegisterPredicateLockingXid(), RelationCacheInitFilePreInvalidate(), 
ReleaseOneSerializableXact(), ReleasePredicateLocks(), RemoveGXact(), RemoveScratchTarget(), RestoreScratchTarget(), SetMultiXactIdLimit(), SetTransactionIdLimit(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SIInsertDataEntries(), SimpleLruFlush(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SlruInternalWritePage(), ss_get_location(), StartBufferIO(), StartupCLOG(), StartupMultiXact(), StartupSUBTRANS(), StartupXLOG(), StrategyFreeBuffer(), StrategyGetBuffer(), StrategyNotifyBgWriter(), StrategySyncStart(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SyncOneBuffer(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepInitConfig(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), TransactionIdIsActive(), TransactionIdIsInProgress(), TransactionIdIsPrepared(), TransactionIdSetPageStatus(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TwoPhaseGetGXact(), UpdateFullPageWrites(), UpdateMinRecoveryPoint(), vacuum_rel(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), WaitIO(), write_relcache_init_file(), xact_redo_abort(), xact_redo_commit_internal(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), and XLogInsert().
{
volatile LWLock *lock = &(LWLockArray[lockid].lock);
PGPROC *proc = MyProc;
bool retry = false;
int extraWaits = 0;
PRINT_LWDEBUG("LWLockAcquire", lockid, lock);
#ifdef LWLOCK_STATS
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
init_lwlock_stats();
/* Count lock acquisition attempts */
if (mode == LW_EXCLUSIVE)
ex_acquire_counts[lockid]++;
else
sh_acquire_counts[lockid]++;
#endif /* LWLOCK_STATS */
/*
* We can't wait if we haven't got a PGPROC. This should only occur
* during bootstrap or shared memory initialization. Put an Assert here
* to catch unsafe coding practices.
*/
Assert(!(proc == NULL && IsUnderPostmaster));
/* Ensure we will have room to remember the lock */
if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
elog(ERROR, "too many LWLocks taken");
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
* manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
/*
* Loop here to try to acquire lock after each time we are signaled by
* LWLockRelease.
*
* NOTE: it might seem better to have LWLockRelease actually grant us the
* lock, rather than retrying and possibly having to go back to sleep. But
* in practice that is no good because it means a process swap for every
* lock acquisition when two or more processes are contending for the same
* lock. Since LWLocks are normally used to protect not-very-long
* sections of computation, a process needs to be able to acquire and
* release the same lock many times during a single CPU time slice, even
* in the presence of contention. The efficiency of being able to do that
* outweighs the inefficiency of sometimes wasting a process dispatch
* cycle because the lock is not free when a released waiter finally gets
* to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
bool mustwait;
/* Acquire mutex. Time spent holding mutex should be short! */
#ifdef LWLOCK_STATS
spin_delay_counts[lockid] += SpinLockAcquire(&lock->mutex);
#else
SpinLockAcquire(&lock->mutex);
#endif
/* If retrying, allow LWLockRelease to release waiters again */
if (retry)
lock->releaseOK = true;
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
{
if (lock->exclusive == 0 && lock->shared == 0)
{
lock->exclusive++;
mustwait = false;
}
else
mustwait = true;
}
else
{
if (lock->exclusive == 0)
{
lock->shared++;
mustwait = false;
}
else
mustwait = true;
}
if (!mustwait)
break; /* got the lock */
/*
* Add myself to wait queue.
*
* If we don't have a PGPROC structure, there's no way to wait. This
* should never occur, since MyProc should only be null during shared
* memory initialization.
*/
if (proc == NULL)
elog(PANIC, "cannot wait without a PGPROC structure");
proc->lwWaiting = true;
proc->lwWaitMode = mode;
proc->lwWaitLink = NULL;
if (lock->head == NULL)
lock->head = proc;
else
lock->tail->lwWaitLink = proc;
lock->tail = proc;
/* Can release the mutex now */
SpinLockRelease(&lock->mutex);
/*
* Wait until awakened.
*
* Since we share the process wait semaphore with the regular lock
* manager and ProcWaitForSignal, and we may need to acquire an LWLock
* while one of those is pending, it is possible that we get awakened
* for a reason other than being signaled by LWLockRelease. If so,
* loop back and wait again. Once we've gotten the LWLock,
* re-increment the sema by the number of additional signals received,
* so that the lock manager or signal manager will see the received
* signal when it next waits.
*/
LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
#ifdef LWLOCK_STATS
block_counts[lockid]++;
#endif
TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
for (;;)
{
/* "false" means cannot accept cancel/die interrupt here. */
PGSemaphoreLock(&proc->sem, false);
if (!proc->lwWaiting)
break;
extraWaits++;
}
TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
LOG_LWDEBUG("LWLockAcquire", lockid, "awakened");
/* Now loop back and try to acquire lock again. */
retry = true;
}
/* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex);
TRACE_POSTGRESQL_LWLOCK_ACQUIRE(lockid, mode);
/* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = lockid;
/*
* Fix the process wait semaphore's count for any absorbed wakeups.
*/
while (extraWaits-- > 0)
PGSemaphoreUnlock(&proc->sem);
}
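Note that an LWLock held in shared mode cannot be upgraded in place: a caller that discovers it needs write access must release the lock, reacquire it exclusively, and recheck its precondition, since the shared state may have changed in between. A common sketch (SomeLock and the helper functions are hypothetical):

    LWLockAcquire(SomeLock, LW_SHARED);
    if (update_needed())                    /* hypothetical predicate */
    {
        LWLockRelease(SomeLock);
        LWLockAcquire(SomeLock, LW_EXCLUSIVE);
        if (update_needed())                /* recheck: state may have changed */
            perform_update();               /* hypothetical */
    }
    LWLockRelease(SomeLock);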
| bool LWLockAcquireOrWait | ( | LWLockId lockid, LWLockMode mode | ) |
Definition at line 595 of file lwlock.c.
References elog, ERROR, LWLock::exclusive, LWLock::head, held_lwlocks, HOLD_INTERRUPTS, LWLockPadded::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LWLockArray, PGPROC::lwWaiting, PGPROC::lwWaitLink, PGPROC::lwWaitMode, MAX_SIMUL_LWLOCKS, LWLock::mutex, MyProc, MyProcPid, NULL, num_held_lwlocks, PANIC, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, LWLock::shared, SpinLockAcquire, SpinLockRelease, and LWLock::tail.
Referenced by XLogFlush().
{
volatile LWLock *lock = &(LWLockArray[lockid].lock);
PGPROC *proc = MyProc;
bool mustwait;
int extraWaits = 0;
PRINT_LWDEBUG("LWLockAcquireOrWait", lockid, lock);
#ifdef LWLOCK_STATS
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
init_lwlock_stats();
#endif
/* Ensure we will have room to remember the lock */
if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
elog(ERROR, "too many LWLocks taken");
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
* manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
{
if (lock->exclusive == 0 && lock->shared == 0)
{
lock->exclusive++;
mustwait = false;
}
else
mustwait = true;
}
else
{
if (lock->exclusive == 0)
{
lock->shared++;
mustwait = false;
}
else
mustwait = true;
}
if (mustwait)
{
/*
* Add myself to wait queue.
*
* If we don't have a PGPROC structure, there's no way to wait. This
* should never occur, since MyProc should only be null during shared
* memory initialization.
*/
if (proc == NULL)
elog(PANIC, "cannot wait without a PGPROC structure");
proc->lwWaiting = true;
proc->lwWaitMode = LW_WAIT_UNTIL_FREE;
proc->lwWaitLink = NULL;
if (lock->head == NULL)
lock->head = proc;
else
lock->tail->lwWaitLink = proc;
lock->tail = proc;
/* Can release the mutex now */
SpinLockRelease(&lock->mutex);
/*
* Wait until awakened. Like in LWLockAcquire, be prepared for bogus
* wakeups, because we share the semaphore with ProcWaitForSignal.
*/
LOG_LWDEBUG("LWLockAcquireOrWait", lockid, "waiting");
#ifdef LWLOCK_STATS
block_counts[lockid]++;
#endif
TRACE_POSTGRESQL_LWLOCK_WAIT_START(lockid, mode);
for (;;)
{
/* "false" means cannot accept cancel/die interrupt here. */
PGSemaphoreLock(&proc->sem, false);
if (!proc->lwWaiting)
break;
extraWaits++;
}
TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(lockid, mode);
LOG_LWDEBUG("LWLockAcquireOrWait", lockid, "awakened");
}
else
{
/* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex);
}
/*
* Fix the process wait semaphore's count for any absorbed wakeups.
*/
while (extraWaits-- > 0)
PGSemaphoreUnlock(&proc->sem);
if (mustwait)
{
/* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS();
LOG_LWDEBUG("LWLockAcquireOrWait", lockid, "failed");
TRACE_POSTGRESQL_LWLOCK_WAIT_UNTIL_FREE_FAIL(lockid, mode);
}
else
{
/* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = lockid;
TRACE_POSTGRESQL_LWLOCK_WAIT_UNTIL_FREE(lockid, mode);
}
return !mustwait;
}
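The sole in-tree caller, XLogFlush(), relies on the queue-and-retest idiom this function enables: if another backend held WALWriteLock, its flush may already have covered our request, so on a false return the caller rechecks before retrying rather than flushing redundantly. A sketch of that pattern (the helper functions are hypothetical):

    for (;;)
    {
        if (flush_already_done())           /* hypothetical recheck */
            break;
        if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
        {
            /* We got the lock; do the flush ourselves. */
            perform_flush();                /* hypothetical */
            LWLockRelease(WALWriteLock);
            break;
        }
        /*
         * Lock was held and has since been released: loop and recheck,
         * because the previous holder may have done our work for us.
         */
    }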
| LWLockId LWLockAssign | ( | void | ) |
Definition at line 313 of file lwlock.c.
References elog, ERROR, LWLockArray, ShmemLock, SpinLockAcquire, and SpinLockRelease.
Referenced by InitBufferPool(), InitProcGlobal(), pgss_shmem_startup(), and SimpleLruInit().
{
LWLockId result;
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
SpinLockAcquire(ShmemLock);
if (LWLockCounter[0] >= LWLockCounter[1])
{
SpinLockRelease(ShmemLock);
elog(ERROR, "no more LWLockIds available");
}
result = (LWLockId) (LWLockCounter[0]++);
SpinLockRelease(ShmemLock);
return result;
}
| bool LWLockConditionalAcquire | ( | LWLockId lockid, LWLockMode mode | ) |
Definition at line 517 of file lwlock.c.
References elog, ERROR, LWLock::exclusive, held_lwlocks, HOLD_INTERRUPTS, LWLockPadded::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LWLockArray, MAX_SIMUL_LWLOCKS, LWLock::mutex, num_held_lwlocks, PRINT_LWDEBUG, RESUME_INTERRUPTS, LWLock::shared, SpinLockAcquire, and SpinLockRelease.
Referenced by BufferAlloc(), ConditionalLockBuffer(), SimpleLruWaitIO(), ss_report_location(), XLogFlush(), and XLogNeedsFlush().
{
volatile LWLock *lock = &(LWLockArray[lockid].lock);
bool mustwait;
PRINT_LWDEBUG("LWLockConditionalAcquire", lockid, lock);
/* Ensure we will have room to remember the lock */
if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
elog(ERROR, "too many LWLocks taken");
/*
* Lock out cancel/die interrupts until we exit the code section protected
* by the LWLock. This ensures that interrupts will not interfere with
* manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE)
{
if (lock->exclusive == 0 && lock->shared == 0)
{
lock->exclusive++;
mustwait = false;
}
else
mustwait = true;
}
else
{
if (lock->exclusive == 0)
{
lock->shared++;
mustwait = false;
}
else
mustwait = true;
}
/* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex);
if (mustwait)
{
/* Failed to get lock, so release interrupt holdoff */
RESUME_INTERRUPTS();
LOG_LWDEBUG("LWLockConditionalAcquire", lockid, "failed");
TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(lockid, mode);
}
else
{
/* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = lockid;
TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(lockid, mode);
}
return !mustwait;
}
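A typical caller uses the boolean result to fall back to other work instead of blocking; ConditionalLockBuffer(), for instance, simply propagates the result to its own caller. A minimal sketch (SomeLock and the helpers are hypothetical):

    if (LWLockConditionalAcquire(SomeLock, LW_EXCLUSIVE))
    {
        do_protected_work();                /* hypothetical */
        LWLockRelease(SomeLock);
    }
    else
        do_something_else();                /* couldn't get it without blocking */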
| bool LWLockHeldByMe | ( | LWLockId | lockid | ) |
Definition at line 877 of file lwlock.c.
References held_lwlocks, and num_held_lwlocks.
Referenced by CompactCheckpointerRequestQueue(), DeleteLockTarget(), MarkBufferDirty(), MarkBufferDirtyHint(), OnConflict_CheckForSerializationFailure(), ReleaseOneSerializableXact(), RemoveScratchTarget(), RemoveTargetIfNoLongerUsed(), RestoreScratchTarget(), SetNewSxactGlobalXmin(), TransferPredicateLocksToNewTarget(), and UnpinBuffer().
{
int i;
for (i = 0; i < num_held_lwlocks; i++)
{
if (held_lwlocks[i] == lockid)
return true;
}
return false;
}
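Callers use this almost exclusively as a sanity check, asserting on entry that a lock the caller was supposed to take is in fact held. For example (lock name chosen for illustration):

    /* Caller must already hold the lock, in either mode. */
    Assert(LWLockHeldByMe(SerializableXactHashLock));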
| void LWLockRelease | ( | LWLockId | lockid | ) |
Definition at line 728 of file lwlock.c.
References Assert, elog, ERROR, LWLock::exclusive, LWLock::head, held_lwlocks, LWLockPadded::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_WAIT_UNTIL_FREE, LWLockArray, PGPROC::lwWaiting, PGPROC::lwWaitLink, PGPROC::lwWaitMode, LWLock::mutex, NULL, num_held_lwlocks, PGSemaphoreUnlock(), PRINT_LWDEBUG, LWLock::releaseOK, RESUME_INTERRUPTS, PGPROC::sem, LWLock::shared, SpinLockAcquire, and SpinLockRelease.
Referenced by _bt_end_vacuum(), _bt_start_vacuum(), _bt_vacuum_cycleid(), AbsorbFsyncRequests(), AdvanceXLInsertBuffer(), analyze_rel(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AsyncShmemInit(), AtPrepare_PredicateLocks(), AutoVacLauncherMain(), AutoVacWorkerMain(), BackendIdGetProc(), BackendPidGetProc(), BackendXidGetPid(), BootStrapCLOG(), BootStrapMultiXact(), BootStrapSUBTRANS(), BufferAlloc(), CancelDBBackends(), CancelVirtualTransaction(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointTwoPhase(), CheckRecoveryConsistency(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), CleanupInvalidationState(), ClearOldPredicateLocks(), clog_redo(), CountDBBackends(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreatePredicateLock(), CreateRestartPoint(), DeleteChildTargetLocks(), DeleteLockTarget(), do_autovacuum(), do_pg_abort_backup(), do_pg_start_backup(), do_pg_stop_backup(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), entry_reset(), Exec_ListenPreCommit(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendCLOG(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FlushDatabaseBuffers(), FlushRelationBuffers(), ForceTransactionIdLimitUpdate(), ForwardFsyncRequest(), FreeWorkerInfo(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastSegSwitchTime(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestXmin(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetVirtualXIDsDelayingChkpt(), GetXLogInsertRecPtr(), HaveVirtualXIDsDelayingChkpt(), InstallXLogFileSegment(), InvalidateBuffer(), KnownAssignedXidsAdd(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockBuffer(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LogStandbySnapshot(), LWLockReleaseAll(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OldSerXidAdd(), OldSerXidGetMinConflictCommitSeqNo(), OldSerXidSetActiveSerXmin(), OnConflict_CheckForSerializationFailure(), PageIsPredicateLocked(), perform_relmap_update(), pg_buffercache_pages(), pg_start_backup_callback(), pg_stat_get_wal_senders(), pg_stat_statements(), pgss_shmem_startup(), pgss_store(), PostPrepare_Locks(), PostPrepare_MultiXact(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayEndTransaction(), ProcArrayInstallImportedXmin(), ProcArrayRemove(), ProcSleep(), ReadNewTransactionId(), ReadNextMultiXactId(), ReadRecord(), RecordKnownAssignedTransactionIds(), RecordNewMultiXact(), 
RegisterPredicateLockingXid(), RelationCacheInitFilePostInvalidate(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), RemoveGXact(), RemoveScratchTarget(), RestoreScratchTarget(), SetMultiXactIdLimit(), SetTransactionIdLimit(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SIInsertDataEntries(), SimpleLruFlush(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SlruInternalWritePage(), ss_get_location(), ss_report_location(), StartBufferIO(), StartupCLOG(), StartupMultiXact(), StartupSUBTRANS(), StartupXLOG(), StrategyFreeBuffer(), StrategyGetBuffer(), StrategyNotifyBgWriter(), StrategySyncStart(), SubTransGetParent(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SyncOneBuffer(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepInitConfig(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), TerminateBufferIO(), TransactionIdGetStatus(), TransactionIdIsActive(), TransactionIdIsInProgress(), TransactionIdIsPrepared(), TransactionIdSetPageStatus(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TruncateMultiXact(), TwoPhaseGetGXact(), UpdateFullPageWrites(), UpdateMinRecoveryPoint(), vacuum_rel(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), WaitIO(), WaitOnLock(), write_relcache_init_file(), xact_redo_abort(), xact_redo_commit_internal(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), XLogFlush(), XLogInsert(), and XLogNeedsFlush().
{
volatile LWLock *lock = &(LWLockArray[lockid].lock);
PGPROC *head;
PGPROC *proc;
int i;
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
/*
* Remove lock from list of locks held. Usually, but not always, it will
* be the latest-acquired lock; so search array backwards.
*/
for (i = num_held_lwlocks; --i >= 0;)
{
if (lockid == held_lwlocks[i])
break;
}
if (i < 0)
elog(ERROR, "lock %d is not held", (int) lockid);
num_held_lwlocks--;
for (; i < num_held_lwlocks; i++)
held_lwlocks[i] = held_lwlocks[i + 1];
/* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire(&lock->mutex);
/* Release my hold on lock */
if (lock->exclusive > 0)
lock->exclusive--;
else
{
Assert(lock->shared > 0);
lock->shared--;
}
/*
* See if I need to awaken any waiters. If I released a non-last shared
* hold, there cannot be anything to do. Also, do not awaken any waiters
* if someone has already awakened waiters that haven't yet acquired the
* lock.
*/
head = lock->head;
if (head != NULL)
{
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
* Remove the to-be-awakened PGPROCs from the queue.
*/
bool releaseOK = true;
proc = head;
/*
* First wake up any backends that want to be woken up without
* acquiring the lock.
*/
while (proc->lwWaitMode == LW_WAIT_UNTIL_FREE && proc->lwWaitLink)
proc = proc->lwWaitLink;
/*
* If the front waiter wants exclusive lock, awaken him only.
* Otherwise awaken as many waiters as want shared access.
*/
if (proc->lwWaitMode != LW_EXCLUSIVE)
{
while (proc->lwWaitLink != NULL &&
proc->lwWaitLink->lwWaitMode != LW_EXCLUSIVE)
{
if (proc->lwWaitMode != LW_WAIT_UNTIL_FREE)
releaseOK = false;
proc = proc->lwWaitLink;
}
}
/* proc is now the last PGPROC to be released */
lock->head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
/*
* Prevent additional wakeups until retryer gets to run. Backends
* that are just waiting for the lock to become free don't retry
* automatically.
*/
if (proc->lwWaitMode != LW_WAIT_UNTIL_FREE)
releaseOK = false;
lock->releaseOK = releaseOK;
}
else
{
/* lock is still held, can't awaken anything */
head = NULL;
}
}
/* We are done updating shared state of the lock itself. */
SpinLockRelease(&lock->mutex);
TRACE_POSTGRESQL_LWLOCK_RELEASE(lockid);
/*
* Awaken any waiters I removed from the queue.
*/
while (head != NULL)
{
LOG_LWDEBUG("LWLockRelease", lockid, "release waiter");
proc = head;
head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
proc->lwWaiting = false;
PGSemaphoreUnlock(&proc->sem);
}
/*
* Now okay to allow cancel/die interrupts.
*/
RESUME_INTERRUPTS();
}
| void LWLockReleaseAll | ( | void | ) |
Definition at line 859 of file lwlock.c.
References held_lwlocks, HOLD_INTERRUPTS, LWLockRelease(), and num_held_lwlocks.
Referenced by AbortSubTransaction(), AbortTransaction(), AuxiliaryProcKill(), BackgroundWriterMain(), CheckpointerMain(), ProcKill(), ShutdownAuxiliaryProcess(), and WalWriterMain().
{
while (num_held_lwlocks > 0)
{
HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
LWLockRelease(held_lwlocks[num_held_lwlocks - 1]);
}
}
| Size LWLockShmemSize | ( | void | ) |
Definition at line 242 of file lwlock.c.
References add_size(), LWLOCK_PADDED_SIZE, mul_size(), and NumLWLocks().
Referenced by CreateLWLocks(), and CreateSharedMemoryAndSemaphores().
{
Size size;
int numLocks = NumLWLocks();
/* Space for the LWLock array. */
size = mul_size(numLocks, sizeof(LWLockPadded));
/* Space for dynamic allocation counter, plus room for alignment. */
size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
return size;
}
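For a concrete feel, here is a worked example; LWLOCK_PADDED_SIZE = 32 and sizeof(int) = 4 are platform-dependent assumptions:

    /*
     * numLocks = 10000:
     *   array    10000 * 32 = 320000 bytes
     *   counter      2 *  4 =      8 bytes
     *   slack            32 bytes (worst-case alignment of the array)
     *   total             = 320040 bytes
     */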
| int NumLWLocks | ( | void | ) |
Definition at line 171 of file lwlock.c.
References CLOGShmemBuffers(), lock_addin_request, lock_addin_request_allowed, Max, MaxBackends, NBuffers, NUM_ASYNC_BUFFERS, NUM_AUXILIARY_PROCS, NUM_MXACTMEMBER_BUFFERS, NUM_MXACTOFFSET_BUFFERS, NUM_OLDSERXID_BUFFERS, NUM_SUBTRANS_BUFFERS, NUM_USER_DEFINED_LWLOCKS, and NumFixedLWLocks.
Referenced by CreateLWLocks(), LWLockShmemSize(), and SpinlockSemas().
{
int numLocks;
/*
* Possibly this logic should be spread out among the affected modules,
* the same way that shmem space estimation is done. But for now, there
* are few enough users of LWLocks that we can get away with just keeping
* the knowledge here.
*/
/* Predefined LWLocks */
numLocks = (int) NumFixedLWLocks;
/* bufmgr.c needs two for each shared buffer */
numLocks += 2 * NBuffers;
/* proc.c needs one for each backend or auxiliary process */
numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
/* clog.c needs one per CLOG buffer */
numLocks += CLOGShmemBuffers();
/* subtrans.c needs one per SubTrans buffer */
numLocks += NUM_SUBTRANS_BUFFERS;
/* multixact.c needs two SLRU areas */
numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
/* async.c needs one per Async buffer */
numLocks += NUM_ASYNC_BUFFERS;
/* predicate.c needs one per old serializable xid buffer */
numLocks += NUM_OLDSERXID_BUFFERS;
/*
* Add any requested by loadable modules; for backwards-compatibility
* reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if
* there are no explicit requests.
*/
lock_addin_request_allowed = false;
numLocks += Max(lock_addin_request, NUM_USER_DEFINED_LWLOCKS);
return numLocks;
}
| void RequestAddinLWLocks | ( | int | n | ) |
Definition at line 230 of file lwlock.c.
References IsUnderPostmaster, lock_addin_request, and lock_addin_request_allowed.
Referenced by _PG_init().
{
if (IsUnderPostmaster || !lock_addin_request_allowed)
return; /* too late */
lock_addin_request += n;
}
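Together with LWLockAssign(), this supports the standard add-in pattern (compare pgss_shmem_startup() in pg_stat_statements): RequestAddinLWLocks() must run from _PG_init() of a shared_preload_libraries module, before shared memory is created, and the assignment then happens in the shmem-startup hook. A sketch, with the static bookkeeping names hypothetical:

    static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
    static LWLockId my_lock;                /* hypothetical add-in lock */

    static void my_shmem_startup(void);

    void
    _PG_init(void)
    {
        if (!process_shared_preload_libraries_in_progress)
            return;                         /* too late to request LWLocks */
        RequestAddinLWLocks(1);
        prev_shmem_startup_hook = shmem_startup_hook;
        shmem_startup_hook = my_shmem_startup;
    }

    static void
    my_shmem_startup(void)
    {
        if (prev_shmem_startup_hook)
            prev_shmem_startup_hook();
        my_lock = LWLockAssign();           /* one of the reserved LWLockIds */
    }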