Diffstat (limited to 'src/backend/storage/lmgr')
-rw-r--r--  src/backend/storage/lmgr/deadlock.c   99
-rw-r--r--  src/backend/storage/lmgr/lmgr.c       34
-rw-r--r--  src/backend/storage/lmgr/lock.c      270
-rw-r--r--  src/backend/storage/lmgr/lwlock.c     93
-rw-r--r--  src/backend/storage/lmgr/proc.c      225
-rw-r--r--  src/backend/storage/lmgr/s_lock.c    120
-rw-r--r--  src/backend/storage/lmgr/spin.c       11
7 files changed, 412 insertions, 440 deletions
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 7edabff6dd..06de6071f1 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.34 2005/04/29 22:28:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/deadlock.c,v 1.35 2005/10/15 02:49:26 momjian Exp $
*
* Interface:
*
@@ -130,15 +130,15 @@ InitDeadLockChecking(void)
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
/*
- * FindLockCycle needs at most MaxBackends entries in visitedProcs[]
- * and deadlockDetails[].
+ * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
+ * deadlockDetails[].
*/
visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
/*
- * TopoSort needs to consider at most MaxBackends wait-queue entries,
- * and it needn't run concurrently with FindLockCycle.
+ * TopoSort needs to consider at most MaxBackends wait-queue entries, and
+ * it needn't run concurrently with FindLockCycle.
*/
topoProcs = visitedProcs; /* re-use this space */
beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
@@ -146,33 +146,32 @@ InitDeadLockChecking(void)
/*
* We need to consider rearranging at most MaxBackends/2 wait queues
- * (since it takes at least two waiters in a queue to create a soft
- * edge), and the expanded form of the wait queues can't involve more
- * than MaxBackends total waiters.
+ * (since it takes at least two waiters in a queue to create a soft edge),
+ * and the expanded form of the wait queues can't involve more than
+ * MaxBackends total waiters.
*/
waitOrders = (WAIT_ORDER *)
palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
/*
- * Allow at most MaxBackends distinct constraints in a configuration.
- * (Is this enough? In practice it seems it should be, but I don't
- * quite see how to prove it. If we run out, we might fail to find a
- * workable wait queue rearrangement even though one exists.) NOTE
- * that this number limits the maximum recursion depth of
- * DeadLockCheckRecurse. Making it really big might potentially allow
- * a stack-overflow problem.
+ * Allow at most MaxBackends distinct constraints in a configuration. (Is
+ * this enough? In practice it seems it should be, but I don't quite see
+ * how to prove it. If we run out, we might fail to find a workable wait
+ * queue rearrangement even though one exists.) NOTE that this number
+ * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
+ * really big might potentially allow a stack-overflow problem.
*/
maxCurConstraints = MaxBackends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
/*
* Allow up to 3*MaxBackends constraints to be saved without having to
- * re-run TestConfiguration. (This is probably more than enough, but
- * we can survive if we run low on space by doing excess runs of
- * TestConfiguration to re-compute constraint lists each time needed.)
- * The last MaxBackends entries in possibleConstraints[] are reserved
- * as output workspace for FindLockCycle.
+ * re-run TestConfiguration. (This is probably more than enough, but we
+ * can survive if we run low on space by doing excess runs of
+ * TestConfiguration to re-compute constraint lists each time needed.) The
+ * last MaxBackends entries in possibleConstraints[] are reserved as
+ * output workspace for FindLockCycle.
*/
maxPossibleConstraints = MaxBackends * 4;
possibleConstraints =
@@ -361,9 +360,9 @@ TestConfiguration(PGPROC *startProc)
return -1;
/*
- * Check for cycles involving startProc or any of the procs mentioned
- * in constraints. We check startProc last because if it has a soft
- * cycle still to be dealt with, we want to deal with that first.
+ * Check for cycles involving startProc or any of the procs mentioned in
+ * constraints. We check startProc last because if it has a soft cycle
+ * still to be dealt with, we want to deal with that first.
*/
for (i = 0; i < nCurConstraints; i++)
{
@@ -447,8 +446,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
if (i == 0)
{
/*
- * record total length of cycle --- outer levels will now
- * fill deadlockDetails[]
+ * record total length of cycle --- outer levels will now fill
+ * deadlockDetails[]
*/
Assert(depth <= MaxBackends);
nDeadlockDetails = depth;
@@ -457,8 +456,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
/*
- * Otherwise, we have a cycle but it does not include the
- * start point, so say "no deadlock".
+ * Otherwise, we have a cycle but it does not include the start
+ * point, so say "no deadlock".
*/
return false;
}
@@ -480,8 +479,8 @@ FindLockCycleRecurse(PGPROC *checkProc,
conflictMask = lockMethodTable->conflictTab[checkProc->waitLockMode];
/*
- * Scan for procs that already hold conflicting locks. These are
- * "hard" edges in the waits-for graph.
+ * Scan for procs that already hold conflicting locks. These are "hard"
+ * edges in the waits-for graph.
*/
procLocks = &(lock->procLocks);
@@ -520,15 +519,14 @@ FindLockCycleRecurse(PGPROC *checkProc,
}
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
- offsetof(PROCLOCK, lockLink));
+ offsetof(PROCLOCK, lockLink));
}
/*
* Scan for procs that are ahead of this one in the lock's wait queue.
- * Those that have conflicting requests soft-block this one. This
- * must be done after the hard-block search, since if another proc
- * both hard- and soft-blocks this one, we want to call it a hard
- * edge.
+ * Those that have conflicting requests soft-block this one. This must be
+ * done after the hard-block search, since if another proc both hard- and
+ * soft-blocks this one, we want to call it a hard edge.
*
* If there is a proposed re-ordering of the lock's wait order, use that
* rather than the current wait order.
@@ -569,8 +567,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
@@ -610,8 +607,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
info->pid = checkProc->pid;
/*
- * Add this edge to the list of soft edges in the
- * cycle
+ * Add this edge to the list of soft edges in the cycle
*/
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
@@ -655,8 +651,8 @@ ExpandConstraints(EDGE *constraints,
/*
* Scan constraint list backwards. This is because the last-added
- * constraint is the only one that could fail, and so we want to test
- * it for inconsistency first.
+ * constraint is the only one that could fail, and so we want to test it
+ * for inconsistency first.
*/
for (i = nConstraints; --i >= 0;)
{
@@ -679,8 +675,8 @@ ExpandConstraints(EDGE *constraints,
Assert(nWaitOrderProcs <= MaxBackends);
/*
- * Do the topo sort. TopoSort need not examine constraints after
- * this one, since they must be for different locks.
+ * Do the topo sort. TopoSort need not examine constraints after this
+ * one, since they must be for different locks.
*/
if (!TopoSort(lock, constraints, i + 1,
waitOrders[nWaitOrders].procs))
@@ -739,15 +735,14 @@ TopoSort(LOCK *lock,
}
/*
- * Scan the constraints, and for each proc in the array, generate a
- * count of the number of constraints that say it must be before
- * something else, plus a list of the constraints that say it must be
- * after something else. The count for the j'th proc is stored in
- * beforeConstraints[j], and the head of its list in
- * afterConstraints[j]. Each constraint stores its list link in
- * constraints[i].link (note any constraint will be in just one list).
- * The array index for the before-proc of the i'th constraint is
- * remembered in constraints[i].pred.
+ * Scan the constraints, and for each proc in the array, generate a count
+ * of the number of constraints that say it must be before something else,
+ * plus a list of the constraints that say it must be after something
+ * else. The count for the j'th proc is stored in beforeConstraints[j],
+ * and the head of its list in afterConstraints[j]. Each constraint
+ * stores its list link in constraints[i].link (note any constraint will
+ * be in just one list). The array index for the before-proc of the i'th
+ * constraint is remembered in constraints[i].pred.
*/
MemSet(beforeConstraints, 0, queue_size * sizeof(int));
MemSet(afterConstraints, 0, queue_size * sizeof(int));
@@ -933,7 +928,7 @@ DeadLockReport(void)
DescribeLockTag(&buf2, &info->locktag);
appendStringInfo(&buf,
- _("Process %d waits for %s on %s; blocked by process %d."),
+ _("Process %d waits for %s on %s; blocked by process %d."),
info->pid,
GetLockmodeName(info->lockmode),
buf2.data,
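
[Editor's note, not part of the patch: the reflowed TopoSort comment above describes a small bookkeeping pass over the constraint list. A minimal standalone sketch of that pass follows, with toy types in place of the real EDGE/PGPROC structures; the proc-to-array-index lookup of the real code is elided.]

#include <string.h>

/* Toy stand-in for lmgr's EDGE: "waiter must be reordered before blocker". */
typedef struct ToyConstraint
{
    int         waiter;     /* array index of the proc that must come before */
    int         blocker;    /* array index of the proc that must come after */
    int         pred;       /* filled in: remembered index of the before-proc */
    int         link;       /* filled in: next constraint in blocker's list */
} ToyConstraint;

static void
toy_tally_constraints(ToyConstraint *constraints, int nConstraints,
                      int *beforeConstraints, int *afterConstraints,
                      int queue_size)
{
    int         i;

    /* 0 means "none"; list links are therefore stored 1-based. */
    memset(beforeConstraints, 0, queue_size * sizeof(int));
    memset(afterConstraints, 0, queue_size * sizeof(int));

    for (i = 0; i < nConstraints; i++)
    {
        int         j = constraints[i].waiter;  /* must come before ... */
        int         k = constraints[i].blocker; /* ... this proc */

        beforeConstraints[j]++;                 /* j precedes one more proc */
        constraints[i].pred = j;                /* remember the before-proc */
        constraints[i].link = afterConstraints[k];  /* push onto k's list */
        afterConstraints[k] = i + 1;            /* new list head, 1-based */
    }
}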
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index 7a4ef9f755..8ffeced997 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.78 2005/08/01 20:31:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.79 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -145,11 +145,11 @@ LockRelation(Relation relation, LOCKMODE lockmode)
lockmode, false, false);
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
@@ -185,11 +185,11 @@ ConditionalLockRelation(Relation relation, LOCKMODE lockmode)
return false;
/*
- * Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or ereport() trying.
- * Increment the refcount to ensure that RelationFlushRelation will
- * rebuild it and not just delete it. We can skip this if the lock
- * was already held, however.
+ * Check to see if the relcache entry has been invalidated while we were
+ * waiting to lock it. If so, rebuild it, or ereport() trying. Increment
+ * the refcount to ensure that RelationFlushRelation will rebuild it and
+ * not just delete it. We can skip this if the lock was already held,
+ * however.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
{
@@ -429,7 +429,7 @@ XactLockTableInsert(TransactionId xid)
*
* Delete the lock showing that the given transaction ID is running.
* (This is never used for main transaction IDs; those locks are only
- * released implicitly at transaction end. But we do use it for subtrans
+ * released implicitly at transaction end. But we do use it for subtrans
* IDs.)
*/
void
@@ -451,7 +451,7 @@ XactLockTableDelete(TransactionId xid)
* subtransaction, we will exit as soon as it aborts or its top parent commits.
* It takes some extra work to ensure this, because to save on shared memory
* the XID lock of a subtransaction is released when it ends, whether
- * successfully or unsuccessfully. So we have to check if it's "still running"
+ * successfully or unsuccessfully. So we have to check if it's "still running"
* and if so wait for its parent.
*/
void
@@ -477,8 +477,8 @@ XactLockTableWait(TransactionId xid)
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
@@ -514,8 +514,8 @@ ConditionalXactLockTableWait(TransactionId xid)
}
/*
- * Transaction was committed/aborted/crashed - we have to update
- * pg_clog if transaction is still marked as running.
+ * Transaction was committed/aborted/crashed - we have to update pg_clog
+ * if transaction is still marked as running.
*/
if (!TransactionIdDidCommit(xid) && !TransactionIdDidAbort(xid))
TransactionIdAbort(xid);
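
[Editor's note, not part of the patch: the reflowed comments above describe XactLockTableWait's wait-on-XID loop in words. A minimal sketch of that loop shape follows; every identifier is an invented stand-in, not the real xact/clog/subtrans API.]

typedef unsigned int ToyXid;

/* Invented stand-ins for the real routines; declarations only. */
extern void toy_block_on_xid_lock(ToyXid xid);  /* returns once xid's lock is free */
extern int  toy_xid_is_in_progress(ToyXid xid);
extern int  toy_xid_did_commit(ToyXid xid);
extern int  toy_xid_did_abort(ToyXid xid);
extern void toy_mark_xid_aborted(ToyXid xid);
extern ToyXid toy_get_parent_xid(ToyXid xid);

static void
toy_wait_for_xact(ToyXid xid)
{
    for (;;)
    {
        toy_block_on_xid_lock(xid);     /* sleep until the XID lock is released */

        if (!toy_xid_is_in_progress(xid))
            break;                      /* the transaction really finished */

        /*
         * Still "in progress" even though its lock is gone: it was a
         * subtransaction whose lock was released early, so wait on its
         * parent instead.
         */
        xid = toy_get_parent_xid(xid);
    }

    /*
     * Committed, aborted, or crashed.  If the clog still says "running",
     * the backend must have crashed before recording an outcome, so mark
     * the transaction aborted now.
     */
    if (!toy_xid_did_commit(xid) && !toy_xid_did_abort(xid))
        toy_mark_xid_aborted(xid);
}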
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c11070a130..245b8eeee2 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.157 2005/08/20 23:26:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.158 2005/10/15 02:49:26 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
@@ -46,7 +46,7 @@
/* This configuration variable is used to set the lock table size */
int max_locks_per_xact; /* set by guc.c */
-#define NLOCKENTS() \
+#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
@@ -155,12 +155,11 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
if (LOCK_DEBUG_ENABLED((LOCK *) MAKE_PTR(proclockP->tag.lock)))
elog(LOG,
- "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
+ "%s: proclock(%lx) lock(%lx) method(%u) proc(%lx) hold(%x)",
where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
PROCLOCK_LOCKMETHOD(*(proclockP)),
proclockP->tag.proc, (int) proclockP->holdMask);
}
-
#else /* not LOCK_DEBUG */
#define LOCK_PRINT(where, lock, type)
@@ -171,11 +170,11 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
static void RemoveLocalLock(LOCALLOCK *locallock);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
- ResourceOwner owner);
+ ResourceOwner owner);
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
- PROCLOCK *proclock, LockMethod lockMethodTable);
+ PROCLOCK *proclock, LockMethod lockMethodTable);
static void CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock,
- PROCLOCK *proclock, bool wakeupNeeded);
+ PROCLOCK *proclock, bool wakeupNeeded);
/*
@@ -320,14 +319,13 @@ LockMethodTableInit(const char *tabName,
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/*
- * allocate a non-shared hash table for LOCALLOCK structs. This is
- * used to store lock counts and resource owner information.
+ * allocate a non-shared hash table for LOCALLOCK structs. This is used
+ * to store lock counts and resource owner information.
*
- * The non-shared table could already exist in this process (this occurs
- * when the postmaster is recreating shared memory after a backend
- * crash). If so, delete and recreate it. (We could simply leave it,
- * since it ought to be empty in the postmaster, but for safety let's
- * zap it.)
+ * The non-shared table could already exist in this process (this occurs when
+ * the postmaster is recreating shared memory after a backend crash). If
+ * so, delete and recreate it. (We could simply leave it, since it ought
+ * to be empty in the postmaster, but for safety let's zap it.)
*/
if (LockMethodLocalHash[lockmethodid])
hash_destroy(LockMethodLocalHash[lockmethodid]);
@@ -499,7 +497,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
locallock->lockOwners = NULL;
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
- locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
+ locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
}
else
{
@@ -518,8 +516,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
}
/*
- * If we already hold the lock, we can just increase the count
- * locally.
+ * If we already hold the lock, we can just increase the count locally.
*/
if (locallock->nLocks > 0)
{
@@ -537,8 +534,8 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* Find or create a lock with this tag.
*
- * Note: if the locallock object already existed, it might have a pointer
- * to the lock already ... but we probably should not assume that that
+ * Note: if the locallock object already existed, it might have a pointer to
+ * the lock already ... but we probably should not assume that that
* pointer is valid, since a lock object with no locks can go away
* anytime.
*/
@@ -551,7 +548,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->lock = lock;
@@ -581,7 +578,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
@@ -612,7 +609,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->proclock = proclock;
@@ -636,29 +633,28 @@ LockAcquire(LOCKMETHODID lockmethodid,
#ifdef CHECK_DEADLOCK_RISK
/*
- * Issue warning if we already hold a lower-level lock on this
- * object and do not hold a lock of the requested level or higher.
- * This indicates a deadlock-prone coding practice (eg, we'd have
- * a deadlock if another backend were following the same code path
- * at about the same time).
+ * Issue warning if we already hold a lower-level lock on this object
+ * and do not hold a lock of the requested level or higher. This
+ * indicates a deadlock-prone coding practice (eg, we'd have a
+ * deadlock if another backend were following the same code path at
+ * about the same time).
*
- * This is not enabled by default, because it may generate log
- * entries about user-level coding practices that are in fact safe
- * in context. It can be enabled to help find system-level
- * problems.
+ * This is not enabled by default, because it may generate log entries
+ * about user-level coding practices that are in fact safe in context.
+ * It can be enabled to help find system-level problems.
*
* XXX Doing numeric comparison on the lockmodes is a hack; it'd be
* better to use a table. For now, though, this works.
*/
{
- int i;
+ int i;
for (i = lockMethodTable->numLockModes; i > 0; i--)
{
if (proclock->holdMask & LOCKBIT_ON(i))
{
if (i >= (int) lockmode)
- break; /* safe: we have a lock >= req level */
+ break; /* safe: we have a lock >= req level */
elog(LOG, "deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
@@ -673,16 +669,16 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately. The other counts don't increment till we get the lock.
+ * requests, whether granted or waiting, so increment those immediately.
+ * The other counts don't increment till we get the lock.
*/
lock->nRequested++;
lock->requested[lockmode]++;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
/*
- * We shouldn't already hold the desired lock; else locallock table
- * is broken.
+ * We shouldn't already hold the desired lock; else locallock table is
+ * broken.
*/
if (proclock->holdMask & LOCKBIT_ON(lockmode))
elog(ERROR, "lock %s on object %u/%u/%u is already held",
@@ -691,9 +687,9 @@ LockAcquire(LOCKMETHODID lockmethodid,
lock->tag.locktag_field3);
/*
- * If lock requested conflicts with locks requested by waiters, must
- * join wait queue. Otherwise, check for conflict with already-held
- * locks. (That's last because most complex check.)
+ * If lock requested conflicts with locks requested by waiters, must join
+ * wait queue. Otherwise, check for conflict with already-held locks.
+ * (That's last because most complex check.)
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
status = STATUS_FOUND;
@@ -713,8 +709,8 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* We can't acquire the lock immediately. If caller specified no
- * blocking, remove useless table entries and return NOT_AVAIL
- * without waiting.
+ * blocking, remove useless table entries and return NOT_AVAIL without
+ * waiting.
*/
if (dontWait)
{
@@ -753,8 +749,7 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* NOTE: do not do any material change of state between here and
* return. All required changes in locktable state must have been
- * done when the lock was granted to us --- see notes in
- * WaitOnLock.
+ * done when the lock was granted to us --- see notes in WaitOnLock.
*/
/*
@@ -820,13 +815,13 @@ LockCheckConflicts(LockMethod lockMethodTable,
int i;
/*
- * first check for global conflicts: If no locks conflict with my
- * request, then I get the lock.
+ * first check for global conflicts: If no locks conflict with my request,
+ * then I get the lock.
*
- * Checking for conflict: lock->grantMask represents the types of
- * currently held locks. conflictTable[lockmode] has a bit set for
- * each type of lock that conflicts with request. Bitwise compare
- * tells if there is a conflict.
+ * Checking for conflict: lock->grantMask represents the types of currently
+ * held locks. conflictTable[lockmode] has a bit set for each type of
+ * lock that conflicts with request. Bitwise compare tells if there is a
+ * conflict.
*/
if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask))
{
@@ -835,15 +830,15 @@ LockCheckConflicts(LockMethod lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own lock.
- * We have to construct a conflict mask that does not reflect our own
- * locks, but only lock types held by other processes.
+ * Rats. Something conflicts. But it could still be my own lock. We have
+ * to construct a conflict mask that does not reflect our own locks, but
+ * only lock types held by other processes.
*/
myLocks = proclock->holdMask;
otherLocks = 0;
for (i = 1; i <= numLockModes; i++)
{
- int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
+ int myHolding = (myLocks & LOCKBIT_ON(i)) ? 1 : 0;
if (lock->granted[i] > myHolding)
otherLocks |= LOCKBIT_ON(i);
@@ -851,8 +846,8 @@ LockCheckConflicts(LockMethod lockMethodTable,
/*
* now check again for conflicts. 'otherLocks' describes the types of
- * locks held by other processes. If one of these conflicts with the
- * kind of lock that I want, there is a conflict and I have to sleep.
+ * locks held by other processes. If one of these conflicts with the kind
+ * of lock that I want, there is a conflict and I have to sleep.
*/
if (!(lockMethodTable->conflictTab[lockmode] & otherLocks))
{
@@ -891,7 +886,7 @@ GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
}
/*
- * UnGrantLock -- opposite of GrantLock.
+ * UnGrantLock -- opposite of GrantLock.
*
* Updates the lock and proclock data structures to show that the lock
* is no longer held nor requested by the current holder.
@@ -903,7 +898,7 @@ static bool
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
PROCLOCK *proclock, LockMethod lockMethodTable)
{
- bool wakeupNeeded = false;
+ bool wakeupNeeded = false;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
@@ -926,13 +921,13 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
/*
- * We need only run ProcLockWakeup if the released lock conflicts with
- * at least one of the lock types requested by waiter(s). Otherwise
- * whatever conflict made them wait must still exist. NOTE: before
- * MVCC, we could skip wakeup if lock->granted[lockmode] was still
- * positive. But that's not true anymore, because the remaining
- * granted locks might belong to some waiter, who could now be
- * awakened because he doesn't conflict with his own locks.
+ * We need only run ProcLockWakeup if the released lock conflicts with at
+ * least one of the lock types requested by waiter(s). Otherwise whatever
+ * conflict made them wait must still exist. NOTE: before MVCC, we could
+ * skip wakeup if lock->granted[lockmode] was still positive. But that's
+ * not true anymore, because the remaining granted locks might belong to
+ * some waiter, who could now be awakened because he doesn't conflict with
+ * his own locks.
*/
if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
wakeupNeeded = true;
@@ -947,7 +942,7 @@ UnGrantLock(LOCK *lock, LOCKMODE lockmode,
}
/*
- * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
+ * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
* proclock and lock objects if possible, and call ProcLockWakeup if there
* are remaining requests and the caller says it's OK. (Normally, this
* should be called after UnGrantLock, and wakeupNeeded is the result from
@@ -961,8 +956,8 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
bool wakeupNeeded)
{
/*
- * If this was my last hold on this lock, delete my entry in the
- * proclock table.
+ * If this was my last hold on this lock, delete my entry in the proclock
+ * table.
*/
if (proclock->holdMask == 0)
{
@@ -978,8 +973,8 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
if (lock->nRequested == 0)
{
/*
- * The caller just released the last lock, so garbage-collect the
- * lock object.
+ * The caller just released the last lock, so garbage-collect the lock
+ * object.
*/
LOCK_PRINT("CleanUpLock: deleting", lock, 0);
Assert(SHMQueueEmpty(&(lock->procLocks)));
@@ -991,7 +986,7 @@ CleanUpLock(LOCKMETHODID lockmethodid, LOCK *lock, PROCLOCK *proclock,
else if (wakeupNeeded)
{
/* There are waiters on this lock, so wake them up. */
- ProcLockWakeup(LockMethods[lockmethodid], lock);
+ ProcLockWakeup(LockMethods[lockmethodid], lock);
}
}
@@ -1075,16 +1070,15 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
/*
* NOTE: Think not to put any shared-state cleanup after the call to
- * ProcSleep, in either the normal or failure path. The lock state
- * must be fully set by the lock grantor, or by CheckDeadLock if we
- * give up waiting for the lock. This is necessary because of the
- * possibility that a cancel/die interrupt will interrupt ProcSleep
- * after someone else grants us the lock, but before we've noticed it.
- * Hence, after granting, the locktable state must fully reflect the
- * fact that we own the lock; we can't do additional work on return.
- * Contrariwise, if we fail, any cleanup must happen in xact abort
- * processing, not here, to ensure it will also happen in the
- * cancel/die case.
+ * ProcSleep, in either the normal or failure path. The lock state must
+ * be fully set by the lock grantor, or by CheckDeadLock if we give up
+ * waiting for the lock. This is necessary because of the possibility
+ * that a cancel/die interrupt will interrupt ProcSleep after someone else
+ * grants us the lock, but before we've noticed it. Hence, after granting,
+ * the locktable state must fully reflect the fact that we own the lock;
+ * we can't do additional work on return. Contrariwise, if we fail, any
+ * cleanup must happen in xact abort processing, not here, to ensure it
+ * will also happen in the cancel/die case.
*/
if (ProcSleep(lockMethodTable,
@@ -1093,8 +1087,7 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
locallock->proclock) != STATUS_OK)
{
/*
- * We failed as a result of a deadlock, see CheckDeadLock(). Quit
- * now.
+ * We failed as a result of a deadlock, see CheckDeadLock(). Quit now.
*/
awaitedLock = NULL;
LOCK_PRINT("WaitOnLock: aborting on lock",
@@ -1102,8 +1095,8 @@ WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
LWLockRelease(lockMethodTable->masterLock);
/*
- * Now that we aren't holding the LockMgrLock, we can give an
- * error report including details about the detected deadlock.
+ * Now that we aren't holding the LockMgrLock, we can give an error
+ * report including details about the detected deadlock.
*/
DeadLockReport();
/* not reached */
@@ -1163,15 +1156,15 @@ RemoveFromWaitQueue(PGPROC *proc)
* Delete the proclock immediately if it represents no already-held locks.
* (This must happen now because if the owner of the lock decides to
* release it, and the requested/granted counts then go to zero,
- * LockRelease expects there to be no remaining proclocks.)
- * Then see if any other waiters for the lock can be woken up now.
+ * LockRelease expects there to be no remaining proclocks.) Then see if
+ * any other waiters for the lock can be woken up now.
*/
CleanUpLock(lockmethodid, waitLock, proclock, true);
}
/*
* LockRelease -- look up 'locktag' in lock table 'lockmethodid' and
- * release one 'lockmode' lock on it. Release a session lock if
+ * release one 'lockmode' lock on it. Release a session lock if
* 'sessionLock' is true, else release a regular transaction lock.
*
* Side Effects: find any waiting processes that are now wakable,
@@ -1219,8 +1212,7 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
HASH_FIND, NULL);
/*
- * let the caller print its own error message, too. Do not
- * ereport(ERROR).
+ * let the caller print its own error message, too. Do not ereport(ERROR).
*/
if (!locallock || locallock->nLocks <= 0)
{
@@ -1268,8 +1260,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
}
/*
- * Decrease the total local count. If we're still holding the lock,
- * we're done.
+ * Decrease the total local count. If we're still holding the lock, we're
+ * done.
*/
locallock->nLocks--;
@@ -1285,8 +1277,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
/*
* We don't need to re-find the lock or proclock, since we kept their
- * addresses in the locallock table, and they couldn't have been
- * removed while we were holding a lock on them.
+ * addresses in the locallock table, and they couldn't have been removed
+ * while we were holding a lock on them.
*/
lock = locallock->lock;
LOCK_PRINT("LockRelease: found", lock, lockmode);
@@ -1294,8 +1286,8 @@ LockRelease(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
PROCLOCK_PRINT("LockRelease: found", proclock);
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
@@ -1356,10 +1348,10 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and get rid of those.
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * entries, then we scan the process's proclocks and get rid of those. We
+ * do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
@@ -1368,8 +1360,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
@@ -1381,9 +1373,9 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
continue;
/*
- * If we are asked to release all locks, we can just zap the
- * entry. Otherwise, must scan to see if there are session locks.
- * We assume there is at most one lockOwners entry for session locks.
+ * If we are asked to release all locks, we can just zap the entry.
+ * Otherwise, must scan to see if there are session locks. We assume
+ * there is at most one lockOwners entry for session locks.
*/
if (!allLocks)
{
@@ -1431,7 +1423,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
@@ -1581,8 +1573,8 @@ LockReassignCurrentOwner(void)
continue;
/*
- * Scan to see if there are any locks belonging to current owner
- * or its parent
+ * Scan to see if there are any locks belonging to current owner or
+ * its parent
*/
lockOwners = locallock->lockOwners;
for (i = locallock->numLockOwners - 1; i >= 0; i--)
@@ -1644,7 +1636,7 @@ AtPrepare_Locks(void)
{
TwoPhaseLockRecord record;
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
- int i;
+ int i;
/* Ignore items that are not of the lockmethod to be processed */
if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
@@ -1722,12 +1714,12 @@ PostPrepare_Locks(TransactionId xid)
/*
* First we run through the locallock table and get rid of unwanted
- * entries, then we scan the process's proclocks and transfer them
- * to the target proc.
+ * entries, then we scan the process's proclocks and transfer them to the
+ * target proc.
*
- * We do this separately because we may have multiple locallock
- * entries pointing to the same proclock, and we daren't end up with
- * any dangling pointers.
+ * We do this separately because we may have multiple locallock entries
+ * pointing to the same proclock, and we daren't end up with any dangling
+ * pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
@@ -1736,8 +1728,8 @@ PostPrepare_Locks(TransactionId xid)
if (locallock->proclock == NULL || locallock->lock == NULL)
{
/*
- * We must've run out of shared memory while trying to set up
- * this lock. Just forget the local entry.
+ * We must've run out of shared memory while trying to set up this
+ * lock. Just forget the local entry.
*/
Assert(locallock->nLocks == 0);
RemoveLocalLock(locallock);
@@ -1771,7 +1763,7 @@ PostPrepare_Locks(TransactionId xid)
/* Get link first, since we may unlink/delete this proclock */
nextplock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
Assert(proclock->tag.proc == MAKE_OFFSET(MyProc));
@@ -1797,13 +1789,13 @@ PostPrepare_Locks(TransactionId xid)
holdMask = proclock->holdMask;
/*
- * We cannot simply modify proclock->tag.proc to reassign ownership
- * of the lock, because that's part of the hash key and the proclock
+ * We cannot simply modify proclock->tag.proc to reassign ownership of
+ * the lock, because that's part of the hash key and the proclock
* would then be in the wrong hash chain. So, unlink and delete the
- * old proclock; create a new one with the right contents; and link
- * it into place. We do it in this order to be certain we won't
- * run out of shared memory (the way dynahash.c works, the deleted
- * object is certain to be available for reallocation).
+ * old proclock; create a new one with the right contents; and link it
+ * into place. We do it in this order to be certain we won't run out
+ * of shared memory (the way dynahash.c works, the deleted object is
+ * certain to be available for reallocation).
*/
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
@@ -1823,7 +1815,7 @@ PostPrepare_Locks(TransactionId xid)
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
- ereport(PANIC, /* should not happen */
+ ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
@@ -1881,11 +1873,11 @@ LockShmemSize(void)
size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
/*
- * Note we count only one pair of hash tables, since the userlocks
- * table actually overlays the main one.
+ * Note we count only one pair of hash tables, since the userlocks table
+ * actually overlays the main one.
*
- * Since the lockHash entry count above is only an estimate, add 10%
- * safety margin.
+ * Since the lockHash entry count above is only an estimate, add 10% safety
+ * margin.
*/
size = add_size(size, size / 10);
@@ -2000,7 +1992,7 @@ DumpLocks(PGPROC *proc)
LOCK_PRINT("DumpLocks", lock, 0);
proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->procLink,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
}
}
@@ -2046,7 +2038,6 @@ DumpAllLocks(void)
elog(LOG, "DumpAllLocks: proclock->tag.lock = NULL");
}
}
-
#endif /* LOCK_DEBUG */
/*
@@ -2066,7 +2057,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
LOCK *lock;
@@ -2102,7 +2093,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
@@ -2131,7 +2122,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
@@ -2162,7 +2153,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
/*
@@ -2185,8 +2176,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those
- * immediately.
+ * requests, whether granted or waiting, so increment those immediately.
*/
lock->nRequested++;
lock->requested[lockmode]++;
@@ -2220,7 +2210,7 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
PGPROC *proc = TwoPhaseGetDummyProc(xid);
- LOCKTAG *locktag;
+ LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
PROCLOCKTAG proclocktag;
@@ -2256,7 +2246,7 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
/*
* Re-find the proclock object (ditto).
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(proc);
proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash[lockmethodid],
@@ -2266,8 +2256,8 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
elog(PANIC, "failed to re-find shared proclock object");
/*
- * Double-check that we are actually holding a lock of the type we
- * want to release.
+ * Double-check that we are actually holding a lock of the type we want to
+ * release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
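
[Editor's note, not part of the patch: the reflowed LockCheckConflicts comments in this hunk (grantMask, conflictTab, and the "otherLocks" pass that discounts our own holds) boil down to a couple of bitmask operations. A self-contained sketch with toy types, not the real lmgr structures:]

#define TOY_LOCKBIT_ON(mode)    (1 << (mode))
#define TOY_NUM_MODES           8

typedef struct ToyLock
{
    int         grantMask;                  /* OR of bits for all granted modes */
    int         granted[TOY_NUM_MODES + 1]; /* per-mode granted counts */
} ToyLock;

/* Returns nonzero if some *other* process holds a conflicting mode. */
static int
toy_lock_conflicts(const int *conflictTab, int lockmode,
                   const ToyLock *lock, int myHoldMask)
{
    int         otherLocks = 0;
    int         i;

    /* Fast path: nothing granted conflicts with my request at all. */
    if (!(conflictTab[lockmode] & lock->grantMask))
        return 0;

    /*
     * Something conflicts, but it might only be my own locks.  Build a mask
     * of modes held by other processes: a mode counts only if more grants
     * exist than the one (0 or 1) attributable to me.
     */
    for (i = 1; i <= TOY_NUM_MODES; i++)
    {
        int         myHolding = (myHoldMask & TOY_LOCKBIT_ON(i)) ? 1 : 0;

        if (lock->granted[i] > myHolding)
            otherLocks |= TOY_LOCKBIT_ON(i);
    }

    return (conflictTab[lockmode] & otherLocks) != 0;
}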
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index ce0606a3c4..5526c77a67 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.33 2005/10/12 16:55:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.34 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,10 +44,10 @@ typedef struct LWLock
/*
* All the LWLock structs are allocated as an array in shared memory.
- * (LWLockIds are indexes into the array.) We force the array stride to
+ * (LWLockIds are indexes into the array.) We force the array stride to
* be a power of 2, which saves a few cycles in indexing, but more
* importantly also ensures that individual LWLocks don't cross cache line
- * boundaries. This reduces cache contention problems, especially on AMD
+ * boundaries. This reduces cache contention problems, especially on AMD
* Opterons. (Of course, we have to also ensure that the array start
* address is suitably aligned.)
*
@@ -101,7 +101,6 @@ LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
if (Trace_lwlocks)
elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
-
#else /* not LOCK_DEBUG */
#define PRINT_LWDEBUG(a,b,c)
#define LOG_LWDEBUG(a,b,c)
@@ -117,10 +116,10 @@ NumLWLocks(void)
int numLocks;
/*
- * Possibly this logic should be spread out among the affected
- * modules, the same way that shmem space estimation is done. But for
- * now, there are few enough users of LWLocks that we can get away
- * with just keeping the knowledge here.
+ * Possibly this logic should be spread out among the affected modules,
+ * the same way that shmem space estimation is done. But for now, there
+ * are few enough users of LWLocks that we can get away with just keeping
+ * the knowledge here.
*/
/* Predefined LWLocks */
@@ -136,8 +135,8 @@ NumLWLocks(void)
numLocks += NUM_SLRU_BUFFERS;
/*
- * multixact.c needs one per MultiXact buffer, but there are
- * two SLRU areas for MultiXact
+ * multixact.c needs one per MultiXact buffer, but there are two SLRU
+ * areas for MultiXact
*/
numLocks += 2 * NUM_SLRU_BUFFERS;
@@ -226,6 +225,7 @@ LWLockId
LWLockAssign(void)
{
LWLockId result;
+
/* use volatile pointer to prevent code rearrangement */
volatile int *LWLockCounter;
@@ -261,8 +261,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
/*
* We can't wait if we haven't got a PGPROC. This should only occur
- * during bootstrap or shared memory initialization. Put an Assert
- * here to catch unsafe coding practices.
+ * during bootstrap or shared memory initialization. Put an Assert here
+ * to catch unsafe coding practices.
*/
Assert(!(proc == NULL && IsUnderPostmaster));
@@ -271,9 +271,9 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
@@ -282,17 +282,16 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* LWLockRelease.
*
* NOTE: it might seem better to have LWLockRelease actually grant us the
- * lock, rather than retrying and possibly having to go back to sleep.
- * But in practice that is no good because it means a process swap for
- * every lock acquisition when two or more processes are contending
- * for the same lock. Since LWLocks are normally used to protect
- * not-very-long sections of computation, a process needs to be able
- * to acquire and release the same lock many times during a single CPU
- * time slice, even in the presence of contention. The efficiency of
- * being able to do that outweighs the inefficiency of sometimes
- * wasting a process dispatch cycle because the lock is not free when
- * a released waiter finally gets to run. See pgsql-hackers archives
- * for 29-Dec-01.
+ * lock, rather than retrying and possibly having to go back to sleep. But
+ * in practice that is no good because it means a process swap for every
+ * lock acquisition when two or more processes are contending for the same
+ * lock. Since LWLocks are normally used to protect not-very-long
+ * sections of computation, a process needs to be able to acquire and
+ * release the same lock many times during a single CPU time slice, even
+ * in the presence of contention. The efficiency of being able to do that
+ * outweighs the inefficiency of sometimes wasting a process dispatch
+ * cycle because the lock is not free when a released waiter finally gets
+ * to run. See pgsql-hackers archives for 29-Dec-01.
*/
for (;;)
{
@@ -334,8 +333,8 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* Add myself to wait queue.
*
* If we don't have a PGPROC structure, there's no way to wait. This
- * should never occur, since MyProc should only be null during
- * shared memory initialization.
+ * should never occur, since MyProc should only be null during shared
+ * memory initialization.
*/
if (proc == NULL)
elog(FATAL, "cannot wait without a PGPROC structure");
@@ -356,13 +355,13 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* Wait until awakened.
*
* Since we share the process wait semaphore with the regular lock
- * manager and ProcWaitForSignal, and we may need to acquire an
- * LWLock while one of those is pending, it is possible that we
- * get awakened for a reason other than being signaled by
- * LWLockRelease. If so, loop back and wait again. Once we've
- * gotten the LWLock, re-increment the sema by the number of
- * additional signals received, so that the lock manager or signal
- * manager will see the received signal when it next waits.
+ * manager and ProcWaitForSignal, and we may need to acquire an LWLock
+ * while one of those is pending, it is possible that we get awakened
+ * for a reason other than being signaled by LWLockRelease. If so,
+ * loop back and wait again. Once we've gotten the LWLock,
+ * re-increment the sema by the number of additional signals received,
+ * so that the lock manager or signal manager will see the received
+ * signal when it next waits.
*/
LOG_LWDEBUG("LWLockAcquire", lockid, "waiting");
@@ -414,9 +413,9 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
elog(ERROR, "too many LWLocks taken");
/*
- * Lock out cancel/die interrupts until we exit the code section
- * protected by the LWLock. This ensures that interrupts will not
- * interfere with manipulations of data structures in shared memory.
+ * Lock out cancel/die interrupts until we exit the code section protected
+ * by the LWLock. This ensures that interrupts will not interfere with
+ * manipulations of data structures in shared memory.
*/
HOLD_INTERRUPTS();
@@ -477,8 +476,8 @@ LWLockRelease(LWLockId lockid)
PRINT_LWDEBUG("LWLockRelease", lockid, lock);
/*
- * Remove lock from list of locks held. Usually, but not always, it
- * will be the latest-acquired lock; so search array backwards.
+ * Remove lock from list of locks held. Usually, but not always, it will
+ * be the latest-acquired lock; so search array backwards.
*/
for (i = num_held_lwlocks; --i >= 0;)
{
@@ -504,10 +503,10 @@ LWLockRelease(LWLockId lockid)
}
/*
- * See if I need to awaken any waiters. If I released a non-last
- * shared hold, there cannot be anything to do. Also, do not awaken
- * any waiters if someone has already awakened waiters that haven't
- * yet acquired the lock.
+ * See if I need to awaken any waiters. If I released a non-last shared
+ * hold, there cannot be anything to do. Also, do not awaken any waiters
+ * if someone has already awakened waiters that haven't yet acquired the
+ * lock.
*/
head = lock->head;
if (head != NULL)
@@ -515,9 +514,9 @@ LWLockRelease(LWLockId lockid)
if (lock->exclusive == 0 && lock->shared == 0 && lock->releaseOK)
{
/*
- * Remove the to-be-awakened PGPROCs from the queue. If the
- * front waiter wants exclusive lock, awaken him only.
- * Otherwise awaken as many waiters as want shared access.
+ * Remove the to-be-awakened PGPROCs from the queue. If the front
+ * waiter wants exclusive lock, awaken him only. Otherwise awaken
+ * as many waiters as want shared access.
*/
proc = head;
if (!proc->lwExclusive)
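
[Editor's note, not part of the patch: the comment near the top of this file's hunks explains that the LWLock array stride is forced to a power of 2 so that no lock straddles a cache line. A minimal illustration of that padding idea, with invented names; the real struct layout and sizes differ.]

typedef struct ToyLWLock
{
    char        dummy[20];      /* stand-in for spinlock, flags, wait queue */
} ToyLWLock;

/* Round the element size up to a power-of-2 stride at compile time. */
#define TOY_LWLOCK_STRIDE \
    (sizeof(ToyLWLock) <= 16 ? 16 : \
     sizeof(ToyLWLock) <= 32 ? 32 : \
     sizeof(ToyLWLock) <= 64 ? 64 : 128)

typedef union ToyLWLockPadded
{
    ToyLWLock   lock;
    char        pad[TOY_LWLOCK_STRIDE];
} ToyLWLockPadded;

/*
 * Indexing the shared array then costs a shift rather than a multiply, and
 * (given a suitably aligned base address) no element crosses a cache line.
 */
static ToyLWLock *
toy_lwlock_by_id(ToyLWLockPadded *array, int id)
{
    return &array[id].lock;
}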
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 6005cb7ee5..1c26a5934b 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.166 2005/10/13 06:24:05 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.167 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,8 +166,7 @@ InitProcGlobal(void)
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
/*
- * Pre-create the PGPROC structures and create a semaphore for
- * each.
+ * Pre-create the PGPROC structures and create a semaphore for each.
*/
procs = (PGPROC *) ShmemAlloc(MaxBackends * sizeof(PGPROC));
if (!procs)
@@ -207,8 +206,8 @@ InitProcess(void)
volatile PROC_HDR *procglobal = ProcGlobal;
/*
- * ProcGlobal should be set by a previous call to InitProcGlobal (if
- * we are a backend, we inherit this by fork() from the postmaster).
+ * ProcGlobal should be set by a previous call to InitProcGlobal (if we
+ * are a backend, we inherit this by fork() from the postmaster).
*/
if (procglobal == NULL)
elog(PANIC, "proc header uninitialized");
@@ -217,11 +216,11 @@ InitProcess(void)
elog(ERROR, "you already exist");
/*
- * Try to get a proc struct from the free list. If this fails, we
- * must be out of PGPROC structures (not to mention semaphores).
+ * Try to get a proc struct from the free list. If this fails, we must be
+ * out of PGPROC structures (not to mention semaphores).
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
@@ -238,9 +237,9 @@ InitProcess(void)
else
{
/*
- * If we reach here, all the PGPROCs are in use. This is one of
- * the possible places to detect "too many backends", so give the
- * standard error message.
+ * If we reach here, all the PGPROCs are in use. This is one of the
+ * possible places to detect "too many backends", so give the standard
+ * error message.
*/
SpinLockRelease(ProcStructLock);
ereport(FATAL,
@@ -278,14 +277,14 @@ InitProcess(void)
on_shmem_exit(ProcKill, 0);
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Now that we have a PGPROC, we could try to acquire locks, so
- * initialize the deadlock checker.
+ * Now that we have a PGPROC, we could try to acquire locks, so initialize
+ * the deadlock checker.
*/
InitDeadLockChecking();
}
@@ -322,8 +321,8 @@ InitDummyProcess(int proctype)
* Just for paranoia's sake, we use the ProcStructLock to protect
* assignment and releasing of DummyProcs entries.
*
- * While we are holding the ProcStructLock, also copy the current
- * shared estimate of spins_per_delay to local storage.
+ * While we are holding the ProcStructLock, also copy the current shared
+ * estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
@@ -347,8 +346,8 @@ InitDummyProcess(int proctype)
SpinLockRelease(ProcStructLock);
/*
- * Initialize all fields of MyProc, except MyProc->sem which was set
- * up by InitProcGlobal.
+ * Initialize all fields of MyProc, except MyProc->sem which was set up by
+ * InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -369,8 +368,8 @@ InitDummyProcess(int proctype)
on_shmem_exit(DummyProcKill, Int32GetDatum(proctype));
/*
- * We might be reusing a semaphore that belonged to a failed process.
- * So be careful and reinitialize its value here.
+ * We might be reusing a semaphore that belonged to a failed process. So
+ * be careful and reinitialize its value here.
*/
PGSemaphoreReset(&MyProc->sem);
}
@@ -385,6 +384,7 @@ HaveNFreeProcs(int n)
{
SHMEM_OFFSET offset;
PGPROC *proc;
+
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
@@ -436,9 +436,9 @@ LockWaitCancel(void)
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
- * granted us the lock, or perhaps they detected a deadlock. If
- * they did grant us the lock, we'd better remember it in our
- * local lock table.
+ * granted us the lock, or perhaps they detected a deadlock. If they
+ * did grant us the lock, we'd better remember it in our local lock
+ * table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
@@ -451,17 +451,17 @@ LockWaitCancel(void)
/*
* Reset the proc wait semaphore to zero. This is necessary in the
* scenario where someone else granted us the lock we wanted before we
- * were able to remove ourselves from the wait-list. The semaphore
- * will have been bumped to 1 by the would-be grantor, and since we
- * are no longer going to wait on the sema, we have to force it back
- * to zero. Otherwise, our next attempt to wait for a lock will fall
- * through prematurely.
+ * were able to remove ourselves from the wait-list. The semaphore will
+ * have been bumped to 1 by the would-be grantor, and since we are no
+ * longer going to wait on the sema, we have to force it back to zero.
+ * Otherwise, our next attempt to wait for a lock will fall through
+ * prematurely.
*/
PGSemaphoreReset(&MyProc->sem);
/*
- * Return true even if we were kicked off the lock before we were able
- * to remove ourselves.
+ * Return true even if we were kicked off the lock before we were able to
+ * remove ourselves.
*/
return true;
}
@@ -508,8 +508,8 @@ ProcKill(int code, Datum arg)
Assert(MyProc != NULL);
/*
- * Release any LW locks I am holding. There really shouldn't be any,
- * but it's cheap to check again before we cut the knees off the LWLock
+ * Release any LW locks I am holding. There really shouldn't be any, but
+ * it's cheap to check again before we cut the knees off the LWLock
* facility by releasing our PGPROC ...
*/
LWLockReleaseAll();
@@ -640,20 +640,19 @@ ProcSleep(LockMethod lockMethodTable,
/*
* Determine where to add myself in the wait queue.
*
- * Normally I should go at the end of the queue. However, if I already
- * hold locks that conflict with the request of any previous waiter,
- * put myself in the queue just in front of the first such waiter.
- * This is not a necessary step, since deadlock detection would move
- * me to before that waiter anyway; but it's relatively cheap to
- * detect such a conflict immediately, and avoid delaying till
- * deadlock timeout.
+ * Normally I should go at the end of the queue. However, if I already hold
+ * locks that conflict with the request of any previous waiter, put myself
+ * in the queue just in front of the first such waiter. This is not a
+ * necessary step, since deadlock detection would move me to before that
+ * waiter anyway; but it's relatively cheap to detect such a conflict
+ * immediately, and avoid delaying till deadlock timeout.
*
- * Special case: if I find I should go in front of some waiter, check to
- * see if I conflict with already-held locks or the requests before
- * that waiter. If not, then just grant myself the requested lock
- * immediately. This is the same as the test for immediate grant in
- * LockAcquire, except we are only considering the part of the wait
- * queue before my insertion point.
+ * Special case: if I find I should go in front of some waiter, check to see
+ * if I conflict with already-held locks or the requests before that
+ * waiter. If not, then just grant myself the requested lock immediately.
+ * This is the same as the test for immediate grant in LockAcquire, except
+ * we are only considering the part of the wait queue before my insertion
+ * point.
*/
if (myHeldLocks != 0)
{
@@ -669,12 +668,11 @@ ProcSleep(LockMethod lockMethodTable,
if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
{
/*
- * Yes, so we have a deadlock. Easiest way to clean
- * up correctly is to call RemoveFromWaitQueue(), but
- * we can't do that until we are *on* the wait queue.
- * So, set a flag to check below, and break out of
- * loop. Also, record deadlock info for later
- * message.
+ * Yes, so we have a deadlock. Easiest way to clean up
+ * correctly is to call RemoveFromWaitQueue(), but we
+ * can't do that until we are *on* the wait queue. So, set
+ * a flag to check below, and break out of loop. Also,
+ * record deadlock info for later message.
*/
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
early_deadlock = true;
@@ -702,8 +700,8 @@ ProcSleep(LockMethod lockMethodTable,
}
/*
- * If we fall out of loop normally, proc points to waitQueue head,
- * so we will insert at tail of queue as desired.
+ * If we fall out of loop normally, proc points to waitQueue head, so
+ * we will insert at tail of queue as desired.
*/
}
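
As a rough illustration of the insertion-point scan described in the comment above, the sketch below picks a position in a plain array standing in for the wait queue. Waiter, requestMask, and myHeldMask are invented stand-ins; the real code walks the shared-memory PGPROC list and consults lockMethodTable->conflictTab instead.

typedef struct Waiter
{
	int			requestMask;	/* lock modes this waiter is asking for */
} Waiter;

/*
 * Return the queue position to insert in front of; returning n means
 * "append at the tail", which is the normal case.
 */
static int
choose_insert_point(const Waiter *queue, int n, int myHeldMask)
{
	int			i;

	for (i = 0; i < n; i++)
	{
		/* an earlier waiter wants something I already hold: go before it */
		if (queue[i].requestMask & myHeldMask)
			return i;
	}
	return n;
}
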
else
@@ -713,8 +711,7 @@ ProcSleep(LockMethod lockMethodTable,
}
/*
- * Insert self into queue, ahead of the given proc (or at tail of
- * queue).
+ * Insert self into queue, ahead of the given proc (or at tail of queue).
*/
SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
waitQueue->size++;
@@ -729,9 +726,9 @@ ProcSleep(LockMethod lockMethodTable,
MyProc->waitStatus = STATUS_ERROR; /* initialize result for error */
/*
- * If we detected deadlock, give up without waiting. This must agree
- * with CheckDeadLock's recovery code, except that we shouldn't
- * release the semaphore since we haven't tried to lock it yet.
+ * If we detected deadlock, give up without waiting. This must agree with
+ * CheckDeadLock's recovery code, except that we shouldn't release the
+ * semaphore since we haven't tried to lock it yet.
*/
if (early_deadlock)
{
@@ -746,39 +743,38 @@ ProcSleep(LockMethod lockMethodTable,
* Release the locktable's masterLock.
*
* NOTE: this may also cause us to exit critical-section state, possibly
- * allowing a cancel/die interrupt to be accepted. This is OK because
- * we have recorded the fact that we are waiting for a lock, and so
+ * allowing a cancel/die interrupt to be accepted. This is OK because we
+ * have recorded the fact that we are waiting for a lock, and so
* LockWaitCancel will clean up if cancel/die happens.
*/
LWLockRelease(masterLock);
/*
- * Set timer so we can wake up after awhile and check for a deadlock.
- * If a deadlock is detected, the handler releases the process's
- * semaphore and sets MyProc->waitStatus = STATUS_ERROR, allowing us
- * to know that we must report failure rather than success.
+ * Set timer so we can wake up after awhile and check for a deadlock. If a
+ * deadlock is detected, the handler releases the process's semaphore and
+ * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
+ * must report failure rather than success.
*
- * By delaying the check until we've waited for a bit, we can avoid
- * running the rather expensive deadlock-check code in most cases.
+ * By delaying the check until we've waited for a bit, we can avoid running
+ * the rather expensive deadlock-check code in most cases.
*/
if (!enable_sig_alarm(DeadlockTimeout, false))
elog(FATAL, "could not set timer for process wakeup");
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
- * PGSemaphoreLock will not block. The wakeup is "saved" by the
- * semaphore implementation. Note also that if CheckDeadLock is
- * invoked but does not detect a deadlock, PGSemaphoreLock() will
- * continue to wait. There used to be a loop here, but it was useless
- * code...
+ * PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
+ * implementation. Note also that if CheckDeadLock is invoked but does
+ * not detect a deadlock, PGSemaphoreLock() will continue to wait. There
+ * used to be a loop here, but it was useless code...
*
- * We pass interruptOK = true, which eliminates a window in which
- * cancel/die interrupts would be held off undesirably. This is a
- * promise that we don't mind losing control to a cancel/die interrupt
- * here. We don't, because we have no shared-state-change work to do
- * after being granted the lock (the grantor did it all). We do have
- * to worry about updating the locallock table, but if we lose control
- * to an error, LockWaitCancel will fix that up.
+ * We pass interruptOK = true, which eliminates a window in which cancel/die
+ * interrupts would be held off undesirably. This is a promise that we
+ * don't mind losing control to a cancel/die interrupt here. We don't,
+ * because we have no shared-state-change work to do after being granted
+ * the lock (the grantor did it all). We do have to worry about updating
+ * the locallock table, but if we lose control to an error, LockWaitCancel
+ * will fix that up.
*/
PGSemaphoreLock(&MyProc->sem, true);
@@ -789,9 +785,9 @@ ProcSleep(LockMethod lockMethodTable,
elog(FATAL, "could not disable timer for process wakeup");
/*
- * Re-acquire the locktable's masterLock. We have to do this to hold
- * off cancel/die interrupts before we can mess with waitingForLock
- * (else we might have a missed or duplicated locallock update).
+ * Re-acquire the locktable's masterLock. We have to do this to hold off
+ * cancel/die interrupts before we can mess with waitingForLock (else we
+ * might have a missed or duplicated locallock update).
*/
LWLockAcquire(masterLock, LW_EXCLUSIVE);
@@ -879,8 +875,8 @@ ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
LOCKMODE lockmode = proc->waitLockMode;
/*
- * Waken if (a) doesn't conflict with requests of earlier waiters,
- * and (b) doesn't conflict with already-held locks.
+ * Waken if (a) doesn't conflict with requests of earlier waiters, and
+ * (b) doesn't conflict with already-held locks.
*/
if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
LockCheckConflicts(lockMethodTable,
@@ -894,16 +890,15 @@ ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
proc = ProcWakeup(proc, STATUS_OK);
/*
- * ProcWakeup removes proc from the lock's waiting process
- * queue and returns the next proc in chain; don't use proc's
- * next-link, because it's been cleared.
+ * ProcWakeup removes proc from the lock's waiting process queue
+ * and returns the next proc in chain; don't use proc's next-link,
+ * because it's been cleared.
*/
}
else
{
/*
- * Cannot wake this guy. Remember his request for later
- * checks.
+ * Cannot wake this guy. Remember his request for later checks.
*/
aheadRequests |= LOCKBIT_ON(lockmode);
proc = (PGPROC *) MAKE_PTR(proc->links.next);
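
The wakeup rule, conditions (a) and (b) above, can be sketched with simple bitmasks. Everything here (QueuedWaiter, grantedMask, the direct conflictTab test used for condition (b)) is a simplification for illustration; the real routine walks the shared-memory queue and uses LockCheckConflicts for the already-held-locks test.

typedef struct QueuedWaiter
{
	int			waitMode;		/* requested lock mode, a small integer */
	int			awoken;			/* set once we decide to wake this waiter */
} QueuedWaiter;

static void
wake_eligible_waiters(QueuedWaiter *queue, int n,
					  const int *conflictTab,	/* mode -> conflicting-mode bitmask */
					  int grantedMask)	/* bitmask of modes already granted */
{
	int			aheadRequests = 0;	/* modes requested by waiters we skipped */
	int			i;

	for (i = 0; i < n; i++)
	{
		int			mode = queue[i].waitMode;

		if ((conflictTab[mode] & aheadRequests) == 0 &&		/* (a) */
			(conflictTab[mode] & grantedMask) == 0)			/* (b) */
		{
			queue[i].awoken = 1;
			grantedMask |= 1 << mode;	/* it now holds its lock too */
		}
		else
		{
			/* cannot wake this one; remember its request for later checks */
			aheadRequests |= 1 << mode;
		}
	}
}
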
@@ -928,22 +923,21 @@ CheckDeadLock(void)
* Acquire locktable lock. Note that the deadlock check interrupt had
* better not be enabled anywhere that this process itself holds the
* locktable lock, else this will wait forever. Also note that
- * LWLockAcquire creates a critical section, so that this routine
- * cannot be interrupted by cancel/die interrupts.
+ * LWLockAcquire creates a critical section, so that this routine cannot
+ * be interrupted by cancel/die interrupts.
*/
LWLockAcquire(LockMgrLock, LW_EXCLUSIVE);
/*
* Check to see if we've been awoken by anyone in the interim.
*
- * If we have we can return and resume our transaction -- happy day.
- * Before we are awoken the process releasing the lock grants it to us
- * so we know that we don't have to wait anymore.
+	 * If we have, we can return and resume our transaction -- happy day.
+	 * Before we are awoken, the process releasing the lock grants it to us,
+	 * so we know that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
- * This is quicker than checking our semaphore's state, since no
- * kernel call is needed, and it is safe because we hold the locktable
- * lock.
+ * This is quicker than checking our semaphore's state, since no kernel
+ * call is needed, and it is safe because we hold the locktable lock.
*/
if (MyProc->links.prev == INVALID_OFFSET ||
MyProc->links.next == INVALID_OFFSET)
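
The "have we been unlinked?" test that begins here is just a look at our own queue links, which live in shared memory as offsets. A minimal sketch with invented stand-in types (the real ones are the backend's SHM_QUEUE links and INVALID_OFFSET):

typedef unsigned long shmem_offset;

#define INVALID_OFFSET	((shmem_offset) -1)

typedef struct QueueLinks
{
	shmem_offset prev;
	shmem_offset next;
} QueueLinks;

/* true if this process has already been removed from the wait queue */
static int
already_removed_from_queue(const QueueLinks *links)
{
	/* no kernel call needed: a detached node carries invalid links */
	return (links->prev == INVALID_OFFSET || links->next == INVALID_OFFSET);
}
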
@@ -972,8 +966,8 @@ CheckDeadLock(void)
RemoveFromWaitQueue(MyProc);
/*
- * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will
- * report an error after we return from the signal handler.
+ * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will report an
+ * error after we return from the signal handler.
*/
MyProc->waitStatus = STATUS_ERROR;
@@ -984,14 +978,14 @@ CheckDeadLock(void)
PGSemaphoreUnlock(&MyProc->sem);
/*
- * We're done here. Transaction abort caused by the error that
- * ProcSleep will raise will cause any other locks we hold to be
- * released, thus allowing other processes to wake up; we don't need
- * to do that here. NOTE: an exception is that releasing locks we hold
- * doesn't consider the possibility of waiters that were blocked
- * behind us on the lock we just failed to get, and might now be
- * wakable because we're not in front of them anymore. However,
- * RemoveFromWaitQueue took care of waking up any such processes.
+ * We're done here. Transaction abort caused by the error that ProcSleep
+ * will raise will cause any other locks we hold to be released, thus
+ * allowing other processes to wake up; we don't need to do that here.
+ * NOTE: an exception is that releasing locks we hold doesn't consider the
+ * possibility of waiters that were blocked behind us on the lock we just
+ * failed to get, and might now be wakable because we're not in front of
+ * them anymore. However, RemoveFromWaitQueue took care of waking up any
+ * such processes.
*/
LWLockRelease(LockMgrLock);
}
@@ -1061,7 +1055,6 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
#ifndef __BEOS__
struct itimerval timeval;
-
#else
bigtime_t time_interval;
#endif
@@ -1092,16 +1085,16 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
/*
* Begin deadlock timeout with statement-level timeout active
*
- * Here, we want to interrupt at the closer of the two timeout times.
- * If fin_time >= statement_fin_time then we need not touch the
- * existing timer setting; else set up to interrupt at the
- * deadlock timeout time.
+ * Here, we want to interrupt at the closer of the two timeout times. If
+ * fin_time >= statement_fin_time then we need not touch the existing
+ * timer setting; else set up to interrupt at the deadlock timeout
+ * time.
*
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
- * because the signal handler will do only what it should do
- * according to the state variables. The deadlock checker may get
- * run earlier than normal, but that does no harm.
+ * because the signal handler will do only what it should do according
+ * to the state variables. The deadlock checker may get run earlier
+ * than normal, but that does no harm.
*/
deadlock_timeout_active = true;
if (fin_time.tv_sec > statement_fin_time.tv_sec ||
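
The comparison that starts here (the rest of the condition falls outside the hunk) picks the closer of the two finish times: the timer only needs to be re-armed when the deadlock check would fire before the statement timeout. A sketch, assuming the BSD timercmp() macro from <sys/time.h> in place of the open-coded tv_sec/tv_usec comparison:

#include <stdbool.h>
#include <sys/time.h>

static bool
deadlock_check_fires_first(struct timeval fin_time,
						   struct timeval statement_fin_time)
{
	/* if fin_time >= statement_fin_time, the existing timer already covers us */
	return timercmp(&fin_time, &statement_fin_time, <);
}
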
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index 1fb069d4f3..f1c92d70da 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.39 2005/10/11 20:41:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,47 +50,45 @@ void
s_lock(volatile slock_t *lock, const char *file, int line)
{
/*
- * We loop tightly for awhile, then delay using pg_usleep() and try
- * again. Preferably, "awhile" should be a small multiple of the
- * maximum time we expect a spinlock to be held. 100 iterations seems
- * about right as an initial guess. However, on a uniprocessor the
- * loop is a waste of cycles, while in a multi-CPU scenario it's usually
- * better to spin a bit longer than to call the kernel, so we try to
- * adapt the spin loop count depending on whether we seem to be in
- * a uniprocessor or multiprocessor.
+ * We loop tightly for awhile, then delay using pg_usleep() and try again.
+ * Preferably, "awhile" should be a small multiple of the maximum time we
+ * expect a spinlock to be held. 100 iterations seems about right as an
+ * initial guess. However, on a uniprocessor the loop is a waste of
+ * cycles, while in a multi-CPU scenario it's usually better to spin a bit
+ * longer than to call the kernel, so we try to adapt the spin loop count
+ * depending on whether we seem to be in a uniprocessor or multiprocessor.
*
- * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
- * be wrong; there are platforms where that can result in a "stuck
- * spinlock" failure. This has been seen particularly on Alphas; it
- * seems that the first TAS after returning from kernel space will always
- * fail on that hardware.
+ * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd be
+ * wrong; there are platforms where that can result in a "stuck spinlock"
+ * failure. This has been seen particularly on Alphas; it seems that the
+ * first TAS after returning from kernel space will always fail on that
+ * hardware.
*
- * Once we do decide to block, we use randomly increasing pg_usleep()
- * delays. The first delay is 1 msec, then the delay randomly
- * increases to about one second, after which we reset to 1 msec and
- * start again. The idea here is that in the presence of heavy
- * contention we need to increase the delay, else the spinlock holder
- * may never get to run and release the lock. (Consider situation
- * where spinlock holder has been nice'd down in priority by the
- * scheduler --- it will not get scheduled until all would-be
- * acquirers are sleeping, so if we always use a 1-msec sleep, there
- * is a real possibility of starvation.) But we can't just clamp the
- * delay to an upper bound, else it would take a long time to make a
- * reasonable number of tries.
+ * Once we do decide to block, we use randomly increasing pg_usleep() delays.
+ * The first delay is 1 msec, then the delay randomly increases to about
+ * one second, after which we reset to 1 msec and start again. The idea
+ * here is that in the presence of heavy contention we need to increase
+ * the delay, else the spinlock holder may never get to run and release
+ * the lock. (Consider situation where spinlock holder has been nice'd
+ * down in priority by the scheduler --- it will not get scheduled until
+ * all would-be acquirers are sleeping, so if we always use a 1-msec
+ * sleep, there is a real possibility of starvation.) But we can't just
+ * clamp the delay to an upper bound, else it would take a long time to
+ * make a reasonable number of tries.
*
- * We time out and declare error after NUM_DELAYS delays (thus, exactly
- * that many tries). With the given settings, this will usually take
- * 2 or so minutes. It seems better to fix the total number of tries
- * (and thus the probability of unintended failure) than to fix the
- * total time spent.
+ * We time out and declare error after NUM_DELAYS delays (thus, exactly that
+ * many tries). With the given settings, this will usually take 2 or so
+ * minutes. It seems better to fix the total number of tries (and thus
+ * the probability of unintended failure) than to fix the total time
+ * spent.
*
- * The pg_usleep() delays are measured in milliseconds because 1 msec
- * is a common resolution limit at the OS level for newer platforms.
- * On older platforms the resolution limit is usually 10 msec, in
- * which case the total delay before timeout will be a bit more.
+ * The pg_usleep() delays are measured in milliseconds because 1 msec is a
+ * common resolution limit at the OS level for newer platforms. On older
+ * platforms the resolution limit is usually 10 msec, in which case the
+ * total delay before timeout will be a bit more.
*/
-#define MIN_SPINS_PER_DELAY 10
-#define MAX_SPINS_PER_DELAY 1000
+#define MIN_SPINS_PER_DELAY 10
+#define MAX_SPINS_PER_DELAY 1000
#define NUM_DELAYS 1000
#define MIN_DELAY_MSEC 1
#define MAX_DELAY_MSEC 1000
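
The delay policy the comment describes (a bounded number of sleeps whose length grows by a random factor between 1X and 2X, wrapping back to the minimum when the cap is exceeded) looks roughly like the standalone sketch below. NUM_DELAYS is shrunk and the sleeps are replaced by a printout so the demonstration finishes instantly; rand()/RAND_MAX stands in for random()/MAX_RANDOM_VALUE, and the other constants mirror the diff.

#include <stdio.h>
#include <stdlib.h>

#define NUM_DELAYS		20		/* the real code uses 1000 */
#define MIN_DELAY_MSEC	1
#define MAX_DELAY_MSEC	1000

int
main(void)
{
	int			cur_delay = 0;
	int			delays;

	for (delays = 0; delays < NUM_DELAYS; delays++)
	{
		if (cur_delay == 0)		/* first time to delay? */
			cur_delay = MIN_DELAY_MSEC;

		printf("delay %2d: sleep %4d msec\n", delays + 1, cur_delay);

		/* increase delay by a random fraction between 1X and 2X */
		cur_delay += (int) (cur_delay *
							((double) rand() / (double) RAND_MAX) + 0.5);

		/* wrap back to minimum delay when max is exceeded */
		if (cur_delay > MAX_DELAY_MSEC)
			cur_delay = MIN_DELAY_MSEC;
	}
	return 0;
}
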
@@ -110,7 +108,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
if (++delays > NUM_DELAYS)
s_lock_stuck(lock, file, line);
- if (cur_delay == 0) /* first time to delay? */
+ if (cur_delay == 0) /* first time to delay? */
cur_delay = MIN_DELAY_MSEC;
pg_usleep(cur_delay * 1000L);
@@ -122,7 +120,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
+ (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
@@ -133,18 +131,18 @@ s_lock(volatile slock_t *lock, const char *file, int line)
/*
* If we were able to acquire the lock without delaying, it's a good
- * indication we are in a multiprocessor. If we had to delay, it's
- * a sign (but not a sure thing) that we are in a uniprocessor.
- * Hence, we decrement spins_per_delay slowly when we had to delay,
- * and increase it rapidly when we didn't. It's expected that
- * spins_per_delay will converge to the minimum value on a uniprocessor
- * and to the maximum value on a multiprocessor.
+ * indication we are in a multiprocessor. If we had to delay, it's a sign
+ * (but not a sure thing) that we are in a uniprocessor. Hence, we
+ * decrement spins_per_delay slowly when we had to delay, and increase it
+ * rapidly when we didn't. It's expected that spins_per_delay will
+ * converge to the minimum value on a uniprocessor and to the maximum
+ * value on a multiprocessor.
*
- * Note: spins_per_delay is local within our current process.
- * We want to average these observations across multiple backends,
- * since it's relatively rare for this function to even get entered,
- * and so a single backend might not live long enough to converge on
- * a good value. That is handled by the two routines below.
+ * Note: spins_per_delay is local within our current process. We want to
+ * average these observations across multiple backends, since it's
+ * relatively rare for this function to even get entered, and so a single
+ * backend might not live long enough to converge on a good value. That
+ * is handled by the two routines below.
*/
if (cur_delay == 0)
{
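
The branch that begins here (its body lies outside the hunk) implements the asymmetric adjustment the comment describes: jump up quickly when we never had to delay, creep down slowly when we did. The sketch below conveys the shape of that rule; the step sizes 100 and 1 are illustrative guesses, not taken from the diff.

#define MIN_SPINS_PER_DELAY	10
#define MAX_SPINS_PER_DELAY	1000

static int	spins_per_delay = 100;

static void
adapt_spins_per_delay(int num_delays)
{
	if (num_delays == 0)
	{
		/* acquired without sleeping: likely a multiprocessor, ramp up fast */
		spins_per_delay += 100;
		if (spins_per_delay > MAX_SPINS_PER_DELAY)
			spins_per_delay = MAX_SPINS_PER_DELAY;
	}
	else
	{
		/* we had to sleep: likely a uniprocessor, drift down slowly */
		spins_per_delay -= 1;
		if (spins_per_delay < MIN_SPINS_PER_DELAY)
			spins_per_delay = MIN_SPINS_PER_DELAY;
	}
}
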
@@ -180,15 +178,14 @@ int
update_spins_per_delay(int shared_spins_per_delay)
{
/*
- * We use an exponential moving average with a relatively slow
- * adaption rate, so that noise in any one backend's result won't
- * affect the shared value too much. As long as both inputs are
- * within the allowed range, the result must be too, so we need not
- * worry about clamping the result.
+	 * We use an exponential moving average with a relatively slow adaptation
+	 * rate, so that noise in any one backend's result won't affect the shared
+ * value too much. As long as both inputs are within the allowed range,
+ * the result must be too, so we need not worry about clamping the result.
*
- * We deliberately truncate rather than rounding; this is so that
- * single adjustments inside a backend can affect the shared estimate
- * (see the asymmetric adjustment rules above).
+ * We deliberately truncate rather than rounding; this is so that single
+ * adjustments inside a backend can affect the shared estimate (see the
+ * asymmetric adjustment rules above).
*/
return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
}
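
To see the 15/16 moving average converge, here is a small self-contained harness; the main() driver and starting values are invented for demonstration, and only the update formula comes from the diff.

#include <stdio.h>

static int	spins_per_delay = 1000;	/* this backend's local estimate */

static int
update_shared_estimate(int shared_spins_per_delay)
{
	/* 15 parts old shared value, 1 part local value, truncated */
	return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
}

int
main(void)
{
	int			shared = 10;	/* start at the uniprocessor minimum */
	int			i;

	/* repeated reports from one "multiprocessor" backend pull it upward */
	for (i = 0; i < 50; i++)
		shared = update_shared_estimate(shared);

	printf("shared estimate after 50 updates: %d\n", shared);
	return 0;
}
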
@@ -227,7 +224,7 @@ tas_dummy()
__asm__ __volatile__(
#if defined(__NetBSD__) && defined(__ELF__)
/* no underscore for label and % for registers */
- "\
+ "\
.global tas \n\
tas: \n\
movel %sp@(0x4),%a0 \n\
@@ -239,7 +236,7 @@ _success: \n\
moveq #0,%d0 \n\
rts \n"
#else
- "\
+ "\
.global _tas \n\
_tas: \n\
movel sp@(0x4),a0 \n\
@@ -251,11 +248,10 @@ _success: \n\
moveq #0,d0 \n\
rts \n"
#endif /* __NetBSD__ && __ELF__ */
-);
+ );
}
#endif /* __m68k__ && !__linux__ */
-
#else /* not __GNUC__ */
/*
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 4e7e47afcd..dfec2a7769 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.16 2004/12/31 22:01:05 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.17 2005/10/15 02:49:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,6 @@ SpinlockSemas(void)
{
return 0;
}
-
#else /* !HAVE_SPINLOCKS */
/*
@@ -52,11 +51,11 @@ int
SpinlockSemas(void)
{
/*
- * It would be cleaner to distribute this logic into the affected
- * modules, similar to the way shmem space estimation is handled.
+ * It would be cleaner to distribute this logic into the affected modules,
+ * similar to the way shmem space estimation is handled.
*
- * For now, though, we just need a few spinlocks (10 should be plenty)
- * plus one for each LWLock.
+ * For now, though, we just need a few spinlocks (10 should be plenty) plus
+ * one for each LWLock.
*/
return NumLWLocks() + 10;
}