| field | value | date |
|---|---|---|
| author | Bruce Momjian <bruce@momjian.us> | 2005-10-15 02:49:52 +0000 |
| committer | Bruce Momjian <bruce@momjian.us> | 2005-10-15 02:49:52 +0000 |
| commit | 1dc34982511d91ef8a2b71bdcb870f067c1b3da9 | |
| tree | 1046adab1d4b964e0c38afeec0ee6546f61d9a8a /src/backend/storage/lmgr/s_lock.c | |
| parent | 790c01d28099587bbe2c623d4389b62ee49b1dee | |
Standard pgindent run for 8.1.
Diffstat (limited to 'src/backend/storage/lmgr/s_lock.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/backend/storage/lmgr/s_lock.c | 120 |

1 file changed, 58 insertions, 62 deletions
```diff
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index 1fb069d4f3..f1c92d70da 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.39 2005/10/11 20:41:32 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40 2005/10/15 02:49:26 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -50,47 +50,45 @@ void
 s_lock(volatile slock_t *lock, const char *file, int line)
 {
 	/*
-	 * We loop tightly for awhile, then delay using pg_usleep() and try
-	 * again. Preferably, "awhile" should be a small multiple of the
-	 * maximum time we expect a spinlock to be held. 100 iterations seems
-	 * about right as an initial guess. However, on a uniprocessor the
-	 * loop is a waste of cycles, while in a multi-CPU scenario it's usually
-	 * better to spin a bit longer than to call the kernel, so we try to
-	 * adapt the spin loop count depending on whether we seem to be in
-	 * a uniprocessor or multiprocessor.
+	 * We loop tightly for awhile, then delay using pg_usleep() and try again.
+	 * Preferably, "awhile" should be a small multiple of the maximum time we
+	 * expect a spinlock to be held. 100 iterations seems about right as an
+	 * initial guess. However, on a uniprocessor the loop is a waste of
+	 * cycles, while in a multi-CPU scenario it's usually better to spin a bit
+	 * longer than to call the kernel, so we try to adapt the spin loop count
+	 * depending on whether we seem to be in a uniprocessor or multiprocessor.
 	 *
-	 * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
-	 * be wrong; there are platforms where that can result in a "stuck
-	 * spinlock" failure. This has been seen particularly on Alphas; it
-	 * seems that the first TAS after returning from kernel space will always
-	 * fail on that hardware.
+	 * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd be
+	 * wrong; there are platforms where that can result in a "stuck spinlock"
+	 * failure. This has been seen particularly on Alphas; it seems that the
+	 * first TAS after returning from kernel space will always fail on that
+	 * hardware.
 	 *
-	 * Once we do decide to block, we use randomly increasing pg_usleep()
-	 * delays. The first delay is 1 msec, then the delay randomly
-	 * increases to about one second, after which we reset to 1 msec and
-	 * start again. The idea here is that in the presence of heavy
-	 * contention we need to increase the delay, else the spinlock holder
-	 * may never get to run and release the lock. (Consider situation
-	 * where spinlock holder has been nice'd down in priority by the
-	 * scheduler --- it will not get scheduled until all would-be
-	 * acquirers are sleeping, so if we always use a 1-msec sleep, there
-	 * is a real possibility of starvation.) But we can't just clamp the
-	 * delay to an upper bound, else it would take a long time to make a
-	 * reasonable number of tries.
+	 * Once we do decide to block, we use randomly increasing pg_usleep() delays.
+	 * The first delay is 1 msec, then the delay randomly increases to about
+	 * one second, after which we reset to 1 msec and start again. The idea
+	 * here is that in the presence of heavy contention we need to increase
+	 * the delay, else the spinlock holder may never get to run and release
+	 * the lock. (Consider situation where spinlock holder has been nice'd
+	 * down in priority by the scheduler --- it will not get scheduled until
+	 * all would-be acquirers are sleeping, so if we always use a 1-msec
+	 * sleep, there is a real possibility of starvation.) But we can't just
+	 * clamp the delay to an upper bound, else it would take a long time to
+	 * make a reasonable number of tries.
 	 *
-	 * We time out and declare error after NUM_DELAYS delays (thus, exactly
-	 * that many tries). With the given settings, this will usually take
-	 * 2 or so minutes. It seems better to fix the total number of tries
-	 * (and thus the probability of unintended failure) than to fix the
-	 * total time spent.
+	 * We time out and declare error after NUM_DELAYS delays (thus, exactly that
+	 * many tries). With the given settings, this will usually take 2 or so
+	 * minutes. It seems better to fix the total number of tries (and thus
+	 * the probability of unintended failure) than to fix the total time
+	 * spent.
 	 *
-	 * The pg_usleep() delays are measured in milliseconds because 1 msec
-	 * is a common resolution limit at the OS level for newer platforms.
-	 * On older platforms the resolution limit is usually 10 msec, in
-	 * which case the total delay before timeout will be a bit more.
+	 * The pg_usleep() delays are measured in milliseconds because 1 msec is a
+	 * common resolution limit at the OS level for newer platforms. On older
+	 * platforms the resolution limit is usually 10 msec, in which case the
+	 * total delay before timeout will be a bit more.
 	 */
-#define MIN_SPINS_PER_DELAY 10
-#define MAX_SPINS_PER_DELAY 1000
+#define MIN_SPINS_PER_DELAY	10
+#define MAX_SPINS_PER_DELAY	1000
 #define NUM_DELAYS			1000
 #define MIN_DELAY_MSEC		1
 #define MAX_DELAY_MSEC		1000
@@ -110,7 +108,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
 		if (++delays > NUM_DELAYS)
 			s_lock_stuck(lock, file, line);
 
-		if (cur_delay == 0)		/* first time to delay? */
+		if (cur_delay == 0)	/* first time to delay? */
 			cur_delay = MIN_DELAY_MSEC;
 
 		pg_usleep(cur_delay * 1000L);
@@ -122,7 +120,7 @@ s_lock(volatile slock_t *lock, const char *file, int line)
 
 		/* increase delay by a random fraction between 1X and 2X */
 		cur_delay += (int) (cur_delay *
-					(((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
+				  (((double) random()) / ((double) MAX_RANDOM_VALUE)) + 0.5);
 
 		/* wrap back to minimum delay when max is exceeded */
 		if (cur_delay > MAX_DELAY_MSEC)
 			cur_delay = MIN_DELAY_MSEC;
@@ -133,18 +131,18 @@ s_lock(volatile slock_t *lock, const char *file, int line)
 
 	/*
 	 * If we were able to acquire the lock without delaying, it's a good
-	 * indication we are in a multiprocessor. If we had to delay, it's
-	 * a sign (but not a sure thing) that we are in a uniprocessor.
-	 * Hence, we decrement spins_per_delay slowly when we had to delay,
-	 * and increase it rapidly when we didn't. It's expected that
-	 * spins_per_delay will converge to the minimum value on a uniprocessor
-	 * and to the maximum value on a multiprocessor.
+	 * indication we are in a multiprocessor. If we had to delay, it's a sign
+	 * (but not a sure thing) that we are in a uniprocessor. Hence, we
+	 * decrement spins_per_delay slowly when we had to delay, and increase it
+	 * rapidly when we didn't. It's expected that spins_per_delay will
+	 * converge to the minimum value on a uniprocessor and to the maximum
+	 * value on a multiprocessor.
 	 *
-	 * Note: spins_per_delay is local within our current process.
-	 * We want to average these observations across multiple backends,
-	 * since it's relatively rare for this function to even get entered,
-	 * and so a single backend might not live long enough to converge on
-	 * a good value. That is handled by the two routines below.
+	 * Note: spins_per_delay is local within our current process. We want to
+	 * average these observations across multiple backends, since it's
+	 * relatively rare for this function to even get entered, and so a single
+	 * backend might not live long enough to converge on a good value. That
+	 * is handled by the two routines below.
 	 */
 	if (cur_delay == 0)
 	{
@@ -180,15 +178,14 @@ int
 update_spins_per_delay(int shared_spins_per_delay)
 {
 	/*
-	 * We use an exponential moving average with a relatively slow
-	 * adaption rate, so that noise in any one backend's result won't
-	 * affect the shared value too much. As long as both inputs are
-	 * within the allowed range, the result must be too, so we need not
-	 * worry about clamping the result.
+	 * We use an exponential moving average with a relatively slow adaption
+	 * rate, so that noise in any one backend's result won't affect the shared
+	 * value too much. As long as both inputs are within the allowed range,
+	 * the result must be too, so we need not worry about clamping the result.
 	 *
-	 * We deliberately truncate rather than rounding; this is so that
-	 * single adjustments inside a backend can affect the shared estimate
-	 * (see the asymmetric adjustment rules above).
+	 * We deliberately truncate rather than rounding; this is so that single
+	 * adjustments inside a backend can affect the shared estimate (see the
+	 * asymmetric adjustment rules above).
 	 */
 	return (shared_spins_per_delay * 15 + spins_per_delay) / 16;
 }
@@ -227,7 +224,7 @@ tas_dummy()
 __asm__ __volatile__(
 #if defined(__NetBSD__) && defined(__ELF__)
 /* no underscore for label and % for registers */
-"\
+	"\
 .global		tas				\n\
 tas:						\n\
 	movel	%sp@(0x4),%a0	\n\
@@ -239,7 +236,7 @@ _success:					\n\
 	moveq	#0,%d0			\n\
 	rts						\n"
 #else
-"\
+	"\
 .global		_tas			\n\
 _tas:						\n\
 	movel	sp@(0x4),a0		\n\
@@ -251,11 +248,10 @@ _success:					\n\
 	moveq	#0,d0			\n\
 	rts						\n"
 #endif   /* __NetBSD__ && __ELF__ */
-);
+	);
 }
 #endif   /* __m68k__ && !__linux__ */
 
-
 #else							/* not __GNUC__ */
 
 /*
```
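Although this commit only rewraps comments, the backoff policy those comments describe is compact enough to sketch standalone. In the sketch below, try_lock() (built on the GCC __sync_lock_test_and_set() builtin) stands in for the platform-specific TAS() macro, abort() for s_lock_stuck(), and usleep() for pg_usleep(); SPINS_PER_DELAY is fixed here rather than adaptive. This is an illustration of the policy, not the actual PostgreSQL implementation.

```c
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

#define SPINS_PER_DELAY		100			/* adaptive in the real code */
#define NUM_DELAYS			1000
#define MIN_DELAY_MSEC		1
#define MAX_DELAY_MSEC		1000
#define MAX_RANDOM_VALUE	0x7FFFFFFF	/* range of POSIX random() */

static volatile int lock_word = 0;

static bool
try_lock(void)
{
	/* stand-in for the platform TAS() macro */
	return __sync_lock_test_and_set(&lock_word, 1) == 0;
}

static void
spin_with_backoff(void)
{
	int			spins = 0;
	int			delays = 0;
	int			cur_delay = 0;	/* 0 means we have not slept yet */

	while (!try_lock())
	{
		/* loop tightly for awhile before involving the kernel */
		if (++spins < SPINS_PER_DELAY)
			continue;
		spins = 0;

		if (++delays > NUM_DELAYS)
			abort();			/* the real code reports "stuck spinlock" */

		if (cur_delay == 0)		/* first time to delay? */
			cur_delay = MIN_DELAY_MSEC;

		usleep(cur_delay * 1000L);

		/* increase delay by a random fraction between 1X and 2X */
		cur_delay += (int) (cur_delay *
					((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);

		/* wrap back to minimum delay when max is exceeded */
		if (cur_delay > MAX_DELAY_MSEC)
			cur_delay = MIN_DELAY_MSEC;
	}
}

int
main(void)
{
	spin_with_backoff();		/* lock starts free, so this returns at once */
	return 0;
}
```

The wrap-to-minimum on overflow is the detail the comment defends: a hard clamp at MAX_DELAY_MSEC would make NUM_DELAYS tries take far longer, while resetting keeps the total timeout bounded yet still lets long delays recur under heavy contention.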
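The exponential moving average in update_spins_per_delay() is likewise easy to verify by hand. A minimal sketch follows, with the backend-local spins_per_delay passed as a parameter for self-containment (the real routine reads a file-static variable):

```c
#include <stdio.h>

/*
 * Weight 1/16 on the newest local observation, 15/16 on the old shared
 * value; the integer division truncates on purpose.
 */
static int
update_spins_per_delay(int shared_spins_per_delay, int local_spins_per_delay)
{
	return (shared_spins_per_delay * 15 + local_spins_per_delay) / 16;
}

int
main(void)
{
	/*
	 * Why truncate rather than round: a single backend that nudged its
	 * local value from 100 down to 99 still moves the shared estimate,
	 * because (100 * 15 + 99) / 16 = 1599 / 16 truncates to 99.
	 * Rounding would yield 100 and the adjustment would be lost.
	 */
	printf("%d\n", update_spins_per_delay(100, 99));	/* prints 99 */
	return 0;
}
```

And since both inputs stay within [MIN_SPINS_PER_DELAY, MAX_SPINS_PER_DELAY], their weighted average cannot leave that range, which is why the comment notes that no clamping of the result is needed.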