author     Bruce Momjian <bruce@momjian.us>    2014-05-06 12:12:18 -0400
committer  Bruce Momjian <bruce@momjian.us>    2014-05-06 12:12:18 -0400
commit     0a7832005792fa6dad171f9cadb8d587fe0dd800 (patch)
tree       365cfc42c521a52607e41394b08ef44d338d8fc1 /src/include/storage
parent     fb85cd4320414c3f6e9c8bc69ec944200ae1e493 (diff)
download   postgresql-0a7832005792fa6dad171f9cadb8d587fe0dd800.tar.gz
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
Diffstat (limited to 'src/include/storage')
-rw-r--r--  src/include/storage/barrier.h              |  2
-rw-r--r--  src/include/storage/block.h                |  2
-rw-r--r--  src/include/storage/buf_internals.h        |  8
-rw-r--r--  src/include/storage/bufpage.h              |  8
-rw-r--r--  src/include/storage/dsm.h                  |  4
-rw-r--r--  src/include/storage/dsm_impl.h             |  2
-rw-r--r--  src/include/storage/ipc.h                  |  2
-rw-r--r--  src/include/storage/itemid.h               |  2
-rw-r--r--  src/include/storage/itemptr.h              |  2
-rw-r--r--  src/include/storage/large_object.h         |  2
-rw-r--r--  src/include/storage/lock.h                 | 14
-rw-r--r--  src/include/storage/lwlock.h               |  8
-rw-r--r--  src/include/storage/pg_sema.h              |  2
-rw-r--r--  src/include/storage/pg_shmem.h             |  6
-rw-r--r--  src/include/storage/pos.h                  |  2
-rw-r--r--  src/include/storage/predicate_internals.h  | 10
-rw-r--r--  src/include/storage/proc.h                 | 13
-rw-r--r--  src/include/storage/procarray.h            |  2
-rw-r--r--  src/include/storage/relfilenode.h          |  6
-rw-r--r--  src/include/storage/shm_mq.h               |  6
-rw-r--r--  src/include/storage/shm_toc.h              |  4
-rw-r--r--  src/include/storage/sinval.h               |  4
-rw-r--r--  src/include/storage/sinvaladt.h            |  2
-rw-r--r--  src/include/storage/smgr.h                 |  6
-rw-r--r--  src/include/storage/spin.h                 |  6
25 files changed, 63 insertions(+), 62 deletions(-)
diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h
index 82ddccd3a2..bc61de0ff1 100644
--- a/src/include/storage/barrier.h
+++ b/src/include/storage/barrier.h
@@ -33,7 +33,7 @@ extern slock_t dummy_spinlock;
*
* A read barrier must act as a compiler barrier, and in addition must
* guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier. Similarly, a write barrier acts
+ * any loads issued after the barrier. Similarly, a write barrier acts
* as a compiler barrier, and also orders stores. Read and write barriers
* are thus weaker than a full memory barrier, but stronger than a compiler
* barrier. In practice, on machines with strong memory ordering, read and
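As an illustration of the ordering contract described in this comment, here is a minimal sketch of a publish/consume pair, assuming the pg_write_barrier()/pg_read_barrier() macros provided by this header; the shared variables shared_data and data_ready are hypothetical.

#include "postgres.h"
#include "storage/barrier.h"

/* hypothetical payload/flag pair living in shared memory */
extern volatile int shared_data;
extern volatile bool data_ready;

void
publish_value(int v)
{
	shared_data = v;		/* store the payload first */
	pg_write_barrier();		/* order the payload store before the flag store */
	data_ready = true;
}

int
consume_value(void)
{
	while (!data_ready)
		;					/* spin until the flag becomes visible */
	pg_read_barrier();		/* order the flag load before the payload load */
	return shared_data;
}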
diff --git a/src/include/storage/block.h b/src/include/storage/block.h
index bc503cfacc..0a61103cf5 100644
--- a/src/include/storage/block.h
+++ b/src/include/storage/block.h
@@ -37,7 +37,7 @@ typedef uint32 BlockNumber;
/*
* BlockId:
*
- * this is a storage type for BlockNumber. in other words, this type
+ * this is a storage type for BlockNumber. in other words, this type
* is used for on-disk structures (e.g., in HeapTupleData) whereas
* BlockNumber is the type on which calculations are performed (e.g.,
* in access method code).
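To make the on-disk vs. computational split concrete, a small sketch assuming the BlockIdSet()/BlockIdGetBlockNumber() macros from this header:

#include "postgres.h"
#include "storage/block.h"

/* pack a BlockNumber into the on-disk BlockId form and read it back */
static BlockNumber
roundtrip_block(BlockNumber blkno)
{
	BlockIdData bid;

	BlockIdSet(&bid, blkno);			/* store into the on-disk representation */
	return BlockIdGetBlockNumber(&bid);	/* recover the 32-bit block number */
}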
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index 93a0030c3e..c019013e72 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -114,9 +114,9 @@ typedef struct buftag
*
* Note: buf_hdr_lock must be held to examine or change the tag, flags,
* usage_count, refcount, or wait_backend_pid fields. buf_id field never
- * changes after initialization, so does not need locking. freeNext is
+ * changes after initialization, so does not need locking. freeNext is
* protected by the BufFreelistLock not buf_hdr_lock. The LWLocks can take
- * care of themselves. The buf_hdr_lock is *not* used to control access to
+ * care of themselves. The buf_hdr_lock is *not* used to control access to
* the data in the buffer!
*
* An exception is that if we have the buffer pinned, its tag can't change
@@ -127,7 +127,7 @@ typedef struct buftag
*
* We can't physically remove items from a disk page if another backend has
* the buffer pinned. Hence, a backend may need to wait for all other pins
- * to go away. This is signaled by storing its own PID into
+ * to go away. This is signaled by storing its own PID into
* wait_backend_pid and setting flag bit BM_PIN_COUNT_WAITER. At present,
* there can be only one such waiter per buffer.
*
@@ -147,7 +147,7 @@ typedef struct sbufdesc
int buf_id; /* buffer's index number (from 0) */
int freeNext; /* link in freelist chain */
- LWLock *io_in_progress_lock; /* to wait for I/O to complete */
+ LWLock *io_in_progress_lock; /* to wait for I/O to complete */
LWLock *content_lock; /* to lock access to buffer contents */
} BufferDesc;
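A minimal sketch of the locking rule spelled out above, assuming the LockBufHdr()/UnlockBufHdr() spinlock macros from this header; reading refcount without holding the header lock would violate the rule.

#include "postgres.h"
#include "storage/buf_internals.h"

/* examine a buffer header field under buf_hdr_lock, per the rule above */
static unsigned
get_buffer_refcount(volatile BufferDesc *buf)
{
	unsigned	refcount;

	LockBufHdr(buf);			/* take the per-buffer header spinlock */
	refcount = buf->refcount;	/* safe to read while the header is locked */
	UnlockBufHdr(buf);
	return refcount;
}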
diff --git a/src/include/storage/bufpage.h b/src/include/storage/bufpage.h
index c222c3229f..d96e375f3f 100644
--- a/src/include/storage/bufpage.h
+++ b/src/include/storage/bufpage.h
@@ -27,7 +27,7 @@
* disk page is always a slotted page of the form:
*
* +----------------+---------------------------------+
- * | PageHeaderData | linp1 linp2 linp3 ... |
+ * | PageHeaderData | linp1 linp2 linp3 ... |
* +-----------+----+---------------------------------+
* | ... linpN | |
* +-----------+--------------------------------------+
@@ -35,7 +35,7 @@
* | |
* | v pd_upper |
* +-------------+------------------------------------+
- * | | tupleN ... |
+ * | | tupleN ... |
* +-------------+------------------+-----------------+
* | ... tuple3 tuple2 tuple1 | "special space" |
* +--------------------------------+-----------------+
@@ -66,7 +66,7 @@
*
* AM-specific per-page data (if any) is kept in the area marked "special
* space"; each AM has an "opaque" structure defined somewhere that is
- * stored as the page trailer. an access method should always
+ * stored as the page trailer. an access method should always
* initialize its pages with PageInit and then set its own opaque
* fields.
*/
@@ -128,7 +128,7 @@ typedef struct
* there are no flag bits relating to checksums.
*
* pd_prune_xid is a hint field that helps determine whether pruning will be
- * useful. It is currently unused in index pages.
+ * useful. It is currently unused in index pages.
*
* The page version number and page size are packed together into a single
* uint16 field. This is for historical reasons: before PostgreSQL 7.3,
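A sketch of the initialization rule stated above: PageInit() first, then fill in the AM's opaque area. The opaque struct here is hypothetical; PageInit() and PageGetSpecialPointer() are declared in this header.

#include "postgres.h"
#include "storage/bufpage.h"

/* hypothetical per-page trailer for some access method */
typedef struct MyAMPageOpaqueData
{
	uint16		flags;
} MyAMPageOpaqueData;

static void
myam_init_page(Page page)
{
	MyAMPageOpaqueData *opaque;

	/* lay out header, empty line-pointer array, and special space */
	PageInit(page, BLCKSZ, sizeof(MyAMPageOpaqueData));

	/* then set the AM's own opaque fields in the "special space" */
	opaque = (MyAMPageOpaqueData *) PageGetSpecialPointer(page);
	opaque->flags = 0;
}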
diff --git a/src/include/storage/dsm.h b/src/include/storage/dsm.h
index 272787adc6..1d0110d4b2 100644
--- a/src/include/storage/dsm.h
+++ b/src/include/storage/dsm.h
@@ -18,7 +18,7 @@
typedef struct dsm_segment dsm_segment;
/* Startup and shutdown functions. */
-struct PGShmemHeader; /* avoid including pg_shmem.h */
+struct PGShmemHeader; /* avoid including pg_shmem.h */
extern void dsm_cleanup_using_control_segment(dsm_handle old_control_handle);
extern void dsm_postmaster_startup(struct PGShmemHeader *);
extern void dsm_backend_shutdown(void);
@@ -50,7 +50,7 @@ typedef void (*on_dsm_detach_callback) (dsm_segment *, Datum arg);
extern void on_dsm_detach(dsm_segment *seg,
on_dsm_detach_callback function, Datum arg);
extern void cancel_on_dsm_detach(dsm_segment *seg,
- on_dsm_detach_callback function, Datum arg);
+ on_dsm_detach_callback function, Datum arg);
extern void reset_on_dsm_detach(void);
#endif /* DSM_H */
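For illustration, a sketch of registering a detach callback with on_dsm_detach(); the callback type is the one shown above, while the cleanup logic itself is hypothetical.

#include "postgres.h"
#include "storage/dsm.h"

/* hypothetical cleanup hook run when the segment is detached */
static void
my_segment_cleanup(dsm_segment *seg, Datum arg)
{
	/* release whatever backend-local state was tied to this segment */
}

static void
attach_cleanup(dsm_segment *seg)
{
	/* run my_segment_cleanup(seg, 0) automatically at detach time */
	on_dsm_detach(seg, my_segment_cleanup, (Datum) 0);
}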
diff --git a/src/include/storage/dsm_impl.h b/src/include/storage/dsm_impl.h
index fda551489f..6e2a013411 100644
--- a/src/include/storage/dsm_impl.h
+++ b/src/include/storage/dsm_impl.h
@@ -40,7 +40,7 @@
#endif
/* GUC. */
-extern int dynamic_shared_memory_type;
+extern int dynamic_shared_memory_type;
/*
* Directory for on-disk state.
diff --git a/src/include/storage/ipc.h b/src/include/storage/ipc.h
index 8b9f10b785..52aff5bbe5 100644
--- a/src/include/storage/ipc.h
+++ b/src/include/storage/ipc.h
@@ -4,7 +4,7 @@
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
- * to do with IPC. The functionality here is concerned with managing
+ * to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
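A minimal sketch of the exit-time cleanup machinery this header provides: on_proc_exit() registers a callback that runs when the backend exits. The callback body here is hypothetical.

#include "postgres.h"
#include "storage/ipc.h"

/* hypothetical cleanup routine; 'code' is the process exit code */
static void
my_exit_cleanup(int code, Datum arg)
{
	/* close files, release external resources, etc. */
}

static void
register_exit_cleanup(void)
{
	on_proc_exit(my_exit_cleanup, (Datum) 0);
}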
diff --git a/src/include/storage/itemid.h b/src/include/storage/itemid.h
index a91cf97917..bf2c4bd826 100644
--- a/src/include/storage/itemid.h
+++ b/src/include/storage/itemid.h
@@ -31,7 +31,7 @@ typedef struct ItemIdData
typedef ItemIdData *ItemId;
/*
- * lp_flags has these possible states. An UNUSED line pointer is available
+ * lp_flags has these possible states. An UNUSED line pointer is available
* for immediate re-use, the other states are not.
*/
#define LP_UNUSED 0 /* unused (should always have lp_len=0) */
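To illustrate the reuse rule above, a small sketch using the ItemIdIsUsed() and ItemIdSetUnused() macros from this header:

#include "postgres.h"
#include "storage/itemid.h"

/* mark a line pointer reusable only if it is not already LP_UNUSED */
static void
recycle_if_used(ItemId lp)
{
	if (ItemIdIsUsed(lp))		/* any state other than LP_UNUSED */
		ItemIdSetUnused(lp);	/* now available for immediate re-use */
}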
diff --git a/src/include/storage/itemptr.h b/src/include/storage/itemptr.h
index 0b81d53f5f..78766d0698 100644
--- a/src/include/storage/itemptr.h
+++ b/src/include/storage/itemptr.h
@@ -29,7 +29,7 @@
* tuple header on disk, it's very important not to waste space with
* structure padding bytes. The struct is designed to be six bytes long
* (it contains three int16 fields) but a few compilers will pad it to
- * eight bytes unless coerced. We apply appropriate persuasion where
+ * eight bytes unless coerced. We apply appropriate persuasion where
* possible, and to cope with unpersuadable compilers, we try to use
* "SizeOfIptrData" rather than "sizeof(ItemPointerData)" when computing
* on-disk sizes.
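A small sketch of the intended usage, with the ItemPointerSet(), ItemPointerGetBlockNumber(), and ItemPointerGetOffsetNumber() accessors declared in this header:

#include "postgres.h"
#include "storage/itemptr.h"

/* build a TID and pull its two halves back out */
static void
tid_roundtrip(BlockNumber blkno, OffsetNumber offnum)
{
	ItemPointerData tid;

	ItemPointerSet(&tid, blkno, offnum);
	Assert(ItemPointerGetBlockNumber(&tid) == blkno);
	Assert(ItemPointerGetOffsetNumber(&tid) == offnum);
}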
diff --git a/src/include/storage/large_object.h b/src/include/storage/large_object.h
index a85b108c38..0d81a4bc1b 100644
--- a/src/include/storage/large_object.h
+++ b/src/include/storage/large_object.h
@@ -70,7 +70,7 @@ typedef struct LargeObjectDesc
#define LOBLKSIZE (BLCKSZ / 4)
/*
- * Maximum length in bytes for a large object. To make this larger, we'd
+ * Maximum length in bytes for a large object. To make this larger, we'd
* have to widen pg_largeobject.pageno as well as various internal variables.
*/
#define MAX_LARGE_OBJECT_SIZE ((int64) INT_MAX * LOBLKSIZE)
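For concreteness: with the default BLCKSZ of 8192, LOBLKSIZE is 2048 bytes, so MAX_LARGE_OBJECT_SIZE works out to INT_MAX * 2048 bytes, i.e. just under 4 TiB per large object.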
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index ceeab9fc8a..4c49e3c6e6 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -43,7 +43,7 @@ extern bool Debug_deadlocks;
/*
* Top-level transactions are identified by VirtualTransactionIDs comprising
* the BackendId of the backend running the xact, plus a locally-assigned
- * LocalTransactionId. These are guaranteed unique over the short term,
+ * LocalTransactionId. These are guaranteed unique over the short term,
* but will be reused after a database restart; hence they should never
* be stored on disk.
*
@@ -157,7 +157,7 @@ typedef uint16 LOCKMETHODID;
/*
* LOCKTAG is the key information needed to look up a LOCK item in the
- * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
+ * lock hashtable. A LOCKTAG value uniquely identifies a lockable object.
*
* The LockTagType enum defines the different kinds of objects we can lock.
* We can handle up to 256 different LockTagTypes.
@@ -210,7 +210,7 @@ typedef struct LOCKTAG
/*
* These macros define how we map logical IDs of lockable objects into
- * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
+ * the physical fields of LOCKTAG. Use these to set up LOCKTAG values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_LOCKTAG_RELATION(locktag,dboid,reloid) \
@@ -322,14 +322,14 @@ typedef struct LOCK
* a PROCLOCK struct.
*
* PROCLOCKTAG is the key information needed to look up a PROCLOCK item in the
- * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
+ * proclock hashtable. A PROCLOCKTAG value uniquely identifies the combination
* of a lockable object and a holder/waiter for that object. (We can use
* pointers here because the PROCLOCKTAG need only be unique for the lifespan
* of the PROCLOCK, and it will never outlive the lock or the proc.)
*
* Internally to a backend, it is possible for the same lock to be held
* for different purposes: the backend tracks transaction locks separately
- * from session locks. However, this is not reflected in the shared-memory
+ * from session locks. However, this is not reflected in the shared-memory
* state: we only track which backend(s) hold the lock. This is OK since a
* backend can never block itself.
*
@@ -340,7 +340,7 @@ typedef struct LOCK
* as soon as convenient.
*
* releaseMask is workspace for LockReleaseAll(): it shows the locks due
- * to be released during the current call. This must only be examined or
+ * to be released during the current call. This must only be examined or
* set by the backend owning the PROCLOCK.
*
* Each PROCLOCK object is linked into lists for both the associated LOCK
@@ -373,7 +373,7 @@ typedef struct PROCLOCK
/*
* Each backend also maintains a local hash table with information about each
- * lock it is currently interested in. In particular the local table counts
+ * lock it is currently interested in. In particular the local table counts
* the number of times that lock has been acquired. This allows multiple
* requests for the same lock to be executed without additional accesses to
* shared memory. We also track the number of lock acquisitions per
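As an illustration of the macro-based setup described above, a sketch that locks a relation by OID using SET_LOCKTAG_RELATION() together with the lock manager's LockAcquire()/LockRelease(). The OIDs are placeholders; real callers normally go through the lmgr.c wrappers such as LockRelationOid().

#include "postgres.h"
#include "storage/lock.h"

/* take and release a relation-level lock directly via a LOCKTAG */
static void
lock_relation_by_oid(Oid dboid, Oid reloid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, dboid, reloid);

	(void) LockAcquire(&tag, AccessShareLock, false, false);
	/* ... inspect the relation ... */
	LockRelease(&tag, AccessShareLock, false);
}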
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 3a1953383e..175fae3a88 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -50,8 +50,8 @@ typedef struct LWLock
char exclusive; /* # of exclusive holders (0 or 1) */
int shared; /* # of shared holders (0..MaxBackends) */
int tranche; /* tranche ID */
- struct PGPROC *head; /* head of list of waiting PGPROCs */
- struct PGPROC *tail; /* tail of list of waiting PGPROCs */
+ struct PGPROC *head; /* head of list of waiting PGPROCs */
+ struct PGPROC *tail; /* tail of list of waiting PGPROCs */
/* tail is undefined when head is NULL */
} LWLock;
@@ -150,7 +150,7 @@ extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
#define BUFFER_MAPPING_LWLOCK_OFFSET NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
-#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
+#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
(NUM_INDIVIDUAL_LWLOCKS + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
@@ -205,7 +205,7 @@ extern LWLock *LWLockAssign(void);
* mapped at the same address in all coordinating backends, so storing the
* registration in the main shared memory segment wouldn't work for that case.
*/
-extern int LWLockNewTrancheId(void);
+extern int LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int, LWLockTranche *);
extern void LWLockInitialize(LWLock *, int tranche_id);
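A sketch of the tranche mechanism these declarations expose: allocate a tranche ID, describe the lock array, and initialize the lock. The LWLockTranche fields used here (name, array_base, array_stride) follow the 9.4 definition earlier in this header; the module setup itself is hypothetical, and the lock is assumed to live in shared memory.

#include "postgres.h"
#include "storage/lwlock.h"

/* hypothetical tranche covering a single lock placed in shared memory */
static LWLockTranche my_tranche;

static void
register_my_lock(LWLock *lock_in_shmem)
{
	int			tranche_id = LWLockNewTrancheId();

	my_tranche.name = "my_module";
	my_tranche.array_base = lock_in_shmem;
	my_tranche.array_stride = sizeof(LWLock);
	LWLockRegisterTranche(tranche_id, &my_tranche);

	LWLockInitialize(lock_in_shmem, tranche_id);
}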
diff --git a/src/include/storage/pg_sema.h b/src/include/storage/pg_sema.h
index 51ded817e7..c53aa9795b 100644
--- a/src/include/storage/pg_sema.h
+++ b/src/include/storage/pg_sema.h
@@ -6,7 +6,7 @@
* PostgreSQL requires counting semaphores (the kind that keep track of
* multiple unlock operations, and will allow an equal number of subsequent
* lock operations before blocking). The underlying implementation is
- * not the same on every platform. This file defines the API that must
+ * not the same on every platform. This file defines the API that must
* be provided by each port.
*
*
diff --git a/src/include/storage/pg_shmem.h b/src/include/storage/pg_shmem.h
index ab28ebee84..76bba445bd 100644
--- a/src/include/storage/pg_shmem.h
+++ b/src/include/storage/pg_shmem.h
@@ -10,7 +10,7 @@
*
* To simplify life for the SysV implementation, the ID is assumed to
* consist of two unsigned long values (these are key and ID in SysV
- * terms). Other platforms may ignore the second value if they need
+ * terms). Other platforms may ignore the second value if they need
* only one ID number.
*
*
@@ -42,7 +42,7 @@ typedef struct PGShmemHeader /* standard header for all Postgres shmem */
} PGShmemHeader;
/* GUC variable */
-extern int huge_pages;
+extern int huge_pages;
/* Possible values for huge_pages */
typedef enum
@@ -50,7 +50,7 @@ typedef enum
HUGE_PAGES_OFF,
HUGE_PAGES_ON,
HUGE_PAGES_TRY
-} HugePagesType;
+} HugePagesType;
#ifndef WIN32
extern unsigned long UsedShmemSegID;
diff --git a/src/include/storage/pos.h b/src/include/storage/pos.h
index bc41502a65..662a717e3c 100644
--- a/src/include/storage/pos.h
+++ b/src/include/storage/pos.h
@@ -20,7 +20,7 @@
* been changed to just <offset> as the notion of having multiple pages
* within a block has been removed.
*
- * the 'offset' abstraction is somewhat confusing. it is NOT a byte
+ * the 'offset' abstraction is somewhat confusing. it is NOT a byte
* offset within the page; instead, it is an offset into the line
* pointer array contained on every page that store (heap or index)
* tuples.
diff --git a/src/include/storage/predicate_internals.h b/src/include/storage/predicate_internals.h
index 9652d00c2c..afbd782a21 100644
--- a/src/include/storage/predicate_internals.h
+++ b/src/include/storage/predicate_internals.h
@@ -128,7 +128,7 @@ typedef struct SERIALIZABLEXACT
* The following types are used to provide an ad hoc list for holding
* SERIALIZABLEXACT objects. An HTAB is overkill, since there is no need to
* access these by key -- there are direct pointers to these objects where
- * needed. If a shared memory list is created, these types can probably be
+ * needed. If a shared memory list is created, these types can probably be
* eliminated in favor of using the general solution.
*/
typedef struct PredXactListElementData
@@ -311,9 +311,9 @@ typedef struct PREDICATELOCKTAG
* The PREDICATELOCK struct represents an individual lock.
*
* An entry can be created here when the related database object is read, or
- * by promotion of multiple finer-grained targets. All entries related to a
+ * by promotion of multiple finer-grained targets. All entries related to a
* serializable transaction are removed when that serializable transaction is
- * cleaned up. Entries can also be removed when they are combined into a
+ * cleaned up. Entries can also be removed when they are combined into a
* single coarser-grained lock entry.
*/
typedef struct PREDICATELOCK
@@ -384,7 +384,7 @@ typedef struct PredicateLockData
/*
* These macros define how we map logical IDs of lockable objects into the
- * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
+ * physical fields of PREDICATELOCKTARGETTAG. Use these to set up values,
* rather than accessing the fields directly. Note multiple eval of target!
*/
#define SET_PREDICATELOCKTARGETTAG_RELATION(locktag,dboid,reloid) \
@@ -450,7 +450,7 @@ typedef struct TwoPhasePredicateXactRecord
typedef struct TwoPhasePredicateLockRecord
{
PREDICATELOCKTARGETTAG target;
- uint32 filler; /* to avoid length change in back-patched fix */
+ uint32 filler; /* to avoid length change in back-patched fix */
} TwoPhasePredicateLockRecord;
typedef struct TwoPhasePredicateRecord
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 5218b448cd..c23f4da5b6 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -21,7 +21,7 @@
/*
* Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
- * for non-aborted subtransactions of its current top transaction. These
+ * for non-aborted subtransactions of its current top transaction. These
* have to be treated as running XIDs by other backends.
*
* We also keep track of whether the cache overflowed (ie, the transaction has
@@ -41,8 +41,9 @@ struct XidCache
#define PROC_IS_AUTOVACUUM 0x01 /* is it an autovac worker? */
#define PROC_IN_VACUUM 0x02 /* currently running lazy vacuum */
#define PROC_IN_ANALYZE 0x04 /* currently running analyze */
-#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
-#define PROC_IN_LOGICAL_DECODING 0x10 /* currently doing logical decoding */
+#define PROC_VACUUM_FOR_WRAPAROUND 0x08 /* set by autovac only */
+#define PROC_IN_LOGICAL_DECODING 0x10 /* currently doing logical
+ * decoding */
/* flags reset at EOXact */
#define PROC_VACUUM_STATE_MASK \
@@ -60,7 +61,7 @@ struct XidCache
* Each backend has a PGPROC struct in shared memory. There is also a list of
* currently-unused PGPROC structs that will be reallocated to new backends.
*
- * links: list link for any list the PGPROC is in. When waiting for a lock,
+ * links: list link for any list the PGPROC is in. When waiting for a lock,
* the PGPROC is linked into that lock's waitProcs queue. A recycled PGPROC
* is linked into ProcGlobal's freeProcs list.
*
@@ -132,7 +133,7 @@ struct PGPROC
struct XidCache subxids; /* cache for subtransaction XIDs */
- /* Per-backend LWLock. Protects fields below. */
+ /* Per-backend LWLock. Protects fields below. */
LWLock *backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
@@ -151,7 +152,7 @@ extern PGDLLIMPORT struct PGXACT *MyPgXact;
/*
* Prior to PostgreSQL 9.2, the fields below were stored as part of the
- * PGPROC. However, benchmarking revealed that packing these particular
+ * PGPROC. However, benchmarking revealed that packing these particular
* members into a separate array as tightly as possible sped up GetSnapshotData
* considerably on systems with many CPU cores, by reducing the number of
* cache lines needing to be fetched. Thus, think very carefully before adding
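For illustration, a sketch of testing one of the status flags defined above through the tightly packed per-backend structure, assuming MyPgXact and its vacuumFlags field as in 9.4; checks against other backends' entries would additionally require holding ProcArrayLock.

#include "postgres.h"
#include "storage/proc.h"

/* is the current backend running a lazy VACUUM? */
static bool
current_backend_in_vacuum(void)
{
	return (MyPgXact->vacuumFlags & PROC_IN_VACUUM) != 0;
}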
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index d0b4103a09..0c4611bda2 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -83,6 +83,6 @@ extern void ProcArraySetReplicationSlotXmin(TransactionId xmin,
TransactionId catalog_xmin, bool already_locked);
extern void ProcArrayGetReplicationSlotXmin(TransactionId *xmin,
- TransactionId *catalog_xmin);
+ TransactionId *catalog_xmin);
#endif /* PROCARRAY_H */
diff --git a/src/include/storage/relfilenode.h b/src/include/storage/relfilenode.h
index d5b772ca9f..d5809dd4a0 100644
--- a/src/include/storage/relfilenode.h
+++ b/src/include/storage/relfilenode.h
@@ -27,7 +27,7 @@
* spcNode identifies the tablespace of the relation. It corresponds to
* pg_tablespace.oid.
*
- * dbNode identifies the database of the relation. It is zero for
+ * dbNode identifies the database of the relation. It is zero for
* "shared" relations (those common to all databases of a cluster).
* Nonzero dbNode values correspond to pg_database.oid.
*
@@ -50,7 +50,7 @@
* is a "mapped" relation, whose current true filenode number is available
* from relmapper.c. Again, this case is NOT allowed in RelFileNodes.
*
- * Note: various places use RelFileNode in hashtable keys. Therefore,
+ * Note: various places use RelFileNode in hashtable keys. Therefore,
* there *must not* be any unused padding bytes in this struct. That
* should be safe as long as all the fields are of type Oid.
*/
@@ -63,7 +63,7 @@ typedef struct RelFileNode
/*
* Augmenting a relfilenode with the backend ID provides all the information
- * we need to locate the physical storage. The backend ID is InvalidBackendId
+ * we need to locate the physical storage. The backend ID is InvalidBackendId
* for regular relations (those accessible to more than one backend), or the
* owning backend's ID for backend-local relations. Backend-local relations
* are always transient and removed in case of a database crash; they are
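A small sketch of building a RelFileNode key and comparing it with the RelFileNodeEquals() macro from this header; the OIDs are placeholders.

#include "postgres.h"
#include "storage/relfilenode.h"

/* build a physical-relation key; all three fields are plain Oids */
static bool
same_physical_relation(Oid tbs, Oid db, Oid relfilenode, RelFileNode other)
{
	RelFileNode node;

	node.spcNode = tbs;			/* tablespace */
	node.dbNode = db;			/* 0 for shared relations */
	node.relNode = relfilenode;	/* pg_class.relfilenode */

	return RelFileNodeEquals(node, other);
}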
diff --git a/src/include/storage/shm_mq.h b/src/include/storage/shm_mq.h
index c7dd90532b..5bae3807af 100644
--- a/src/include/storage/shm_mq.h
+++ b/src/include/storage/shm_mq.h
@@ -28,9 +28,9 @@ typedef struct shm_mq_handle shm_mq_handle;
/* Possible results of a send or receive operation. */
typedef enum
{
- SHM_MQ_SUCCESS, /* Sent or received a message. */
- SHM_MQ_WOULD_BLOCK, /* Not completed; retry later. */
- SHM_MQ_DETACHED /* Other process has detached queue. */
+ SHM_MQ_SUCCESS, /* Sent or received a message. */
+ SHM_MQ_WOULD_BLOCK, /* Not completed; retry later. */
+ SHM_MQ_DETACHED /* Other process has detached queue. */
} shm_mq_result;
/*
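To illustrate how callers are meant to react to these result codes, a sketch of a non-blocking send loop, assuming the 9.4 shm_mq_send() signature with a nowait flag; the retry handling is simplified (real code would typically wait on the process latch rather than spin).

#include "postgres.h"
#include "miscadmin.h"
#include "storage/shm_mq.h"

/* try to enqueue a message, retrying while the queue is full */
static bool
send_message(shm_mq_handle *mqh, const void *data, Size nbytes)
{
	for (;;)
	{
		shm_mq_result res = shm_mq_send(mqh, nbytes, data, true);

		if (res == SHM_MQ_SUCCESS)
			return true;		/* message fully written into the queue */
		if (res == SHM_MQ_DETACHED)
			return false;		/* receiver is gone; give up */

		/* SHM_MQ_WOULD_BLOCK: queue is full right now, retry later */
		CHECK_FOR_INTERRUPTS();
	}
}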
diff --git a/src/include/storage/shm_toc.h b/src/include/storage/shm_toc.h
index cb5477e685..6f0804aeef 100644
--- a/src/include/storage/shm_toc.h
+++ b/src/include/storage/shm_toc.h
@@ -40,8 +40,8 @@ extern void *shm_toc_lookup(shm_toc *toc, uint64 key);
*/
typedef struct
{
- Size space_for_chunks;
- Size number_of_keys;
+ Size space_for_chunks;
+ Size number_of_keys;
} shm_toc_estimator;
#define shm_toc_initialize_estimator(e) \
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index d5bb850337..812ea95e9b 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -34,8 +34,8 @@
* updates and deletions in system catalogs (see CacheInvalidateHeapTuple).
* An update can generate two inval events, one for the old tuple and one for
* the new, but this is reduced to one event if the tuple's hash key doesn't
- * change. Note that the inval events themselves don't actually say whether
- * the tuple is being inserted or deleted. Also, since we transmit only a
+ * change. Note that the inval events themselves don't actually say whether
+ * the tuple is being inserted or deleted. Also, since we transmit only a
* hash key, there is a small risk of unnecessary invalidations due to chance
* matches of hash keys.
*
diff --git a/src/include/storage/sinvaladt.h b/src/include/storage/sinvaladt.h
index 9b45b3efef..72f532e4af 100644
--- a/src/include/storage/sinvaladt.h
+++ b/src/include/storage/sinvaladt.h
@@ -4,7 +4,7 @@
* POSTGRES shared cache invalidation data manager.
*
* The shared cache invalidation manager is responsible for transmitting
- * invalidation messages between backends. Any message sent by any backend
+ * invalidation messages between backends. Any message sent by any backend
* must be delivered to all already-running backends before it can be
* forgotten. (If we run out of space, we instead deliver a "RESET"
* message to backends that have fallen too far behind.)
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index c7ab235ba4..ba7c909451 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -29,7 +29,7 @@
*
* An SMgrRelation may have an "owner", which is just a pointer to it from
* somewhere else; smgr.c will clear this pointer if the SMgrRelation is
- * closed. We use this to avoid dangling pointers from relcache to smgr
+ * closed. We use this to avoid dangling pointers from relcache to smgr
* without having to make the smgr explicitly aware of relcache. There
* can't be more than one "owner" pointer per SMgrRelation, but that's
* all we need.
@@ -48,7 +48,7 @@ typedef struct SMgrRelationData
/*
* These next three fields are not actually used or manipulated by smgr,
* except that they are reset to InvalidBlockNumber upon a cache flush
- * event (in particular, upon truncation of the relation). Higher levels
+ * event (in particular, upon truncation of the relation). Higher levels
* store cached state here so that it will be reset when truncation
* happens. In all three cases, InvalidBlockNumber means "unknown".
*/
@@ -60,7 +60,7 @@ typedef struct SMgrRelationData
/*
* Fields below here are intended to be private to smgr.c and its
- * submodules. Do not touch them from elsewhere.
+ * submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
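For illustration, a sketch of how a higher level obtains a relation's size through the smgr layer (which may serve it from the cached value mentioned above or recompute it), assuming the 9.4 signatures of smgropen() and smgrnblocks():

#include "postgres.h"
#include "storage/smgr.h"

/* ask the storage manager how many blocks a relation fork has */
static BlockNumber
relation_size_in_blocks(RelFileNode rnode, BackendId backend)
{
	SMgrRelation reln = smgropen(rnode, backend);

	/* MAIN_FORKNUM is the relation's main data fork */
	return smgrnblocks(reln, MAIN_FORKNUM);
}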
diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h
index 7ee2fedf44..b5fd964c0f 100644
--- a/src/include/storage/spin.h
+++ b/src/include/storage/spin.h
@@ -72,11 +72,11 @@
extern int SpinlockSemas(void);
-extern Size SpinlockSemaSize(void);
+extern Size SpinlockSemaSize(void);
#ifndef HAVE_SPINLOCKS
-extern void SpinlockSemaInit(PGSemaphore);
-extern PGSemaphore SpinlockSemaArray;
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
#endif
#endif /* SPIN_H */
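As a final illustration, a minimal sketch of the spinlock API this header fronts (SpinLockInit()/SpinLockAcquire()/SpinLockRelease()); the shared counter is hypothetical, and spinlocks are intended only for very short critical sections like this one.

#include "postgres.h"
#include "storage/spin.h"

/* hypothetical shared-memory counter protected by a spinlock */
typedef struct SharedCounter
{
	slock_t		mutex;
	int			value;
} SharedCounter;

static void
counter_init(SharedCounter *c)
{
	SpinLockInit(&c->mutex);
	c->value = 0;
}

static int
counter_increment(SharedCounter *c)
{
	int			result;

	SpinLockAcquire(&c->mutex);		/* keep the critical section tiny */
	result = ++c->value;
	SpinLockRelease(&c->mutex);
	return result;
}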