Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/aggregatecmds.c  |  6
-rw-r--r--  src/backend/commands/analyze.c        | 14
-rw-r--r--  src/backend/commands/async.c          | 22
-rw-r--r--  src/backend/commands/cluster.c        | 35
-rw-r--r--  src/backend/commands/comment.c        |  6
-rw-r--r--  src/backend/commands/conversioncmds.c | 12
-rw-r--r--  src/backend/commands/copy.c           | 30
-rw-r--r--  src/backend/commands/dbcommands.c     | 36
-rw-r--r--  src/backend/commands/explain.c        |  8
-rw-r--r--  src/backend/commands/indexcmds.c      | 14
-rw-r--r--  src/backend/commands/opclasscmds.c    | 10
-rw-r--r--  src/backend/commands/schemacmds.c     |  4
-rw-r--r--  src/backend/commands/sequence.c       | 14
-rw-r--r--  src/backend/commands/tablecmds.c      | 82
-rw-r--r--  src/backend/commands/trigger.c        | 21
-rw-r--r--  src/backend/commands/typecmds.c       | 14
-rw-r--r--  src/backend/commands/user.c           | 24
-rw-r--r--  src/backend/commands/vacuum.c         | 75
-rw-r--r--  src/backend/commands/vacuumlazy.c     | 30
-rw-r--r--  src/backend/commands/variable.c       | 10
-rw-r--r--  src/backend/commands/view.c           |  5
21 files changed, 237 insertions(+), 235 deletions(-)
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 160cd8e488..67c39e5f36 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.31 2005/11/22 18:17:08 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -119,8 +119,8 @@ DefineAggregate(List *names, List *parameters)
/*
* look up the aggregate's base type (input datatype) and transtype.
*
- * We have historically allowed the command to look like basetype = 'ANY' so
- * we must do a case-insensitive comparison for the name ANY. Ugh.
+ * We have historically allowed the command to look like basetype = 'ANY'
+ * so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need to be
* able to store values of the transtype. However, we can allow
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 431e39f3b0..095ffe783f 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.90 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -891,9 +891,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* If we didn't find as many tuples as we wanted then we're done. No sort
* is needed, since they're already in order.
*
- * Otherwise we need to sort the collected tuples by position (itempointer).
- * It's not worth worrying about corner cases where the tuples are already
- * sorted.
+ * Otherwise we need to sort the collected tuples by position
+ * (itempointer). It's not worth worrying about corner cases where the
+ * tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
@@ -1849,9 +1849,9 @@ compute_scalar_stats(VacAttrStatsP stats,
* Now scan the values in order, find the most common ones, and also
* accumulate ordering-correlation statistics.
*
- * To determine which are most common, we first have to count the number
- * of duplicates of each value. The duplicates are adjacent in the
- * sorted list, so a brute-force approach is to compare successive
+ * To determine which are most common, we first have to count the
+ * number of duplicates of each value. The duplicates are adjacent in
+ * the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
* completely redundant with work that was done during the sort. (The
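The compute_scalar_stats comment above is about counting runs of equal values in data that has already been sorted. A minimal standalone sketch of that brute-force duplicate counting, with an invented helper name rather than anything from analyze.c:

    #include <stdio.h>

    /* count distinct values in an already-sorted array by comparing
     * each element with its predecessor (the "brute-force" approach
     * the comment contrasts with reusing comparisons from the sort) */
    static int
    count_distinct_sorted(const int *vals, int n)
    {
        int ndistinct = 0;
        int i;

        for (i = 0; i < n; i++)
        {
            /* a new run starts whenever the value differs from the one before it */
            if (i == 0 || vals[i] != vals[i - 1])
                ndistinct++;
        }
        return ndistinct;
    }

    int
    main(void)
    {
        int sample[] = {1, 1, 2, 5, 5, 5, 9};

        printf("%d distinct values\n", count_distinct_sorted(sample, 7));   /* prints 4 */
        return 0;
    }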
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 2186aa8d28..1ebee1a3fc 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.127 2005/11/03 17:11:34 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.128 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -820,18 +820,18 @@ EnableNotifyInterrupt(void)
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
- * re-clear the enable flag before going off to do the service work.
- * (That prevents re-entrant invocation of ProcessIncomingNotify() if
- * another interrupt occurs.) If an interrupt comes in between the setting
- * and clearing of notifyInterruptEnabled, then it will have done the
- * service work and left notifyInterruptOccurred zero, so we have to check
- * again after clearing enable. The whole thing has to be in a loop in
- * case another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no unserviced
+ * re-clear the enable flag before going off to do the service work. (That
+ * prevents re-entrant invocation of ProcessIncomingNotify() if another
+ * interrupt occurs.) If an interrupt comes in between the setting and
+ * clearing of notifyInterruptEnabled, then it will have done the service
+ * work and left notifyInterruptOccurred zero, so we have to check again
+ * after clearing enable. The whole thing has to be in a loop in case
+ * another interrupt occurs while we're servicing the first. Once we get
+ * out of the loop, enable is set and we know there is no unserviced
* interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this code.
- * Hopefully, they all understand what "volatile" means these days.
+ * NB: an overenthusiastic optimizing compiler could easily break this
+ * code. Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
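The EnableNotifyInterrupt comment above describes the enable-flag/occurred-flag interplay that keeps an interrupt from being lost or serviced twice. A self-contained sketch of that pattern, using generic flag and function names rather than the backend's:

    #include <signal.h>

    /* both flags are volatile so the compiler cannot cache them across
     * the signal handler; the handler sets interrupt_occurred = 1 when
     * interrupt_enabled is zero instead of servicing directly */
    static volatile sig_atomic_t interrupt_enabled = 0;
    static volatile sig_atomic_t interrupt_occurred = 0;

    static void
    service_interrupt(void)
    {
        interrupt_occurred = 0;
        /* ... do the actual service work here ... */
    }

    static void
    enable_interrupts(void)
    {
        for (;;)
        {
            interrupt_enabled = 1;
            if (!interrupt_occurred)
                break;              /* enabled, and nothing left pending */

            /* an interrupt slipped in: disable again before servicing,
             * so a nested interrupt cannot re-enter the service code */
            interrupt_enabled = 0;
            service_interrupt();
        }
    }

    int
    main(void)
    {
        enable_interrupts();
        return 0;
    }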
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 740250835d..671c8bf091 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.141 2005/10/29 00:31:51 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.142 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -177,8 +177,8 @@ cluster(ClusterStmt *stmt)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away even in case of
- * error.
+ * Since it is a child of PortalContext, it will go away even in case
+ * of error.
*/
cluster_context = AllocSetContextCreate(PortalContext,
"Cluster",
@@ -242,9 +242,9 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to check
* that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests. We *must*
- * skip the one on indisclustered since it would reject an attempt to
- * cluster a not-previously-clustered index.
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an attempt
+ * to cluster a not-previously-clustered index.
*/
if (recheck)
{
@@ -360,9 +360,9 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
RelationGetRelationName(OldIndex)),
recheck
? errhint("You may be able to work around this by marking column \"%s\" NOT NULL, or use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster specification from the table.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
: errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -651,12 +651,13 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
* We cannot simply pass the tuple to heap_insert(), for several
* reasons:
*
- * 1. heap_insert() will overwrite the commit-status fields of the tuple
- * it's handed. This would trash the source relation, which is bad
- * news if we abort later on. (This was a bug in releases thru 7.0)
+ * 1. heap_insert() will overwrite the commit-status fields of the
+ * tuple it's handed. This would trash the source relation, which is
+ * bad news if we abort later on. (This was a bug in releases thru
+ * 7.0)
*
- * 2. We'd like to squeeze out the values of any dropped columns, both to
- * save space and to ensure we have no corner-case failures. (It's
+ * 2. We'd like to squeeze out the values of any dropped columns, both
+ * to save space and to ensure we have no corner-case failures. (It's
* possible for example that the new table hasn't got a TOAST table
* and so is unable to store any large values of dropped cols.)
*
@@ -788,10 +789,10 @@ swap_relation_files(Oid r1, Oid r2)
* happen in CLUSTER if there were dropped columns in the old table, and
* in ALTER TABLE when adding or changing type of columns.
*
- * NOTE: at present, a TOAST table's only dependency is the one on its owning
- * table. If more are ever created, we'd need to use something more
- * selective than deleteDependencyRecordsFor() to get rid of only the link
- * we want.
+ * NOTE: at present, a TOAST table's only dependency is the one on its
+ * owning table. If more are ever created, we'd need to use something
+ * more selective than deleteDependencyRecordsFor() to get rid of only the
+ * link we want.
*/
if (relform1->reltoastrelid || relform2->reltoastrelid)
{
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index cf7dc06fa7..d0385428db 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.85 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -445,8 +445,8 @@ CommentDatabase(List *qualname, char *comment)
* comment on a database other than the current one. Someday this might be
* improved, but it would take a redesigned infrastructure.
*
- * When loading a dump, we may see a COMMENT ON DATABASE for the old name of
- * the database. Erroring out would prevent pg_restore from completing
+ * When loading a dump, we may see a COMMENT ON DATABASE for the old name
+ * of the database. Erroring out would prevent pg_restore from completing
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR.
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index 42bb0853a1..f32eb0539c 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.25 2005/11/21 12:49:30 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.26 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
#include "utils/syscache.h"
static void AlterConversionOwner_internal(Relation rel, Oid conversionOid,
- Oid newOwnerId);
+ Oid newOwnerId);
/*
* CREATE CONVERSION
@@ -107,7 +107,7 @@ DropConversionCommand(List *name, DropBehavior behavior, bool missing_ok)
conversionOid = FindConversionByName(name);
if (!OidIsValid(conversionOid))
{
- if (! missing_ok)
+ if (!missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -117,7 +117,7 @@ DropConversionCommand(List *name, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
- (errmsg("conversion \"%s\" does not exist, skipping",
+ (errmsg("conversion \"%s\" does not exist, skipping",
NameListToString(name))));
}
@@ -218,7 +218,7 @@ AlterConversionOwner_oid(Oid conversionOid, Oid newOwnerId)
Relation rel;
rel = heap_open(ConversionRelationId, RowExclusiveLock);
-
+
AlterConversionOwner_internal(rel, conversionOid, newOwnerId);
heap_close(rel, NoLock);
@@ -234,7 +234,7 @@ static void
AlterConversionOwner_internal(Relation rel, Oid conversionOid, Oid newOwnerId)
{
Form_pg_conversion convForm;
- HeapTuple tup;
+ HeapTuple tup;
Assert(RelationGetRelid(rel) == ConversionRelationId);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 63d88c5df0..4870e7d001 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.254 2005/11/03 17:11:34 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.255 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -127,8 +127,8 @@ typedef struct CopyStateData
/*
* These variables are used to reduce overhead in textual COPY FROM.
*
- * attribute_buf holds the separated, de-escaped text for each field of the
- * current line. The CopyReadAttributes functions return arrays of
+ * attribute_buf holds the separated, de-escaped text for each field of
+ * the current line. The CopyReadAttributes functions return arrays of
* pointers into this buffer. We avoid palloc/pfree overhead by re-using
* the buffer on each cycle.
*/
@@ -2085,8 +2085,8 @@ CopyReadLineText(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
- * into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and
+ * raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2148,8 +2148,8 @@ CopyReadLineText(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0' because of
- * the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because
+ * of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
@@ -2283,8 +2283,8 @@ CopyReadLineText(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first byte of
- * the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte
+ * of the character!
*/
if (cstate->client_only_encoding)
{
@@ -2369,8 +2369,8 @@ CopyReadLineCSV(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
- * into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and
+ * raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2475,8 +2475,8 @@ CopyReadLineCSV(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0' because of
- * the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because
+ * of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
@@ -2621,8 +2621,8 @@ CopyReadLineCSV(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first byte of
- * the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte
+ * of the character!
*/
if (cstate->client_only_encoding)
{
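Several of the copy.c hunks refer to the guaranteed pad of raw_buf, which is what lets the line scanner read one byte past the loaded data and see '\0' at EOF. A rough illustration of that sentinel-pad idea, with invented buffer names and sizes:

    #include <stdio.h>
    #include <string.h>

    #define RAW_BUF_SIZE 64

    static char raw_buf[RAW_BUF_SIZE + 1];     /* one extra byte for the pad */
    static int  raw_buf_len;

    static void
    load_buffer(const char *data)
    {
        raw_buf_len = (int) strlen(data);
        if (raw_buf_len > RAW_BUF_SIZE)
            raw_buf_len = RAW_BUF_SIZE;
        memcpy(raw_buf, data, raw_buf_len);
        raw_buf[raw_buf_len] = '\0';            /* the guaranteed pad */
    }

    int
    main(void)
    {
        int  i = 0;
        char c;

        load_buffer("a line with no terminating newline");

        /* scan for a newline; at end of data c becomes '\0' and the loop
         * stops without any explicit bounds check inside it */
        while ((c = raw_buf[i]) != '\0' && c != '\n')
            i++;
        printf("consumed %d of %d characters\n", i, raw_buf_len);
        return 0;
    }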
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 119e525d7c..5be522db86 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.174 2005/11/22 15:24:17 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.175 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -346,8 +346,8 @@ createdb(const CreatedbStmt *stmt)
src_vacuumxid = src_frozenxid = GetCurrentTransactionId();
/*
- * Preassign OID for pg_database tuple, so that we can compute db path.
- * We have to open pg_database to do this, but we don't want to take
+ * Preassign OID for pg_database tuple, so that we can compute db path. We
+ * have to open pg_database to do this, but we don't want to take
* ExclusiveLock yet, so just do it and close again.
*/
pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock);
@@ -512,14 +512,14 @@ createdb(const CreatedbStmt *stmt)
*
* (Both of these were real bugs in releases 8.0 through 8.0.3.)
*
- * In PITR replay, the first of these isn't an issue, and the second is
- * only a risk if the CREATE DATABASE and subsequent template database
- * change both occur while a base backup is being taken. There doesn't
- * seem to be much we can do about that except document it as a
- * limitation.
+ * In PITR replay, the first of these isn't an issue, and the second
+ * is only a risk if the CREATE DATABASE and subsequent template
+ * database change both occur while a base backup is being taken.
+ * There doesn't seem to be much we can do about that except document
+ * it as a limitation.
*
- * Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we
- * can avoid this.
+ * Perhaps if we ever implement CREATE DATABASE in a less cheesy way,
+ * we can avoid this.
*/
RequestCheckpoint(true, false);
@@ -586,19 +586,19 @@ dropdb(const char *dbname, bool missing_ok)
if (!get_db_info(dbname, &db_id, NULL, NULL,
&db_istemplate, NULL, NULL, NULL, NULL, NULL))
{
- if (! missing_ok)
+ if (!missing_ok)
{
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\" does not exist", dbname)));
+ (errcode(ERRCODE_UNDEFINED_DATABASE),
+ errmsg("database \"%s\" does not exist", dbname)));
}
else
{
-
+
/* Close pg_database, release the lock, since we changed nothing */
heap_close(pgdbrel, ExclusiveLock);
- ereport(NOTICE,
- (errmsg("database \"%s\" does not exist, skipping",
+ ereport(NOTICE,
+ (errmsg("database \"%s\" does not exist, skipping",
dbname)));
return;
@@ -658,8 +658,8 @@ dropdb(const char *dbname, bool missing_ok)
/*
* Delete any comments associated with the database
*
- * NOTE: this is probably dead code since any such comments should have been
- * in that database, not mine.
+ * NOTE: this is probably dead code since any such comments should have
+ * been in that database, not mine.
*/
DeleteComments(db_id, DatabaseRelationId, 0);
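The dropdb hunk above shows the missing_ok convention used throughout these files: a missing object is an error by default, but with missing_ok the command just issues a notice and skips it. A tiny sketch of that control flow with placeholder names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool
    object_exists(const char *name)
    {
        (void) name;                /* stand-in for the real catalog lookup */
        return false;
    }

    static void
    drop_object(const char *name, bool missing_ok)
    {
        if (!object_exists(name))
        {
            if (!missing_ok)
            {
                fprintf(stderr, "ERROR:  object \"%s\" does not exist\n", name);
                exit(1);
            }
            printf("NOTICE:  object \"%s\" does not exist, skipping\n", name);
            return;
        }
        /* ... actually drop the object here ... */
    }

    int
    main(void)
    {
        drop_object("nonexistent", true);   /* prints the NOTICE and returns */
        return 0;
    }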
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 08480631fe..e8cdfbad88 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.139 2005/10/21 16:43:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.140 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -199,9 +199,9 @@ ExplainOneQuery(Query *query, ExplainStmt *stmt, TupOutputState *tstate)
/*
* Update snapshot command ID to ensure this query sees results of any
* previously executed queries. (It's a bit cheesy to modify
- * ActiveSnapshot without making a copy, but for the limited ways in
- * which EXPLAIN can be invoked, I think it's OK, because the active
- * snapshot shouldn't be shared with anything else anyway.)
+ * ActiveSnapshot without making a copy, but for the limited ways in which
+ * EXPLAIN can be invoked, I think it's OK, because the active snapshot
+ * shouldn't be shared with anything else anyway.)
*/
ActiveSnapshot->curcid = GetCurrentCommandId();
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 0a19168179..4a425d4eb2 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.135 2005/11/07 17:36:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.136 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -358,10 +358,10 @@ DefineIndex(RangeVar *heapRelation,
* we don't cascade the notnull constraint(s) either; but this is
* pretty debatable.
*
- * XXX: possible future improvement: when being called from ALTER TABLE,
- * it would be more efficient to merge this with the outer ALTER
- * TABLE, so as to avoid two scans. But that seems to complicate
- * DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER
+ * TABLE, it would be more efficient to merge this with the outer
+ * ALTER TABLE, so as to avoid two scans. But that seems to
+ * complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
@@ -568,8 +568,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too
- * for awhile. I'm starting to think we need a better approach. tgl
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 10745032de..378421033d 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.39 2005/11/21 12:49:31 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.40 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ static void addClassMember(List **list, OpClassMember *member, bool isProc);
static void storeOperators(Oid opclassoid, List *operators);
static void storeProcedures(Oid opclassoid, List *procedures);
static void AlterOpClassOwner_internal(Relation rel, HeapTuple tuple,
- Oid newOwnerId);
+ Oid newOwnerId);
/*
@@ -894,7 +894,7 @@ AlterOpClassOwner_oid(Oid opcOid, Oid newOwnerId)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
AlterOpClassOwner_internal(rel, tup, newOwnerId);
@@ -933,7 +933,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
if (schemaname)
{
- Oid namespaceOid;
+ Oid namespaceOid;
namespaceOid = LookupExplicitNamespace(schemaname);
@@ -950,7 +950,7 @@ AlterOpClassOwner(List *name, const char *access_method, Oid newOwnerId)
}
else
{
- Oid opcOid;
+ Oid opcOid;
opcOid = OpclassnameGetOpcid(amOid, opcname);
if (!OidIsValid(opcOid))
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 7a7b930ce0..c6e45798b4 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.37 2005/11/21 12:49:31 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.38 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -175,7 +175,7 @@ RemoveSchema(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
- (errmsg("schema \"%s\" does not exist, skipping",
+ (errmsg("schema \"%s\" does not exist, skipping",
namespaceName)));
}
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 201fcbf0c6..9c1efe856c 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.126 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -219,8 +219,8 @@ DefineSequence(CreateSeqStmt *seq)
/*
* Two special hacks here:
*
- * 1. Since VACUUM does not process sequences, we have to force the tuple to
- * have xmin = FrozenTransactionId now. Otherwise it would become
+ * 1. Since VACUUM does not process sequences, we have to force the tuple
+ * to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
@@ -459,10 +459,10 @@ nextval_internal(Oid relid)
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
- * If this is the first nextval after a checkpoint, we must force a new WAL
- * record to be written anyway, else replay starting from the checkpoint
- * would fail to advance the sequence past the logged values. In this
- * case we may as well fetch extra values.
+ * If this is the first nextval after a checkpoint, we must force a new
+ * WAL record to be written anyway, else replay starting from the
+ * checkpoint would fail to advance the sequence past the logged values.
+ * In this case we may as well fetch extra values.
*/
if (log < fetch)
{
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 09161e0d4b..43a56f5030 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.175 2005/11/21 12:49:31 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.176 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -907,9 +907,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
* If default expr could contain any vars, we'd need to fix
* 'em, but it can't; so default is ready to apply to child.
*
- * If we already had a default from some prior parent, check to
- * see if they are the same. If so, no problem; if not, mark
- * the column as having a bogus default. Below, we will
+ * If we already had a default from some prior parent, check
+ * to see if they are the same. If so, no problem; if not,
+ * mark the column as having a bogus default. Below, we will
* complain if the bogus default isn't overridden by the child
* schema.
*/
@@ -1124,9 +1124,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* Also enter dependencies on the direct ancestors, and make sure they are
* marked with relhassubclass = true.
*
- * (Once upon a time, both direct and indirect ancestors were found here and
- * then entered into pg_ipl. Since that catalog doesn't exist anymore,
- * there's no need to look for indirect ancestors.)
+ * (Once upon a time, both direct and indirect ancestors were found here
+ * and then entered into pg_ipl. Since that catalog doesn't exist
+ * anymore, there's no need to look for indirect ancestors.)
*/
relation = heap_open(InheritsRelationId, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1216,8 +1216,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we don't need
- * to update it, but we still need to issue an SI inval message.
+ * If the tuple already has the right relhassubclass setting, we don't
+ * need to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1301,8 +1301,8 @@ renameatt(Oid myrelid,
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
- * any permissions or problems with duplicate attributes will cause the whole
- * transaction to abort, which is what we want -- all or nothing.
+ * any permissions or problems with duplicate attributes will cause the
+ * whole transaction to abort, which is what we want -- all or nothing.
*/
if (recurse)
{
@@ -1632,8 +1632,8 @@ update_ri_trigger_args(Oid relid,
/*
* It is an RI trigger, so parse the tgargs bytea.
*
- * NB: we assume the field will never be compressed or moved out of line;
- * so does trigger.c ...
+ * NB: we assume the field will never be compressed or moved out of
+ * line; so does trigger.c ...
*/
tgnargs = pg_trigger->tgnargs;
val = (bytea *)
@@ -2392,9 +2392,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
* If we need to rewrite the table, the operation has to be propagated to
* tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but at the
- * moment a composite type does not enforce any constraints, so it's not
- * necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at
+ * the moment a composite type does not enforce any constraints, so it's
+ * not necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
@@ -2836,9 +2836,9 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
/*
* Recurse to add the column to child classes, if requested.
*
- * We must recurse one level at a time, so that multiply-inheriting children
- * are visited the right number of times and end up with the right
- * attinhcount.
+ * We must recurse one level at a time, so that multiply-inheriting
+ * children are visited the right number of times and end up with the
+ * right attinhcount.
*/
if (recurse)
{
@@ -3038,8 +3038,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything, because that
- * effectively means that the default is NULL. The heap tuple access
+ * If there is no default, Phase 3 doesn't have to do anything, because
+ * that effectively means that the default is NULL. The heap tuple access
* routines always check for attnum > # of attributes in tuple, and return
* NULL if so, so without any modification of the tuple data we will get
* the effect of NULL values in the new column.
@@ -3832,8 +3832,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Validity and permissions checks
*
- * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but
- * we may as well error out sooner instead of later.
+ * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER,
+ * but we may as well error out sooner instead of later.
*/
if (pkrel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
@@ -3931,9 +3931,9 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* pktypoid[i] is the primary key table's i'th key's type fktypoid[i]
* is the foreign key table's i'th key's type
*
- * Note that we look for an operator with the PK type on the left; when
- * the types are different this is critical because the PK index will
- * need operators with the indexkey on the left. (Ordinarily both
+ * Note that we look for an operator with the PK type on the left;
+ * when the types are different this is critical because the PK index
+ * will need operators with the indexkey on the left. (Ordinarily both
* commutator operators will exist if either does, but we won't get
* the right answer from the test below on opclass membership unless
* we select the proper operator.)
@@ -4861,10 +4861,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the column type, because build_column_default itself will try to
* coerce, and will not issue the error message we want if it fails.)
*
- * We remove any implicit coercion steps at the top level of the old default
- * expression; this has been agreed to satisfy the principle of least
- * surprise. (The conversion to the new column type should act like it
- * started from what the user sees as the stored expression, and the
+ * We remove any implicit coercion steps at the top level of the old
+ * default expression; this has been agreed to satisfy the principle of
+ * least surprise. (The conversion to the new column type should act like
+ * it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef)
@@ -4895,8 +4895,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
- * There could be multiple entries for the same object, so we must check to
- * ensure we process each one only once. Note: we assume that an index
+ * There could be multiple entries for the same object, so we must check
+ * to ensure we process each one only once. Note: we assume that an index
* that implements a constraint will not show a direct dependency on the
* column.
*/
@@ -5781,9 +5781,9 @@ copy_relation_data(Relation rel, SMgrRelation dst)
* to commit the transaction. (For a temp rel we don't care since the rel
* will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy. It's less
- * obvious that we have to do it even if we did WAL-log the copied pages.
- * The reason is that since we're copying outside shared buffers, a
+ * It's obvious that we must do this when not WAL-logging the copy. It's
+ * less obvious that we have to do it even if we did WAL-log the copied
+ * pages. The reason is that since we're copying outside shared buffers, a
* CHECKPOINT occurring during the copy has no way to flush the previously
* written data to disk (indeed it won't know the new rel even exists). A
* crash later on would replay WAL from the checkpoint, therefore it
@@ -5841,12 +5841,12 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Toast table is shared if and only if its parent is.
*
- * We cannot allow toasting a shared relation after initdb (because there's
- * no way to mark it toasted in other databases' pg_class). Unfortunately
- * we can't distinguish initdb from a manually started standalone backend
- * (toasting happens after the bootstrap phase, so checking
- * IsBootstrapProcessingMode() won't work). However, we can at least
- * prevent this mistake under normal multi-user operation.
+ * We cannot allow toasting a shared relation after initdb (because
+ * there's no way to mark it toasted in other databases' pg_class).
+ * Unfortunately we can't distinguish initdb from a manually started
+ * standalone backend (toasting happens after the bootstrap phase, so
+ * checking IsBootstrapProcessingMode() won't work). However, we can at
+ * least prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 81463abba8..e9ac17d148 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.196 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.197 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -566,8 +566,8 @@ RemoveTriggerById(Oid trigOid)
* (and this one too!) are sent SI message to make them rebuild relcache
* entries.
*
- * Note this is OK only because we have AccessExclusiveLock on the rel, so no
- * one else is creating/deleting triggers on this rel at the same time.
+ * Note this is OK only because we have AccessExclusiveLock on the rel, so
+ * no one else is creating/deleting triggers on this rel at the same time.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1182,8 +1182,8 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
* we have the same triggers with the same types, the derived index data
* should match.
*
- * As of 7.3 we assume trigger set ordering is significant in the comparison;
- * so we just compare corresponding slots of the two sets.
+ * As of 7.3 we assume trigger set ordering is significant in the
+ * comparison; so we just compare corresponding slots of the two sets.
*/
if (trigdesc1 != NULL)
{
@@ -2533,13 +2533,14 @@ AfterTriggerEndQuery(EState *estate)
* Process all immediate-mode triggers queued by the query, and move the
* deferred ones to the main list of deferred events.
*
- * Notice that we decide which ones will be fired, and put the deferred ones
- * on the main list, before anything is actually fired. This ensures
+ * Notice that we decide which ones will be fired, and put the deferred
+ * ones on the main list, before anything is actually fired. This ensures
* reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
* IMMEDIATE: all events we have decided to defer will be available for it
* to fire.
*
- * If we find no firable events, we don't have to increment firing_counter.
+ * If we find no firable events, we don't have to increment
+ * firing_counter.
*/
events = &afterTriggers->query_stack[afterTriggers->query_depth];
if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
@@ -3026,8 +3027,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* list of previously deferred events to fire any that have now become
* immediate.
*
- * Obviously, if this was SET ... DEFERRED then it can't have converted any
- * unfired events to immediate, so we need do nothing in that case.
+ * Obviously, if this was SET ... DEFERRED then it can't have converted
+ * any unfired events to immediate, so we need do nothing in that case.
*/
if (!stmt->deferred)
{
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 30044f10bf..389fe133a5 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.84 2005/11/21 12:49:31 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.85 2005/11/22 18:17:09 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -425,7 +425,7 @@ RemoveType(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
- (errmsg("type \"%s\" does not exist, skipping",
+ (errmsg("type \"%s\" does not exist, skipping",
TypeNameToString(typename))));
}
@@ -820,7 +820,7 @@ RemoveDomain(List *names, DropBehavior behavior, bool missing_ok)
else
{
ereport(NOTICE,
- (errmsg("type \"%s\" does not exist, skipping",
+ (errmsg("type \"%s\" does not exist, skipping",
TypeNameToString(typename))));
}
@@ -879,8 +879,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or three
* arguments (string, typioparam OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING; if we see
- * this, we issue a warning and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = CSTRINGOID;
@@ -1864,8 +1864,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Deparse it to produce text for consrc.
*
- * Since VARNOs aren't allowed in domain constraints, relation context isn't
- * required as anything other than a shell.
+ * Since VARNOs aren't allowed in domain constraints, relation context
+ * isn't required as anything other than a shell.
*/
ccsrc = deparse_expression(expr,
deparse_context_for(domainName,
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 9ac3c8a97e..381ebe24ad 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.165 2005/11/21 12:49:31 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.166 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1124,15 +1124,15 @@ GrantRole(GrantRoleStmt *stmt)
* Drop the objects owned by a given list of roles.
*/
void
-DropOwnedObjects(DropOwnedStmt *stmt)
+DropOwnedObjects(DropOwnedStmt * stmt)
{
- List *role_ids = roleNamesToIds(stmt->roles);
- ListCell *cell;
+ List *role_ids = roleNamesToIds(stmt->roles);
+ ListCell *cell;
/* Check privileges */
- foreach (cell, role_ids)
+ foreach(cell, role_ids)
{
- Oid roleid = lfirst_oid(cell);
+ Oid roleid = lfirst_oid(cell);
if (!has_privs_of_role(GetUserId(), roleid))
ereport(ERROR,
@@ -1150,16 +1150,16 @@ DropOwnedObjects(DropOwnedStmt *stmt)
* Give the objects owned by a given list of roles away to another user.
*/
void
-ReassignOwnedObjects(ReassignOwnedStmt *stmt)
+ReassignOwnedObjects(ReassignOwnedStmt * stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
Oid newrole;
/* Check privileges */
- foreach (cell, role_ids)
+ foreach(cell, role_ids)
{
- Oid roleid = lfirst_oid(cell);
+ Oid roleid = lfirst_oid(cell);
if (!has_privs_of_role(GetUserId(), roleid))
ereport(ERROR,
@@ -1171,9 +1171,9 @@ ReassignOwnedObjects(ReassignOwnedStmt *stmt)
newrole = get_roleid_checked(stmt->newrole);
if (!has_privs_of_role(GetUserId(), newrole))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to reassign objects")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to reassign objects")));
/* Ok, do it */
shdepReassignOwned(role_ids, newrole);
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index af384cebcd..0526d67aae 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.318 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.319 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -313,8 +313,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
* compared to telling people to use two operations. See pgsql-hackers
* discussion of 27-Nov-2004, and comments below for update_hint_bits().
*
- * Note: this is enforced here, and not in the grammar, since (a) we can give
- * a better error message, and (b) we might want to allow it again
+ * Note: this is enforced here, and not in the grammar, since (a) we can
+ * give a better error message, and (b) we might want to allow it again
* someday.
*/
if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze)
@@ -332,8 +332,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away eventually even if
- * we suffer an error; there's no need for special abort cleanup logic.
+ * Since it is a child of PortalContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
@@ -355,14 +355,14 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
- * that we can record these values at the end of the VACUUM. Note that
- * individual tables may well be processed with newer values, but we
- * can guarantee that no (non-shared) relations are processed with
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
+ * so that we can record these values at the end of the VACUUM. Note
+ * that individual tables may well be processed with newer values, but
+ * we can guarantee that no (non-shared) relations are processed with
* older ones.
*
- * It is okay to record non-shared values in pg_database, even though we
- * may vacuum shared relations with older cutoffs, because only the
+ * It is okay to record non-shared values in pg_database, even though
+ * we may vacuum shared relations with older cutoffs, because only the
* minimum of the values present in pg_database matters. We can be
* sure that shared relations have at some time been vacuumed with
* cutoffs no worse than the global minimum; for, if there is a
@@ -379,8 +379,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we can release
- * locks as soon as possible. (We could possibly use the outer
+ * For VACUUM (with or without ANALYZE): always do so, so that we can
+ * release locks as soon as possible. (We could possibly use the outer
* transaction for a one-table VACUUM, but handling TOAST tables would be
* problematic.)
*
@@ -981,21 +981,20 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* Determine the type of lock we want --- hard exclusive lock for a FULL
- * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum.
- * Either way, we can be sure that no other backend is vacuuming the same
- * table.
+ * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
+ * way, we can be sure that no other backend is vacuuming the same table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
* Open the class, get an appropriate lock on it, and check permissions.
*
- * We allow the user to vacuum a table if he is superuser, the table owner,
- * or the database owner (but in the latter case, only if it's not a
- * shared relation). pg_class_ownercheck includes the superuser case.
+ * We allow the user to vacuum a table if he is superuser, the table
+ * owner, or the database owner (but in the latter case, only if it's not
+ * a shared relation). pg_class_ownercheck includes the superuser case.
*
- * Note we choose to treat permissions failure as a WARNING and keep trying
- * to vacuum the rest of the DB --- is this appropriate?
+ * Note we choose to treat permissions failure as a WARNING and keep
+ * trying to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
@@ -1660,8 +1659,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* find a page we cannot completely empty (this last condition is handled
* by "break" statements within the loop).
*
- * NB: this code depends on the vacuum_pages and fraged_pages lists being in
- * order by blkno.
+ * NB: this code depends on the vacuum_pages and fraged_pages lists being
+ * in order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
@@ -1684,9 +1683,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* since we stop the outer loop at last_move_dest_block, pages removed
* here cannot have had anything moved onto them already.
*
- * Also note that we don't change the stored fraged_pages list, only our
- * local variable num_fraged_pages; so the forgotten pages are still
- * available to be loaded into the free space map later.
+ * Also note that we don't change the stored fraged_pages list, only
+ * our local variable num_fraged_pages; so the forgotten pages are
+ * still available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
@@ -1839,17 +1838,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* --- it must be recently obsoleted, else scan_heap would have
* deemed it removable.)
*
- * NOTE: this test is not 100% accurate: it is possible for a tuple
- * to be an updated one with recent xmin, and yet not match any
- * new_tid entry in the vtlinks list. Presumably there was once a
- * parent tuple with xmax matching the xmin, but it's possible
- * that that tuple has been removed --- for example, if it had
- * xmin = xmax and wasn't itself an updated version, then
+ * NOTE: this test is not 100% accurate: it is possible for a
+ * tuple to be an updated one with recent xmin, and yet not match
+ * any new_tid entry in the vtlinks list. Presumably there was
+ * once a parent tuple with xmax matching the xmin, but it's
+ * possible that that tuple has been removed --- for example, if
+ * it had xmin = xmax and wasn't itself an updated version, then
* HeapTupleSatisfiesVacuum would deem it removable as soon as the
* xmin xact completes.
*
- * To be on the safe side, we abandon the repair_frag process if we
- * cannot find the parent tuple in vtlinks. This may be overly
+ * To be on the safe side, we abandon the repair_frag process if
+ * we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
@@ -2388,8 +2387,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Clean moved-off tuples from last page in Nvacpagelist list.
*
- * We need only do this in this one page, because higher-numbered pages
- * are going to be truncated from the relation entirely. But see
+ * We need only do this in this one page, because higher-numbered
+ * pages are going to be truncated from the relation entirely. But see
* comments for update_hint_bits().
*/
if (vacpage->blkno == (blkno - 1) &&
@@ -2544,8 +2543,8 @@ move_chain_tuple(Relation rel,
* Therefore we must do everything that uses old_tup->t_data BEFORE this
* step!!
*
- * This path is different from the other callers of vacuum_page, because we
- * have already incremented the vacpage's offsets_used field to account
+ * This path is different from the other callers of vacuum_page, because
+ * we have already incremented the vacpage's offsets_used field to account
* for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since that's a
* good debugging check for all other callers, we work around it here
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index d0b1d3a6fb..fbdb019b35 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.62 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.63 2005/11/22 18:17:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -286,21 +286,21 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* relation but crashes before initializing the page. Reclaim such
* pages for use.
*
- * We have to be careful here because we could be looking at a page
- * that someone has just added to the relation and not yet been
- * able to initialize (see RelationGetBufferForTuple). To
+ * We have to be careful here because we could be looking at a
+ * page that someone has just added to the relation and not yet
+ * been able to initialize (see RelationGetBufferForTuple). To
* interlock against that, release the buffer read lock (which we
* must do anyway) and grab the relation extension lock before
* re-locking in exclusive mode. If the page is still
* uninitialized by then, it must be left over from a crashed
* backend, and we can initialize it.
*
- * We don't really need the relation lock when this is a new or temp
- * relation, but it's probably not worth the code space to check
- * that, since this surely isn't a critical path.
+ * We don't really need the relation lock when this is a new or
+ * temp relation, but it's probably not worth the code space to
+ * check that, since this surely isn't a critical path.
*
- * Note: the comparable code in vacuum.c need not worry because it's
- * got exclusive lock on the whole relation.
+ * Note: the comparable code in vacuum.c need not worry because
+ * it's got exclusive lock on the whole relation.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock);
@@ -366,12 +366,12 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Tuple is good. Consider whether to replace its xmin
* value with FrozenTransactionId.
*
- * NB: Since we hold only a shared buffer lock here, we are
- * assuming that TransactionId read/write is atomic. This
- * is not the only place that makes such an assumption.
- * It'd be possible to avoid the assumption by momentarily
- * acquiring exclusive lock, but for the moment I see no
- * need to.
+ * NB: Since we hold only a shared buffer lock here, we
+ * are assuming that TransactionId read/write is atomic.
+ * This is not the only place that makes such an
+ * assumption. It'd be possible to avoid the assumption by
+ * momentarily acquiring exclusive lock, but for the
+ * moment I see no need to.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 31113fffe2..38a10bd605 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.115 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,8 +134,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
* Easiest way to get the current DEFAULT state is to fetch the
* DEFAULT string from guc.c and recursively parse it.
*
- * We can't simply "return assign_datestyle(...)" because we need to
- * handle constructs like "DEFAULT, ISO".
+ * We can't simply "return assign_datestyle(...)" because we need
+ * to handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
int saveDateOrder = DateOrder;
@@ -339,8 +339,8 @@ assign_timezone(const char *value, bool doit, GucSource source)
* timezone setting, we will return that name rather than UNKNOWN
* as the canonical spelling.
*
- * During GUC initialization, since the timezone library isn't set up
- * yet, pg_get_timezone_name will return NULL and we will leave
+ * During GUC initialization, since the timezone library isn't set
+ * up yet, pg_get_timezone_name will return NULL and we will leave
* the setting as UNKNOWN. If this isn't overridden from the
* config file then pg_timezone_initialize() will eventually
* select a default value from the environment.
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 54030452f8..dfa1494556 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.92 2005/11/22 18:17:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -410,7 +410,8 @@ DefineView(RangeVar *view, Query *viewParse, bool replace)
/*
* Create the view relation
*
- * NOTE: if it already exists and replace is false, the xact will be aborted.
+ * NOTE: if it already exists and replace is false, the xact will be
+ * aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);