author     Bruce Momjian <bruce@momjian.us>  2009-06-11 14:49:15 +0000
committer  Bruce Momjian <bruce@momjian.us>  2009-06-11 14:49:15 +0000
commit     d7471402794266078953f1bd113dab4913d631a1 (patch)
tree       618e392a84eaf837e00bf78f8694097b78fec227 /src/backend/access
parent     4e86efb4e51b66ef57b3fe6f28576de23a1bf1c6 (diff)
download   postgresql-d7471402794266078953f1bd113dab4913d631a1.tar.gz
8.4 pgindent run, with new combined Linux/FreeBSD/MinGW typedef list
provided by Andrew.
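Every hunk in this commit is mechanical re-indentation with no behavioral change. pgindent drives BSD indent with a typedef list so that user-defined type names (DatumArray, XLogRecPtr, and so on) are parsed as types rather than identifiers, which is what lets declaration columns line up; that is why the combined Linux/FreeBSD/MinGW typedef list mentioned above matters. As a rough standalone sketch of the target style, simplified from the DatumArray code in ginfast.c below (int and realloc stand in for Datum and repalloc so the snippet compiles on its own):

#include <stdlib.h>

/* Simplified stand-in for GIN's DatumArray; int replaces Datum. */
typedef struct DatumArray
{
	int		   *values;			/* expansible array */
	int			nvalues;		/* current number of valid entries */
	int			maxvalues;		/* allocated size of array */
} DatumArray;

/*
 * pgindent layout: return type and function name on separate lines,
 * tab-aligned declaration columns, a space after casts such as (int *),
 * and spaces around binary operators.
 */
static void
addDatum(DatumArray *datums, int datum)
{
	/* double the array when it fills up, as ginfast.c does */
	if (datums->nvalues >= datums->maxvalues)
	{
		datums->maxvalues *= 2;
		datums->values = (int *) realloc(datums->values,
										 sizeof(int) * datums->maxvalues);
	}
	datums->values[datums->nvalues++] = datum;
}

int
main(void)
{
	DatumArray	da = {(int *) malloc(4 * sizeof(int)), 0, 4};
	int			i;

	for (i = 0; i < 10; i++)
		addDatum(&da, i);
	free(da.values);
	return 0;
}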
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c    |   8
-rw-r--r--  src/backend/access/common/indextuple.c   |   6
-rw-r--r--  src/backend/access/common/printtup.c     |   4
-rw-r--r--  src/backend/access/common/reloptions.c   | 168
-rw-r--r--  src/backend/access/common/tupdesc.c      |   6
-rw-r--r--  src/backend/access/gin/ginarrayproc.c    |   3
-rw-r--r--  src/backend/access/gin/ginbulk.c         |  12
-rw-r--r--  src/backend/access/gin/gindatapage.c     |   4
-rw-r--r--  src/backend/access/gin/ginentrypage.c    |  36
-rw-r--r--  src/backend/access/gin/ginfast.c         | 339
-rw-r--r--  src/backend/access/gin/ginget.c          | 485
-rw-r--r--  src/backend/access/gin/gininsert.c       |  51
-rw-r--r--  src/backend/access/gin/ginscan.c         |  28
-rw-r--r--  src/backend/access/gin/ginutil.c         |  86
-rw-r--r--  src/backend/access/gin/ginvacuum.c       |  14
-rw-r--r--  src/backend/access/gin/ginxlog.c         |  87
-rw-r--r--  src/backend/access/gist/gistget.c        |  58
-rw-r--r--  src/backend/access/gist/gistproc.c       |  17
-rw-r--r--  src/backend/access/gist/gistscan.c       |  10
-rw-r--r--  src/backend/access/gist/gistsplit.c      |  44
-rw-r--r--  src/backend/access/gist/gistutil.c       |   5
-rw-r--r--  src/backend/access/gist/gistvacuum.c     |   4
-rw-r--r--  src/backend/access/hash/hash.c           |  18
-rw-r--r--  src/backend/access/hash/hashfunc.c       |  70
-rw-r--r--  src/backend/access/hash/hashpage.c       |  22
-rw-r--r--  src/backend/access/hash/hashscan.c       |   6
-rw-r--r--  src/backend/access/hash/hashsearch.c     |  14
-rw-r--r--  src/backend/access/hash/hashsort.c       |  12
-rw-r--r--  src/backend/access/hash/hashutil.c       |  44
-rw-r--r--  src/backend/access/heap/heapam.c         |  74
-rw-r--r--  src/backend/access/heap/hio.c            |  10
-rw-r--r--  src/backend/access/heap/pruneheap.c      |  57
-rw-r--r--  src/backend/access/heap/rewriteheap.c    |   4
-rw-r--r--  src/backend/access/heap/tuptoaster.c     |  22
-rw-r--r--  src/backend/access/heap/visibilitymap.c  |  50
-rw-r--r--  src/backend/access/index/genam.c         |  19
-rw-r--r--  src/backend/access/index/indexam.c       |  10
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c    |   6
-rw-r--r--  src/backend/access/nbtree/nbtree.c       |   9
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c      |   8
-rw-r--r--  src/backend/access/transam/clog.c        |  53
-rw-r--r--  src/backend/access/transam/twophase.c    |   4
-rw-r--r--  src/backend/access/transam/xact.c        | 107
-rw-r--r--  src/backend/access/transam/xlog.c        | 239
-rw-r--r--  src/backend/access/transam/xlogutils.c   |  44
45 files changed, 1210 insertions, 1167 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 6499549177..ac5749c713 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -50,7 +50,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.126 2009/03/30 04:08:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.127 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -388,7 +388,7 @@ nocachegetattr(HeapTuple tuple,
* Now check to see if any preceding bits are null...
*/
{
- int byte = attnum >> 3;
+ int byte = attnum >> 3;
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
@@ -1183,7 +1183,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
- if (tuple == &(slot->tts_minhdr)) /* internal error */
+ if (tuple == &(slot->tts_minhdr)) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_getsysattr(tuple, attnum, tupleDesc, isnull);
}
@@ -1369,7 +1369,7 @@ slot_attisnull(TupleTableSlot *slot, int attnum)
{
if (tuple == NULL) /* internal error */
elog(ERROR, "cannot extract system attribute from virtual tuple");
- if (tuple == &(slot->tts_minhdr)) /* internal error */
+ if (tuple == &(slot->tts_minhdr)) /* internal error */
elog(ERROR, "cannot extract system attribute from minimal tuple");
return heap_attisnull(tuple, attnum);
}
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 839d242913..cd3b88654b 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.87 2009/01/01 17:23:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.88 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,7 +86,7 @@ index_form_tuple(TupleDesc tupleDescriptor,
* try to compress it in-line.
*/
if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) &&
- VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
+ VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
(att->attstorage == 'x' || att->attstorage == 'm'))
{
Datum cvalue = toast_compress_datum(untoasted_values[i]);
@@ -270,7 +270,7 @@ nocache_index_getattr(IndexTuple tup,
* Now check to see if any preceding bits are null...
*/
{
- int byte = attnum >> 3;
+ int byte = attnum >> 3;
int finalbit = attnum & 0x07;
/* check for nulls "before" final bit of last byte */
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 324c978132..13a09dd9b3 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.104 2009/01/01 17:23:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.105 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,7 +71,7 @@ printtup_create_DR(CommandDest dest)
{
DR_printtup *self = (DR_printtup *) palloc0(sizeof(DR_printtup));
- self->pub.receiveSlot = printtup; /* might get changed later */
+ self->pub.receiveSlot = printtup; /* might get changed later */
self->pub.rStartup = printtup_startup;
self->pub.rShutdown = printtup_shutdown;
self->pub.rDestroy = printtup_destroy;
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 2668fd8bfe..b970601b1c 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.27 2009/05/24 22:22:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/reloptions.c,v 1.28 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ static relopt_bool boolRelOpts[] =
true
},
/* list terminator */
- { { NULL } }
+ {{NULL}}
};
static relopt_int intRelOpts[] =
@@ -158,7 +158,7 @@ static relopt_int intRelOpts[] =
}, 150000000, 0, 2000000000
},
/* list terminator */
- { { NULL } }
+ {{NULL}}
};
static relopt_real realRelOpts[] =
@@ -180,21 +180,21 @@ static relopt_real realRelOpts[] =
0.1, 0.0, 100.0
},
/* list terminator */
- { { NULL } }
+ {{NULL}}
};
static relopt_string stringRelOpts[] =
{
/* list terminator */
- { { NULL } }
+ {{NULL}}
};
static relopt_gen **relOpts = NULL;
static bits32 last_assigned_kind = RELOPT_KIND_LAST_DEFAULT;
-static int num_custom_options = 0;
+static int num_custom_options = 0;
static relopt_gen **custom_options = NULL;
-static bool need_initialization = true;
+static bool need_initialization = true;
static void initialize_reloptions(void);
static void parse_one_reloption(relopt_value *option, char *text_str,
@@ -202,15 +202,15 @@ static void parse_one_reloption(relopt_value *option, char *text_str,
/*
* initialize_reloptions
- * initialization routine, must be called before parsing
+ * initialization routine, must be called before parsing
*
* Initialize the relOpts array and fill each variable's type and name length.
*/
static void
initialize_reloptions(void)
{
- int i;
- int j = 0;
+ int i;
+ int j = 0;
for (i = 0; boolRelOpts[i].gen.name; i++)
j++;
@@ -272,8 +272,8 @@ initialize_reloptions(void)
/*
* add_reloption_kind
- * Create a new relopt_kind value, to be used in custom reloptions by
- * user-defined AMs.
+ * Create a new relopt_kind value, to be used in custom reloptions by
+ * user-defined AMs.
*/
relopt_kind
add_reloption_kind(void)
@@ -282,24 +282,24 @@ add_reloption_kind(void)
if (last_assigned_kind >= RELOPT_KIND_MAX)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("user-defined relation parameter types limit exceeded")));
+ errmsg("user-defined relation parameter types limit exceeded")));
last_assigned_kind <<= 1;
return (relopt_kind) last_assigned_kind;
}
/*
* add_reloption
- * Add an already-created custom reloption to the list, and recompute the
- * main parser table.
+ * Add an already-created custom reloption to the list, and recompute the
+ * main parser table.
*/
static void
add_reloption(relopt_gen *newoption)
{
- static int max_custom_options = 0;
+ static int max_custom_options = 0;
if (num_custom_options >= max_custom_options)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -312,7 +312,7 @@ add_reloption(relopt_gen *newoption)
{
max_custom_options *= 2;
custom_options = repalloc(custom_options,
- max_custom_options * sizeof(relopt_gen *));
+ max_custom_options * sizeof(relopt_gen *));
}
MemoryContextSwitchTo(oldcxt);
}
@@ -323,15 +323,15 @@ add_reloption(relopt_gen *newoption)
/*
* allocate_reloption
- * Allocate a new reloption and initialize the type-agnostic fields
- * (for types other than string)
+ * Allocate a new reloption and initialize the type-agnostic fields
+ * (for types other than string)
*/
static relopt_gen *
allocate_reloption(bits32 kinds, int type, char *name, char *desc)
{
- MemoryContext oldcxt;
- size_t size;
- relopt_gen *newoption;
+ MemoryContext oldcxt;
+ size_t size;
+ relopt_gen *newoption;
Assert(type != RELOPT_TYPE_STRING);
@@ -350,7 +350,7 @@ allocate_reloption(bits32 kinds, int type, char *name, char *desc)
break;
default:
elog(ERROR, "unsupported option type");
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
newoption = palloc(size);
@@ -371,12 +371,12 @@ allocate_reloption(bits32 kinds, int type, char *name, char *desc)
/*
* add_bool_reloption
- * Add a new boolean reloption
+ * Add a new boolean reloption
*/
void
add_bool_reloption(bits32 kinds, char *name, char *desc, bool default_val)
{
- relopt_bool *newoption;
+ relopt_bool *newoption;
newoption = (relopt_bool *) allocate_reloption(kinds, RELOPT_TYPE_BOOL,
name, desc);
@@ -387,13 +387,13 @@ add_bool_reloption(bits32 kinds, char *name, char *desc, bool default_val)
/*
* add_int_reloption
- * Add a new integer reloption
+ * Add a new integer reloption
*/
void
add_int_reloption(bits32 kinds, char *name, char *desc, int default_val,
int min_val, int max_val)
{
- relopt_int *newoption;
+ relopt_int *newoption;
newoption = (relopt_int *) allocate_reloption(kinds, RELOPT_TYPE_INT,
name, desc);
@@ -406,13 +406,13 @@ add_int_reloption(bits32 kinds, char *name, char *desc, int default_val,
/*
* add_real_reloption
- * Add a new float reloption
+ * Add a new float reloption
*/
void
add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
- double min_val, double max_val)
+ double min_val, double max_val)
{
- relopt_real *newoption;
+ relopt_real *newoption;
newoption = (relopt_real *) allocate_reloption(kinds, RELOPT_TYPE_REAL,
name, desc);
@@ -428,7 +428,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val,
* Add a new string reloption
*
* "validator" is an optional function pointer that can be used to test the
- * validity of the values. It must elog(ERROR) when the argument string is
+ * validity of the values. It must elog(ERROR) when the argument string is
* not acceptable for the variable. Note that the default value must pass
* the validation.
*/
@@ -436,9 +436,9 @@ void
add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
validate_string_relopt validator)
{
- MemoryContext oldcxt;
- relopt_string *newoption;
- int default_len = 0;
+ MemoryContext oldcxt;
+ relopt_string *newoption;
+ int default_len = 0;
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -495,7 +495,7 @@ add_string_reloption(bits32 kinds, char *name, char *desc, char *default_val,
* Note that this is not responsible for determining whether the options
* are valid, but it does check that namespaces for all the options given are
* listed in validnsps. The NULL namespace is always valid and needs not be
- * explicitely listed. Passing a NULL pointer means that only the NULL
+ * explicitely listed. Passing a NULL pointer means that only the NULL
* namespace is valid.
*
* Both oldOptions and the result are text arrays (or NULL for "default"),
@@ -538,7 +538,7 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace,
/* Search for a match in defList */
foreach(cell, defList)
{
- DefElem *def = (DefElem *) lfirst(cell);
+ DefElem *def = (DefElem *) lfirst(cell);
int kw_len;
/* ignore if not in the same namespace */
@@ -574,7 +574,7 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace,
*/
foreach(cell, defList)
{
- DefElem *def = (DefElem *) lfirst(cell);
+ DefElem *def = (DefElem *) lfirst(cell);
if (isReset)
{
@@ -590,13 +590,13 @@ transformRelOptions(Datum oldOptions, List *defList, char *namspace,
Size len;
/*
- * Error out if the namespace is not valid. A NULL namespace
- * is always valid.
+ * Error out if the namespace is not valid. A NULL namespace is
+ * always valid.
*/
if (def->defnamespace != NULL)
{
- bool valid = false;
- int i;
+ bool valid = false;
+ int i;
if (validnsps)
{
@@ -719,10 +719,10 @@ untransformRelOptions(Datum options)
bytea *
extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
{
- bytea *options;
- bool isnull;
- Datum datum;
- Form_pg_class classForm;
+ bytea *options;
+ bool isnull;
+ Datum datum;
+ Form_pg_class classForm;
datum = fastgetattr(tuple,
Anum_pg_class_reloptions,
@@ -768,7 +768,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions)
* is returned.
*
* Note: values of type int, bool and real are allocated as part of the
- * returned array. Values of type string are allocated separately and must
+ * returned array. Values of type string are allocated separately and must
* be freed by the caller.
*/
relopt_value *
@@ -894,31 +894,31 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len,
parsed = parse_bool(value, &option->values.bool_val);
if (validate && !parsed)
ereport(ERROR,
- (errmsg("invalid value for boolean option \"%s\": %s",
- option->gen->name, value)));
+ (errmsg("invalid value for boolean option \"%s\": %s",
+ option->gen->name, value)));
}
break;
case RELOPT_TYPE_INT:
{
- relopt_int *optint = (relopt_int *) option->gen;
+ relopt_int *optint = (relopt_int *) option->gen;
parsed = parse_int(value, &option->values.int_val, 0, NULL);
if (validate && !parsed)
ereport(ERROR,
- (errmsg("invalid value for integer option \"%s\": %s",
- option->gen->name, value)));
+ (errmsg("invalid value for integer option \"%s\": %s",
+ option->gen->name, value)));
if (validate && (option->values.int_val < optint->min ||
option->values.int_val > optint->max))
ereport(ERROR,
- (errmsg("value %s out of bounds for option \"%s\"",
- value, option->gen->name),
- errdetail("Valid values are between \"%d\" and \"%d\".",
- optint->min, optint->max)));
+ (errmsg("value %s out of bounds for option \"%s\"",
+ value, option->gen->name),
+ errdetail("Valid values are between \"%d\" and \"%d\".",
+ optint->min, optint->max)));
}
break;
case RELOPT_TYPE_REAL:
{
- relopt_real *optreal = (relopt_real *) option->gen;
+ relopt_real *optreal = (relopt_real *) option->gen;
parsed = parse_real(value, &option->values.real_val);
if (validate && !parsed)
@@ -928,15 +928,15 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len,
if (validate && (option->values.real_val < optreal->min ||
option->values.real_val > optreal->max))
ereport(ERROR,
- (errmsg("value %s out of bounds for option \"%s\"",
- value, option->gen->name),
- errdetail("Valid values are between \"%f\" and \"%f\".",
- optreal->min, optreal->max)));
+ (errmsg("value %s out of bounds for option \"%s\"",
+ value, option->gen->name),
+ errdetail("Valid values are between \"%f\" and \"%f\".",
+ optreal->min, optreal->max)));
}
break;
case RELOPT_TYPE_STRING:
{
- relopt_string *optstring = (relopt_string *) option->gen;
+ relopt_string *optstring = (relopt_string *) option->gen;
option->values.string_val = value;
nofree = true;
@@ -947,7 +947,7 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len,
break;
default:
elog(ERROR, "unsupported reloption type %d", option->gen->type);
- parsed = true; /* quiet compiler */
+ parsed = true; /* quiet compiler */
break;
}
@@ -967,8 +967,8 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len,
void *
allocateReloptStruct(Size base, relopt_value *options, int numoptions)
{
- Size size = base;
- int i;
+ Size size = base;
+ int i;
for (i = 0; i < numoptions; i++)
if (options[i].gen->type == RELOPT_TYPE_STRING)
@@ -994,21 +994,21 @@ fillRelOptions(void *rdopts, Size basesize,
bool validate,
const relopt_parse_elt *elems, int numelems)
{
- int i;
- int offset = basesize;
+ int i;
+ int offset = basesize;
for (i = 0; i < numoptions; i++)
{
- int j;
- bool found = false;
+ int j;
+ bool found = false;
for (j = 0; j < numelems; j++)
{
if (pg_strcasecmp(options[i].gen->name, elems[j].optname) == 0)
{
relopt_string *optstring;
- char *itempos = ((char *) rdopts) + elems[j].offset;
- char *string_val;
+ char *itempos = ((char *) rdopts) + elems[j].offset;
+ char *string_val;
switch (options[i].gen->type)
{
@@ -1069,31 +1069,31 @@ fillRelOptions(void *rdopts, Size basesize,
bytea *
default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
{
- relopt_value *options;
- StdRdOptions *rdopts;
- int numoptions;
+ relopt_value *options;
+ StdRdOptions *rdopts;
+ int numoptions;
static const relopt_parse_elt tab[] = {
{"fillfactor", RELOPT_TYPE_INT, offsetof(StdRdOptions, fillfactor)},
{"autovacuum_enabled", RELOPT_TYPE_BOOL,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, enabled)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, enabled)},
{"autovacuum_vacuum_threshold", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_threshold)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, vacuum_threshold)},
{"autovacuum_analyze_threshold", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, analyze_threshold)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, analyze_threshold)},
{"autovacuum_vacuum_cost_delay", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_cost_delay)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, vacuum_cost_delay)},
{"autovacuum_vacuum_cost_limit", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_cost_limit)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, vacuum_cost_limit)},
{"autovacuum_freeze_min_age", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, freeze_min_age)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, freeze_min_age)},
{"autovacuum_freeze_max_age", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, freeze_max_age)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, freeze_max_age)},
{"autovacuum_freeze_table_age", RELOPT_TYPE_INT,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, freeze_table_age)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, freeze_table_age)},
{"autovacuum_vacuum_scale_factor", RELOPT_TYPE_REAL,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, vacuum_scale_factor)},
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, vacuum_scale_factor)},
{"autovacuum_analyze_scale_factor", RELOPT_TYPE_REAL,
- offsetof(StdRdOptions, autovacuum) + offsetof(AutoVacOpts, analyze_scale_factor)}
+ offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, analyze_scale_factor)}
};
options = parseRelOptions(reloptions, validate, kind, &numoptions);
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index ea16913c8e..79efec0969 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.125 2009/01/22 20:16:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.126 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -55,8 +55,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* pointers.
*
* Note: Only the fixed part of pg_attribute rows is included in tuple
- * descriptors, so we only need ATTRIBUTE_FIXED_PART_SIZE space
- * per attr. That might need alignment padding, however.
+ * descriptors, so we only need ATTRIBUTE_FIXED_PART_SIZE space per attr.
+ * That might need alignment padding, however.
*/
attroffset = sizeof(struct tupleDesc) + natts * sizeof(Form_pg_attribute);
attroffset = MAXALIGN(attroffset);
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index 717caaad8b..feff95f1d8 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.15 2009/03/25 22:19:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginarrayproc.c,v 1.16 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -95,6 +95,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
ArrayType *query = PG_GETARG_ARRAYTYPE_P(2);
+
/* int32 nkeys = PG_GETARG_INT32(3); */
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index a7258619ae..2e800ce44a 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.15 2009/03/24 20:17:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginbulk.c,v 1.16 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -84,7 +84,7 @@ ginInsertData(BuildAccumulator *accum, EntryAccumulator *entry, ItemPointer heap
static Datum
getDatumCopy(BuildAccumulator *accum, OffsetNumber attnum, Datum value)
{
- Form_pg_attribute att = accum->ginstate->origTupdesc->attrs[ attnum - 1 ];
+ Form_pg_attribute att = accum->ginstate->origTupdesc->attrs[attnum - 1];
Datum res;
if (att->attbyval)
@@ -161,8 +161,8 @@ ginInsertEntry(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum
* then calls itself for each parts
*/
static void
-ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
- Datum *entries, uint32 nentry,
+ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
+ Datum *entries, uint32 nentry,
uint32 low, uint32 high, uint32 offset)
{
uint32 pos;
@@ -187,8 +187,8 @@ ginChooseElem(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
* next middle on left part and middle of right part.
*/
void
-ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
- Datum *entries, int32 nentry)
+ginInsertRecordBA(BuildAccumulator *accum, ItemPointer heapptr, OffsetNumber attnum,
+ Datum *entries, int32 nentry)
{
uint32 i,
nbit = 0,
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 22199102dd..ebbdaa33e5 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.15 2009/06/06 02:39:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gindatapage.c,v 1.16 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -47,7 +47,7 @@ MergeItemPointers(ItemPointerData *dst,
while (aptr - a < na && bptr - b < nb)
{
- int cmp = compareItemPointers(aptr, bptr);
+ int cmp = compareItemPointers(aptr, bptr);
if (cmp > 0)
*dptr++ = *bptr++;
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index f35994db95..c4659cde1f 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.20 2009/06/06 02:39:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginentrypage.c,v 1.21 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -46,23 +46,23 @@
* Attributes of an index tuple are different for single and multicolumn index.
* For single-column case, index tuple stores only value to be indexed.
* For multicolumn case, it stores two attributes: column number of value
- * and value.
+ * and value.
*/
IndexTuple
GinFormTuple(GinState *ginstate, OffsetNumber attnum, Datum key, ItemPointerData *ipd, uint32 nipd)
{
- bool isnull[2] = {FALSE,FALSE};
+ bool isnull[2] = {FALSE, FALSE};
IndexTuple itup;
- if ( ginstate->oneCol )
+ if (ginstate->oneCol)
itup = index_form_tuple(ginstate->origTupdesc, &key, isnull);
else
{
- Datum datums[2];
+ Datum datums[2];
datums[0] = UInt16GetDatum(attnum);
datums[1] = key;
- itup = index_form_tuple(ginstate->tupdesc[attnum-1], datums, isnull);
+ itup = index_form_tuple(ginstate->tupdesc[attnum - 1], datums, isnull);
}
GinSetOrigSizePosting(itup, IndexTupleSize(itup));
@@ -136,12 +136,12 @@ entryIsMoveRight(GinBtree btree, Page page)
if (GinPageRightMost(page))
return FALSE;
- itup = getRightMostTuple(page);
+ itup = getRightMostTuple(page);
if (compareAttEntries(btree->ginstate,
- btree->entryAttnum, btree->entryValue,
- gintuple_get_attrnum(btree->ginstate, itup),
- gin_index_getattr(btree->ginstate, itup)) > 0)
+ btree->entryAttnum, btree->entryValue,
+ gintuple_get_attrnum(btree->ginstate, itup),
+ gin_index_getattr(btree->ginstate, itup)) > 0)
return TRUE;
return FALSE;
@@ -187,10 +187,10 @@ entryLocateEntry(GinBtree btree, GinBtreeStack *stack)
else
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid));
- result = compareAttEntries(btree->ginstate,
- btree->entryAttnum, btree->entryValue,
- gintuple_get_attrnum(btree->ginstate, itup),
- gin_index_getattr(btree->ginstate, itup));
+ result = compareAttEntries(btree->ginstate,
+ btree->entryAttnum, btree->entryValue,
+ gintuple_get_attrnum(btree->ginstate, itup),
+ gin_index_getattr(btree->ginstate, itup));
}
if (result == 0)
@@ -252,10 +252,10 @@ entryLocateLeafEntry(GinBtree btree, GinBtreeStack *stack)
int result;
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, mid));
- result = compareAttEntries(btree->ginstate,
- btree->entryAttnum, btree->entryValue,
- gintuple_get_attrnum(btree->ginstate, itup),
- gin_index_getattr(btree->ginstate, itup));
+ result = compareAttEntries(btree->ginstate,
+ btree->entryAttnum, btree->entryValue,
+ gintuple_get_attrnum(btree->ginstate, itup),
+ gin_index_getattr(btree->ginstate, itup));
if (result == 0)
{
stack->off = mid;
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index f474ad6598..20887ba56c 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginfast.c,v 1.2 2009/03/24 22:06:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginfast.c,v 1.3 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,9 +33,9 @@
typedef struct DatumArray
{
- Datum *values; /* expansible array */
- int32 nvalues; /* current number of valid entries */
- int32 maxvalues; /* allocated size of array */
+ Datum *values; /* expansible array */
+ int32 nvalues; /* current number of valid entries */
+ int32 maxvalues; /* allocated size of array */
} DatumArray;
@@ -46,11 +46,14 @@ static int32
writeListPage(Relation index, Buffer buffer,
IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
{
- Page page = BufferGetPage(buffer);
- int i, freesize, size=0;
- OffsetNumber l, off;
- char *workspace;
- char *ptr;
+ Page page = BufferGetPage(buffer);
+ int i,
+ freesize,
+ size = 0;
+ OffsetNumber l,
+ off;
+ char *workspace;
+ char *ptr;
/* workspace could be a local array; we use palloc for alignment */
workspace = palloc(BLCKSZ);
@@ -62,15 +65,15 @@ writeListPage(Relation index, Buffer buffer,
off = FirstOffsetNumber;
ptr = workspace;
- for(i=0; i<ntuples; i++)
+ for (i = 0; i < ntuples; i++)
{
- int this_size = IndexTupleSize(tuples[i]);
+ int this_size = IndexTupleSize(tuples[i]);
memcpy(ptr, tuples[i], this_size);
ptr += this_size;
size += this_size;
- l = PageAddItem(page, (Item)tuples[i], this_size, off, false, false);
+ l = PageAddItem(page, (Item) tuples[i], this_size, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
@@ -84,10 +87,10 @@ writeListPage(Relation index, Buffer buffer,
GinPageGetOpaque(page)->rightlink = rightlink;
/*
- * tail page may contain only the whole row(s) or final
- * part of row placed on previous pages
+ * tail page may contain only the whole row(s) or final part of row placed
+ * on previous pages
*/
- if ( rightlink == InvalidBlockNumber )
+ if (rightlink == InvalidBlockNumber)
{
GinPageSetFullRow(page);
GinPageGetOpaque(page)->maxoff = 1;
@@ -103,15 +106,15 @@ writeListPage(Relation index, Buffer buffer,
if (!index->rd_istemp)
{
- XLogRecData rdata[2];
- ginxlogInsertListPage data;
- XLogRecPtr recptr;
+ XLogRecData rdata[2];
+ ginxlogInsertListPage data;
+ XLogRecPtr recptr;
rdata[0].buffer = buffer;
rdata[0].buffer_std = true;
- rdata[0].data = (char*)&data;
+ rdata[0].data = (char *) &data;
rdata[0].len = sizeof(ginxlogInsertListPage);
- rdata[0].next = rdata+1;
+ rdata[0].next = rdata + 1;
rdata[1].buffer = InvalidBuffer;
rdata[1].data = workspace;
@@ -140,27 +143,29 @@ static void
makeSublist(Relation index, IndexTuple *tuples, int32 ntuples,
GinMetaPageData *res)
{
- Buffer curBuffer = InvalidBuffer;
- Buffer prevBuffer = InvalidBuffer;
- int i, size = 0, tupsize;
- int startTuple = 0;
+ Buffer curBuffer = InvalidBuffer;
+ Buffer prevBuffer = InvalidBuffer;
+ int i,
+ size = 0,
+ tupsize;
+ int startTuple = 0;
Assert(ntuples > 0);
/*
* Split tuples into pages
*/
- for(i=0;i<ntuples;i++)
+ for (i = 0; i < ntuples; i++)
{
- if ( curBuffer == InvalidBuffer )
+ if (curBuffer == InvalidBuffer)
{
curBuffer = GinNewBuffer(index);
- if ( prevBuffer != InvalidBuffer )
+ if (prevBuffer != InvalidBuffer)
{
res->nPendingPages++;
writeListPage(index, prevBuffer,
- tuples+startTuple, i-startTuple,
+ tuples + startTuple, i - startTuple,
BufferGetBlockNumber(curBuffer));
}
else
@@ -175,7 +180,7 @@ makeSublist(Relation index, IndexTuple *tuples, int32 ntuples,
tupsize = MAXALIGN(IndexTupleSize(tuples[i])) + sizeof(ItemIdData);
- if ( size + tupsize >= GinListPageSize )
+ if (size + tupsize >= GinListPageSize)
{
/* won't fit, force a new page and reprocess */
i--;
@@ -192,7 +197,7 @@ makeSublist(Relation index, IndexTuple *tuples, int32 ntuples,
*/
res->tail = BufferGetBlockNumber(curBuffer);
res->tailFreeSize = writeListPage(index, curBuffer,
- tuples+startTuple, ntuples-startTuple,
+ tuples + startTuple, ntuples - startTuple,
InvalidBlockNumber);
res->nPendingPages++;
/* that was only one heap tuple */
@@ -207,17 +212,17 @@ void
ginHeapTupleFastInsert(Relation index, GinState *ginstate,
GinTupleCollector *collector)
{
- Buffer metabuffer;
- Page metapage;
- GinMetaPageData *metadata = NULL;
- XLogRecData rdata[2];
- Buffer buffer = InvalidBuffer;
- Page page = NULL;
- ginxlogUpdateMeta data;
- bool separateList = false;
- bool needCleanup = false;
-
- if ( collector->ntuples == 0 )
+ Buffer metabuffer;
+ Page metapage;
+ GinMetaPageData *metadata = NULL;
+ XLogRecData rdata[2];
+ Buffer buffer = InvalidBuffer;
+ Page page = NULL;
+ ginxlogUpdateMeta data;
+ bool separateList = false;
+ bool needCleanup = false;
+
+ if (collector->ntuples == 0)
return;
data.node = index->rd_node;
@@ -232,7 +237,7 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
metapage = BufferGetPage(metabuffer);
- if ( collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GIN_PAGE_FREESIZE )
+ if (collector->sumsize + collector->ntuples * sizeof(ItemIdData) > GIN_PAGE_FREESIZE)
{
/*
* Total size is greater than one page => make sublist
@@ -244,8 +249,8 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_EXCLUSIVE);
metadata = GinPageGetMeta(metapage);
- if ( metadata->head == InvalidBlockNumber ||
- collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize )
+ if (metadata->head == InvalidBlockNumber ||
+ collector->sumsize + collector->ntuples * sizeof(ItemIdData) > metadata->tailFreeSize)
{
/*
* Pending list is empty or total size is greater than freespace
@@ -258,14 +263,14 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
}
}
- if ( separateList )
+ if (separateList)
{
- GinMetaPageData sublist;
+ GinMetaPageData sublist;
/*
* We should make sublist separately and append it to the tail
*/
- memset( &sublist, 0, sizeof(GinMetaPageData) );
+ memset(&sublist, 0, sizeof(GinMetaPageData));
makeSublist(index, collector->tuples, collector->ntuples, &sublist);
@@ -275,14 +280,14 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_EXCLUSIVE);
metadata = GinPageGetMeta(metapage);
- if ( metadata->head == InvalidBlockNumber )
+ if (metadata->head == InvalidBlockNumber)
{
/*
* Sublist becomes main list
*/
START_CRIT_SECTION();
- memcpy(metadata, &sublist, sizeof(GinMetaPageData) );
- memcpy(&data.metadata, &sublist, sizeof(GinMetaPageData) );
+ memcpy(metadata, &sublist, sizeof(GinMetaPageData));
+ memcpy(&data.metadata, &sublist, sizeof(GinMetaPageData));
}
else
{
@@ -305,7 +310,7 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
metadata->nPendingPages += sublist.nPendingPages;
metadata->nPendingHeapTuples += sublist.nPendingHeapTuples;
- memcpy(&data.metadata, metadata, sizeof(GinMetaPageData) );
+ memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
data.newRightlink = sublist.head;
MarkBufferDirty(buffer);
@@ -317,21 +322,23 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
* Insert into tail page, metapage is already locked
*/
- OffsetNumber l, off;
- int i, tupsize;
- char *ptr;
+ OffsetNumber l,
+ off;
+ int i,
+ tupsize;
+ char *ptr;
buffer = ReadBuffer(index, metadata->tail);
LockBuffer(buffer, GIN_EXCLUSIVE);
page = BufferGetPage(buffer);
off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
rdata[0].next = rdata + 1;
rdata[1].buffer = buffer;
rdata[1].buffer_std = true;
- ptr = rdata[1].data = (char *) palloc( collector->sumsize );
+ ptr = rdata[1].data = (char *) palloc(collector->sumsize);
rdata[1].len = collector->sumsize;
rdata[1].next = NULL;
@@ -342,44 +349,44 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
/*
* Increase counter of heap tuples
*/
- Assert( GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples );
+ Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
GinPageGetOpaque(page)->maxoff++;
metadata->nPendingHeapTuples++;
- for(i=0; i<collector->ntuples; i++)
+ for (i = 0; i < collector->ntuples; i++)
{
tupsize = IndexTupleSize(collector->tuples[i]);
- l = PageAddItem(page, (Item)collector->tuples[i], tupsize, off, false, false);
+ l = PageAddItem(page, (Item) collector->tuples[i], tupsize, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
- RelationGetRelationName(index));
+ RelationGetRelationName(index));
memcpy(ptr, collector->tuples[i], tupsize);
- ptr+=tupsize;
+ ptr += tupsize;
off++;
}
metadata->tailFreeSize -= collector->sumsize + collector->ntuples * sizeof(ItemIdData);
- memcpy(&data.metadata, metadata, sizeof(GinMetaPageData) );
+ memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
MarkBufferDirty(buffer);
}
/*
- * Make real write
+ * Make real write
*/
MarkBufferDirty(metabuffer);
- if ( !index->rd_istemp )
+ if (!index->rd_istemp)
{
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE, rdata);
PageSetLSN(metapage, recptr);
PageSetTLI(metapage, ThisTimeLineID);
- if ( buffer != InvalidBuffer )
+ if (buffer != InvalidBuffer)
{
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
@@ -390,23 +397,22 @@ ginHeapTupleFastInsert(Relation index, GinState *ginstate,
UnlockReleaseBuffer(buffer);
/*
- * Force pending list cleanup when it becomes too long.
- * And, ginInsertCleanup could take significant amount of
- * time, so we prefer to call it when it can do all the work in a
- * single collection cycle. In non-vacuum mode, it shouldn't
- * require maintenance_work_mem, so fire it while pending list is
- * still small enough to fit into work_mem.
+ * Force pending list cleanup when it becomes too long. And,
+ * ginInsertCleanup could take significant amount of time, so we prefer to
+ * call it when it can do all the work in a single collection cycle. In
+ * non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it
+ * while pending list is still small enough to fit into work_mem.
*
* ginInsertCleanup() should not be called inside our CRIT_SECTION.
*/
- if ( metadata->nPendingPages * GIN_PAGE_FREESIZE > work_mem * 1024L )
+ if (metadata->nPendingPages * GIN_PAGE_FREESIZE > work_mem * 1024L)
needCleanup = true;
UnlockReleaseBuffer(metabuffer);
END_CRIT_SECTION();
- if ( needCleanup )
+ if (needCleanup)
ginInsertCleanup(index, ginstate, false, NULL);
}
@@ -432,17 +438,17 @@ ginHeapTupleFastCollect(Relation index, GinState *ginstate,
/*
* Allocate/reallocate memory for storing collected tuples
*/
- if ( collector->tuples == NULL )
+ if (collector->tuples == NULL)
{
collector->lentuples = nentries * index->rd_att->natts;
- collector->tuples = (IndexTuple*)palloc(sizeof(IndexTuple) * collector->lentuples);
+ collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
}
- while ( collector->ntuples + nentries > collector->lentuples )
+ while (collector->ntuples + nentries > collector->lentuples)
{
collector->lentuples *= 2;
- collector->tuples = (IndexTuple*)repalloc( collector->tuples,
- sizeof(IndexTuple) * collector->lentuples);
+ collector->tuples = (IndexTuple *) repalloc(collector->tuples,
+ sizeof(IndexTuple) * collector->lentuples);
}
/*
@@ -450,13 +456,13 @@ ginHeapTupleFastCollect(Relation index, GinState *ginstate,
*/
for (i = 0; i < nentries; i++)
{
- int32 tupsize;
+ int32 tupsize;
collector->tuples[collector->ntuples + i] = GinFormTuple(ginstate, attnum, entries[i], NULL, 0);
collector->tuples[collector->ntuples + i]->t_tid = *item;
tupsize = IndexTupleSize(collector->tuples[collector->ntuples + i]);
- if ( tupsize > TOAST_INDEX_TARGET || tupsize >= GinMaxItemSize)
+ if (tupsize > TOAST_INDEX_TARGET || tupsize >= GinMaxItemSize)
elog(ERROR, "huge tuple");
collector->sumsize += tupsize;
@@ -480,9 +486,9 @@ static bool
shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
IndexBulkDeleteResult *stats)
{
- Page metapage;
- GinMetaPageData *metadata;
- BlockNumber blknoToDelete;
+ Page metapage;
+ GinMetaPageData *metadata;
+ BlockNumber blknoToDelete;
metapage = BufferGetPage(metabuffer);
metadata = GinPageGetMeta(metapage);
@@ -490,12 +496,12 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
do
{
- Page page;
- int i;
- int64 nDeletedHeapTuples = 0;
- ginxlogDeleteListPages data;
- XLogRecData rdata[1];
- Buffer buffers[GIN_NDELETE_AT_ONCE];
+ Page page;
+ int i;
+ int64 nDeletedHeapTuples = 0;
+ ginxlogDeleteListPages data;
+ XLogRecData rdata[1];
+ Buffer buffers[GIN_NDELETE_AT_ONCE];
data.node = index->rd_node;
@@ -507,24 +513,24 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
{
- data.toDelete[ data.ndeleted ] = blknoToDelete;
- buffers[ data.ndeleted ] = ReadBuffer(index, blknoToDelete);
- LockBuffer( buffers[ data.ndeleted ], GIN_EXCLUSIVE );
- page = BufferGetPage( buffers[ data.ndeleted ] );
+ data.toDelete[data.ndeleted] = blknoToDelete;
+ buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
+ LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
+ page = BufferGetPage(buffers[data.ndeleted]);
data.ndeleted++;
- if ( GinPageIsDeleted(page) )
+ if (GinPageIsDeleted(page))
{
/* concurrent cleanup process is detected */
- for(i=0;i<data.ndeleted;i++)
- UnlockReleaseBuffer( buffers[i] );
+ for (i = 0; i < data.ndeleted; i++)
+ UnlockReleaseBuffer(buffers[i]);
return true;
}
nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
- blknoToDelete = GinPageGetOpaque( page )->rightlink;
+ blknoToDelete = GinPageGetOpaque(page)->rightlink;
}
if (stats)
@@ -534,50 +540,50 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
metadata->head = blknoToDelete;
- Assert( metadata->nPendingPages >= data.ndeleted );
+ Assert(metadata->nPendingPages >= data.ndeleted);
metadata->nPendingPages -= data.ndeleted;
- Assert( metadata->nPendingHeapTuples >= nDeletedHeapTuples );
+ Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples);
metadata->nPendingHeapTuples -= nDeletedHeapTuples;
- if ( blknoToDelete == InvalidBlockNumber )
+ if (blknoToDelete == InvalidBlockNumber)
{
metadata->tail = InvalidBlockNumber;
metadata->tailFreeSize = 0;
metadata->nPendingPages = 0;
metadata->nPendingHeapTuples = 0;
}
- memcpy( &data.metadata, metadata, sizeof(GinMetaPageData));
+ memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
- MarkBufferDirty( metabuffer );
+ MarkBufferDirty(metabuffer);
- for(i=0; i<data.ndeleted; i++)
+ for (i = 0; i < data.ndeleted; i++)
{
- page = BufferGetPage( buffers[ i ] );
- GinPageGetOpaque( page )->flags = GIN_DELETED;
- MarkBufferDirty( buffers[ i ] );
+ page = BufferGetPage(buffers[i]);
+ GinPageGetOpaque(page)->flags = GIN_DELETED;
+ MarkBufferDirty(buffers[i]);
}
- if ( !index->rd_istemp )
+ if (!index->rd_istemp)
{
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_LISTPAGE, rdata);
PageSetLSN(metapage, recptr);
PageSetTLI(metapage, ThisTimeLineID);
- for(i=0; i<data.ndeleted; i++)
+ for (i = 0; i < data.ndeleted; i++)
{
- page = BufferGetPage( buffers[ i ] );
+ page = BufferGetPage(buffers[i]);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
}
- for(i=0; i<data.ndeleted; i++)
- UnlockReleaseBuffer( buffers[ i ] );
+ for (i = 0; i < data.ndeleted; i++)
+ UnlockReleaseBuffer(buffers[i]);
END_CRIT_SECTION();
- } while( blknoToDelete != newHead );
+ } while (blknoToDelete != newHead);
return false;
}
@@ -586,14 +592,14 @@ shiftList(Relation index, Buffer metabuffer, BlockNumber newHead,
static void
addDatum(DatumArray *datums, Datum datum)
{
- if ( datums->nvalues >= datums->maxvalues)
+ if (datums->nvalues >= datums->maxvalues)
{
datums->maxvalues *= 2;
- datums->values = (Datum*)repalloc(datums->values,
- sizeof(Datum)*datums->maxvalues);
+ datums->values = (Datum *) repalloc(datums->values,
+ sizeof(Datum) * datums->maxvalues);
}
- datums->values[ datums->nvalues++ ] = datum;
+ datums->values[datums->nvalues++] = datum;
}
/*
@@ -606,31 +612,33 @@ static void
processPendingPage(BuildAccumulator *accum, DatumArray *da,
Page page, OffsetNumber startoff)
{
- ItemPointerData heapptr;
- OffsetNumber i,maxoff;
- OffsetNumber attrnum, curattnum;
+ ItemPointerData heapptr;
+ OffsetNumber i,
+ maxoff;
+ OffsetNumber attrnum,
+ curattnum;
/* reset *da to empty */
da->nvalues = 0;
maxoff = PageGetMaxOffsetNumber(page);
- Assert( maxoff >= FirstOffsetNumber );
+ Assert(maxoff >= FirstOffsetNumber);
ItemPointerSetInvalid(&heapptr);
attrnum = 0;
for (i = startoff; i <= maxoff; i = OffsetNumberNext(i))
{
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
curattnum = gintuple_get_attrnum(accum->ginstate, itup);
- if ( !ItemPointerIsValid(&heapptr) )
+ if (!ItemPointerIsValid(&heapptr))
{
heapptr = itup->t_tid;
attrnum = curattnum;
}
- else if ( !(ItemPointerEquals(&heapptr, &itup->t_tid) &&
- curattnum == attrnum) )
+ else if (!(ItemPointerEquals(&heapptr, &itup->t_tid) &&
+ curattnum == attrnum))
{
/*
* We can insert several datums per call, but only for one heap
@@ -652,7 +660,7 @@ processPendingPage(BuildAccumulator *accum, DatumArray *da,
*
* This can be called concurrently by multiple backends, so it must cope.
* On first glance it looks completely not concurrent-safe and not crash-safe
- * either. The reason it's okay is that multiple insertion of the same entry
+ * either. The reason it's okay is that multiple insertion of the same entry
* is detected and treated as a no-op by gininsert.c. If we crash after
* posting entries to the main index and before removing them from the
* pending list, it's okay because when we redo the posting later on, nothing
@@ -671,20 +679,23 @@ void
ginInsertCleanup(Relation index, GinState *ginstate,
bool vac_delay, IndexBulkDeleteResult *stats)
{
- Buffer metabuffer, buffer;
- Page metapage, page;
- GinMetaPageData *metadata;
- MemoryContext opCtx, oldCtx;
- BuildAccumulator accum;
- DatumArray datums;
- BlockNumber blkno;
+ Buffer metabuffer,
+ buffer;
+ Page metapage,
+ page;
+ GinMetaPageData *metadata;
+ MemoryContext opCtx,
+ oldCtx;
+ BuildAccumulator accum;
+ DatumArray datums;
+ BlockNumber blkno;
metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
LockBuffer(metabuffer, GIN_SHARE);
metapage = BufferGetPage(metabuffer);
metadata = GinPageGetMeta(metapage);
- if ( metadata->head == InvalidBlockNumber )
+ if (metadata->head == InvalidBlockNumber)
{
/* Nothing to do */
UnlockReleaseBuffer(metabuffer);
@@ -702,7 +713,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_UNLOCK);
/*
- * Initialize. All temporary space will be in opCtx
+ * Initialize. All temporary space will be in opCtx
*/
opCtx = AllocSetContextCreate(CurrentMemoryContext,
"GIN insert cleanup temporary context",
@@ -712,24 +723,24 @@ ginInsertCleanup(Relation index, GinState *ginstate,
oldCtx = MemoryContextSwitchTo(opCtx);
- datums.maxvalues=128;
+ datums.maxvalues = 128;
datums.nvalues = 0;
- datums.values = (Datum*)palloc(sizeof(Datum)*datums.maxvalues);
+ datums.values = (Datum *) palloc(sizeof(Datum) * datums.maxvalues);
ginInitBA(&accum);
accum.ginstate = ginstate;
/*
- * At the top of this loop, we have pin and lock on the current page
- * of the pending list. However, we'll release that before exiting
- * the loop. Note we also have pin but not lock on the metapage.
+ * At the top of this loop, we have pin and lock on the current page of
+ * the pending list. However, we'll release that before exiting the loop.
+ * Note we also have pin but not lock on the metapage.
*/
- for(;;)
+ for (;;)
{
- if ( GinPageIsDeleted(page) )
+ if (GinPageIsDeleted(page))
{
/* another cleanup process is running concurrently */
- UnlockReleaseBuffer( buffer );
+ UnlockReleaseBuffer(buffer);
break;
}
@@ -742,9 +753,9 @@ ginInsertCleanup(Relation index, GinState *ginstate,
vacuum_delay_point();
/*
- * Is it time to flush memory to disk? Flush if we are at the end
- * of the pending list, or if we have a full row and memory is
- * getting full.
+ * Is it time to flush memory to disk? Flush if we are at the end of
+ * the pending list, or if we have a full row and memory is getting
+ * full.
*
* XXX using up maintenance_work_mem here is probably unreasonably
* much, since vacuum might already be using that much.
@@ -754,15 +765,16 @@ ginInsertCleanup(Relation index, GinState *ginstate,
(accum.allocatedMemory >= maintenance_work_mem * 1024L ||
accum.maxdepth > GIN_MAX_TREE_DEPTH)))
{
- ItemPointerData *list;
- uint32 nlist;
- Datum entry;
- OffsetNumber maxoff, attnum;
+ ItemPointerData *list;
+ uint32 nlist;
+ Datum entry;
+ OffsetNumber maxoff,
+ attnum;
/*
- * Unlock current page to increase performance.
- * Changes of page will be checked later by comparing
- * maxoff after completion of memory flush.
+ * Unlock current page to increase performance. Changes of page
+ * will be checked later by comparing maxoff after completion of
+ * memory flush.
*/
maxoff = PageGetMaxOffsetNumber(page);
LockBuffer(buffer, GIN_UNLOCK);
@@ -785,7 +797,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
LockBuffer(metabuffer, GIN_EXCLUSIVE);
LockBuffer(buffer, GIN_SHARE);
- if ( GinPageIsDeleted(page) )
+ if (GinPageIsDeleted(page))
{
/* another cleanup process is running concurrently */
UnlockReleaseBuffer(buffer);
@@ -795,16 +807,16 @@ ginInsertCleanup(Relation index, GinState *ginstate,
/*
* While we left the page unlocked, more stuff might have gotten
- * added to it. If so, process those entries immediately. There
+ * added to it. If so, process those entries immediately. There
* shouldn't be very many, so we don't worry about the fact that
* we're doing this with exclusive lock. Insertion algorithm
* gurantees that inserted row(s) will not continue on next page.
* NOTE: intentionally no vacuum_delay_point in this loop.
*/
- if ( PageGetMaxOffsetNumber(page) != maxoff )
+ if (PageGetMaxOffsetNumber(page) != maxoff)
{
ginInitBA(&accum);
- processPendingPage(&accum, &datums, page, maxoff+1);
+ processPendingPage(&accum, &datums, page, maxoff + 1);
while ((list = ginGetEntry(&accum, &attnum, &entry, &nlist)) != NULL)
ginEntryInsert(index, ginstate, attnum, entry, list, nlist, FALSE);
@@ -814,26 +826,27 @@ ginInsertCleanup(Relation index, GinState *ginstate,
* Remember next page - it will become the new list head
*/
blkno = GinPageGetOpaque(page)->rightlink;
- UnlockReleaseBuffer(buffer); /* shiftList will do exclusive locking */
+ UnlockReleaseBuffer(buffer); /* shiftList will do exclusive
+ * locking */
/*
* remove readed pages from pending list, at this point all
* content of readed pages is in regular structure
*/
- if ( shiftList(index, metabuffer, blkno, stats) )
+ if (shiftList(index, metabuffer, blkno, stats))
{
/* another cleanup process is running concurrently */
LockBuffer(metabuffer, GIN_UNLOCK);
break;
}
- Assert( blkno == metadata->head );
+ Assert(blkno == metadata->head);
LockBuffer(metabuffer, GIN_UNLOCK);
/*
* if we removed the whole pending list just exit
*/
- if ( blkno == InvalidBlockNumber )
+ if (blkno == InvalidBlockNumber)
break;
/*
@@ -842,7 +855,7 @@ ginInsertCleanup(Relation index, GinState *ginstate,
MemoryContextReset(opCtx);
ginInitBA(&accum);
datums.nvalues = 0;
- datums.values = (Datum*)palloc(sizeof(Datum)*datums.maxvalues);
+ datums.values = (Datum *) palloc(sizeof(Datum) * datums.maxvalues);
}
else
{
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index d57ee8febb..f5e0f788d1 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.26 2009/05/19 02:48:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginget.c,v 1.27 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -25,10 +25,10 @@
typedef struct pendingPosition
{
- Buffer pendingBuffer;
- OffsetNumber firstOffset;
- OffsetNumber lastOffset;
- ItemPointerData item;
+ Buffer pendingBuffer;
+ OffsetNumber firstOffset;
+ OffsetNumber lastOffset;
+ ItemPointerData item;
} pendingPosition;
@@ -64,19 +64,19 @@ findItemInPage(Page page, ItemPointer item, OffsetNumber *off)
* Goes to the next page if current offset is outside of bounds
*/
static bool
-moveRightIfItNeeded( GinBtreeData *btree, GinBtreeStack *stack )
+moveRightIfItNeeded(GinBtreeData *btree, GinBtreeStack *stack)
{
- Page page = BufferGetPage(stack->buffer);
+ Page page = BufferGetPage(stack->buffer);
- if ( stack->off > PageGetMaxOffsetNumber(page) )
+ if (stack->off > PageGetMaxOffsetNumber(page))
{
/*
* We scanned the whole page, so we should take right page
*/
stack->blkno = GinPageGetOpaque(page)->rightlink;
- if ( GinPageRightMost(page) )
- return false; /* no more pages */
+ if (GinPageRightMost(page))
+ return false; /* no more pages */
LockBuffer(stack->buffer, GIN_UNLOCK);
stack->buffer = ReleaseAndReadBuffer(stack->buffer, btree->index, stack->blkno);
@@ -92,12 +92,12 @@ moveRightIfItNeeded( GinBtreeData *btree, GinBtreeStack *stack )
* in scanEntry->partialMatch TIDBitmap
*/
static void
-scanForItems( Relation index, GinScanEntry scanEntry, BlockNumber rootPostingTree )
+scanForItems(Relation index, GinScanEntry scanEntry, BlockNumber rootPostingTree)
{
GinPostingTreeScan *gdi;
- Buffer buffer;
- Page page;
- BlockNumber blkno;
+ Buffer buffer;
+ Page page;
+ BlockNumber blkno;
gdi = prepareScanPostingTree(index, rootPostingTree, TRUE);
@@ -110,23 +110,23 @@ scanForItems( Relation index, GinScanEntry scanEntry, BlockNumber rootPostingTre
/*
* Goes through all leaves
*/
- for(;;)
+ for (;;)
{
page = BufferGetPage(buffer);
- if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0 && GinPageGetOpaque(page)->maxoff >= FirstOffsetNumber )
+ if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0 && GinPageGetOpaque(page)->maxoff >= FirstOffsetNumber)
{
- tbm_add_tuples( scanEntry->partialMatch,
- (ItemPointer)GinDataPageGetItem(page, FirstOffsetNumber),
- GinPageGetOpaque(page)->maxoff, false);
+ tbm_add_tuples(scanEntry->partialMatch,
+ (ItemPointer) GinDataPageGetItem(page, FirstOffsetNumber),
+ GinPageGetOpaque(page)->maxoff, false);
scanEntry->predictNumberResult += GinPageGetOpaque(page)->maxoff;
}
blkno = GinPageGetOpaque(page)->rightlink;
- if ( GinPageRightMost(page) )
+ if (GinPageRightMost(page))
{
UnlockReleaseBuffer(buffer);
- return; /* no more pages */
+ return; /* no more pages */
}
LockBuffer(buffer, GIN_UNLOCK);
@@ -142,21 +142,21 @@ scanForItems( Relation index, GinScanEntry scanEntry, BlockNumber rootPostingTre
* Returns true if done, false if it's needed to restart scan from scratch
*/
static bool
-computePartialMatchList( GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry scanEntry )
+computePartialMatchList(GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry scanEntry)
{
- Page page;
- IndexTuple itup;
+ Page page;
+ IndexTuple itup;
Datum idatum;
int32 cmp;
- scanEntry->partialMatch = tbm_create( work_mem * 1024L );
+ scanEntry->partialMatch = tbm_create(work_mem * 1024L);
- for(;;)
+ for (;;)
{
/*
* stack->off points to the interested entry, buffer is already locked
*/
- if ( moveRightIfItNeeded(btree, stack) == false )
+ if (moveRightIfItNeeded(btree, stack) == false)
return true;
page = BufferGetPage(stack->buffer);
@@ -165,10 +165,10 @@ computePartialMatchList( GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry
/*
* If tuple stores another attribute then stop scan
*/
- if ( gintuple_get_attrnum( btree->ginstate, itup ) != scanEntry->attnum )
+ if (gintuple_get_attrnum(btree->ginstate, itup) != scanEntry->attnum)
return true;
- idatum = gin_index_getattr( btree->ginstate, itup );
+ idatum = gin_index_getattr(btree->ginstate, itup);
/*----------
@@ -178,74 +178,74 @@ computePartialMatchList( GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry
* case cmp < 0 => not match and continue scan
*----------
*/
- cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[scanEntry->attnum-1],
+ cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[scanEntry->attnum - 1],
scanEntry->entry,
idatum,
UInt16GetDatum(scanEntry->strategy),
- PointerGetDatum(scanEntry->extra_data)));
+ PointerGetDatum(scanEntry->extra_data)));
- if ( cmp > 0 )
+ if (cmp > 0)
return true;
- else if ( cmp < 0 )
+ else if (cmp < 0)
{
stack->off++;
continue;
}
- if ( GinIsPostingTree(itup) )
+ if (GinIsPostingTree(itup))
{
BlockNumber rootPostingTree = GinGetPostingTree(itup);
Datum newDatum,
- savedDatum = datumCopy (
- idatum,
- btree->ginstate->origTupdesc->attrs[scanEntry->attnum-1]->attbyval,
- btree->ginstate->origTupdesc->attrs[scanEntry->attnum-1]->attlen
- );
+ savedDatum = datumCopy(
+ idatum,
+ btree->ginstate->origTupdesc->attrs[scanEntry->attnum - 1]->attbyval,
+ btree->ginstate->origTupdesc->attrs[scanEntry->attnum - 1]->attlen
+ );
+
/*
- * We should unlock current page (but not unpin) during
- * tree scan to prevent deadlock with vacuum processes.
+ * We should unlock current page (but not unpin) during tree scan
+ * to prevent deadlock with vacuum processes.
*
* We save current entry value (savedDatum) to be able to refind
* our tuple after re-locking
*/
LockBuffer(stack->buffer, GIN_UNLOCK);
- scanForItems( btree->index, scanEntry, rootPostingTree );
+ scanForItems(btree->index, scanEntry, rootPostingTree);
/*
- * We lock again the entry page and while it was unlocked
- * insert might occured, so we need to refind our position
+ * We lock the entry page again; while it was unlocked, an insert
+ * might have occurred, so we need to refind our position
*/
LockBuffer(stack->buffer, GIN_SHARE);
page = BufferGetPage(stack->buffer);
- if ( !GinPageIsLeaf(page) )
+ if (!GinPageIsLeaf(page))
{
/*
- * Root page becomes non-leaf while we unlock it. We
- * will start again, this situation doesn't cause
- * often - root can became a non-leaf only one per
- * life of index.
+ * The root page became non-leaf while it was unlocked. We will
+ * start again; this situation rarely arises - the root can
+ * become non-leaf only once in the life of an index.
*/
return false;
}
- for(;;)
+ for (;;)
{
- if ( moveRightIfItNeeded(btree, stack) == false )
- elog(ERROR, "lost saved point in index"); /* must not happen !!! */
+ if (moveRightIfItNeeded(btree, stack) == false)
+ elog(ERROR, "lost saved point in index"); /* must not happen !!! */
page = BufferGetPage(stack->buffer);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, stack->off));
- newDatum = gin_index_getattr( btree->ginstate, itup );
+ newDatum = gin_index_getattr(btree->ginstate, itup);
- if ( gintuple_get_attrnum( btree->ginstate, itup ) != scanEntry->attnum )
- elog(ERROR, "lost saved point in index"); /* must not happen !!! */
+ if (gintuple_get_attrnum(btree->ginstate, itup) != scanEntry->attnum)
+ elog(ERROR, "lost saved point in index"); /* must not happen !!! */
- if ( compareEntries(btree->ginstate, scanEntry->attnum, newDatum, savedDatum) == 0 )
+ if (compareEntries(btree->ginstate, scanEntry->attnum, newDatum, savedDatum) == 0)
{
/* Found! */
- if ( btree->ginstate->origTupdesc->attrs[scanEntry->attnum-1]->attbyval == false )
- pfree( DatumGetPointer(savedDatum) );
+ if (btree->ginstate->origTupdesc->attrs[scanEntry->attnum - 1]->attbyval == false)
+ pfree(DatumGetPointer(savedDatum));
break;
}
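The comparePartialFn contract applied in the hunk above (cmp == 0 means match and keep scanning, cmp > 0 means stop the scan, cmp < 0 means skip and continue) can be illustrated with a prefix-match comparator. This is a minimal sketch, not an actual opclass function; prefix_partial_cmp is a hypothetical name:

#include <string.h>

/*
 * Return convention mirrors comparePartialFn:
 *   0 -> candidate matches the probe; emit it and keep scanning
 *  <0 -> no match yet; advance to the next entry
 *  >0 -> candidate sorts past every possible match; stop the scan
 */
static int
prefix_partial_cmp(const char *probe, const char *candidate)
{
	int		c = strncmp(probe, candidate, strlen(probe));

	if (c == 0)
		return 0;				/* candidate extends the probe */
	return (c < 0) ? 1 : -1;	/* past the range, or not there yet */
}

Because entries are scanned in sorted order, returning a positive value as soon as the candidate sorts past the probe's prefix range lets the scan terminate early instead of visiting the rest of the tree.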
@@ -254,8 +254,8 @@ computePartialMatchList( GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry
}
else
{
- tbm_add_tuples( scanEntry->partialMatch, GinGetPosting(itup), GinGetNPosting(itup), false);
- scanEntry->predictNumberResult += GinGetNPosting(itup);
+ tbm_add_tuples(scanEntry->partialMatch, GinGetPosting(itup), GinGetNPosting(itup), false);
+ scanEntry->predictNumberResult += GinGetNPosting(itup);
}
/*
@@ -273,10 +273,10 @@ computePartialMatchList( GinBtreeData *btree, GinBtreeStack *stack, GinScanEntry
static void
startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
{
- GinBtreeData btreeEntry;
- GinBtreeStack *stackEntry;
- Page page;
- bool needUnlock = TRUE;
+ GinBtreeData btreeEntry;
+ GinBtreeStack *stackEntry;
+ Page page;
+ bool needUnlock = TRUE;
entry->buffer = InvalidBuffer;
entry->offset = InvalidOffsetNumber;
@@ -294,8 +294,8 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
}
/*
- * we should find entry, and begin scan of posting tree
- * or just store posting list in memory
+ * we should find the entry, then either begin a scan of the posting
+ * tree or just store the posting list in memory
*/
prepareEntryScan(&btreeEntry, index, entry->attnum, entry->entry, ginstate);
@@ -305,27 +305,26 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
entry->isFinished = TRUE;
- if ( entry->isPartialMatch )
+ if (entry->isPartialMatch)
{
/*
- * btreeEntry.findItem points to the first equal or greater value
- * than needed. So we will scan further and collect all
- * ItemPointers
+ * btreeEntry.findItem points to the first value equal to or greater
+ * than the one needed, so we scan onward and collect all ItemPointers
*/
btreeEntry.findItem(&btreeEntry, stackEntry);
- if ( computePartialMatchList( &btreeEntry, stackEntry, entry ) == false )
+ if (computePartialMatchList(&btreeEntry, stackEntry, entry) == false)
{
/*
- * GIN tree was seriously restructured, so we will
- * cleanup all found data and rescan. See comments near
- * 'return false' in computePartialMatchList()
+ * The GIN tree was seriously restructured, so we will clean up
+ * all data found so far and rescan. See comments near 'return
+ * false' in computePartialMatchList()
*/
- if ( entry->partialMatch )
+ if (entry->partialMatch)
{
if (entry->partialMatchIterator)
tbm_end_iterate(entry->partialMatchIterator);
entry->partialMatchIterator = NULL;
- tbm_free( entry->partialMatch );
+ tbm_free(entry->partialMatch);
entry->partialMatch = NULL;
}
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
@@ -335,7 +334,7 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
return;
}
- if ( entry->partialMatch && !tbm_is_empty(entry->partialMatch) )
+ if (entry->partialMatch && !tbm_is_empty(entry->partialMatch))
{
entry->partialMatchIterator = tbm_begin_iterate(entry->partialMatch);
entry->isFinished = FALSE;
@@ -352,22 +351,22 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
Page page;
/*
- * We should unlock entry page before make deal with
- * posting tree to prevent deadlocks with vacuum processes.
- * Because entry is never deleted from page and posting tree is
- * never reduced to the posting list, we can unlock page after
- * getting BlockNumber of root of posting tree.
+ * We should unlock the entry page before dealing with the posting
+ * tree, to prevent deadlocks with vacuum processes. Because an
+ * entry is never deleted from a page and a posting tree is never
+ * reduced back to a posting list, we can unlock the page once we
+ * have the BlockNumber of the posting tree's root.
*/
LockBuffer(stackEntry->buffer, GIN_UNLOCK);
needUnlock = FALSE;
gdi = prepareScanPostingTree(index, rootPostingTree, TRUE);
entry->buffer = scanBeginPostingTree(gdi);
+
/*
* We keep buffer pinned because we need to prevent deletion of
- * page during scan. See GIN's vacuum implementation. RefCount
- * is increased to keep buffer pinned after freeGinBtreeStack()
- * call.
+ * page during scan. See GIN's vacuum implementation. RefCount is
+ * increased to keep buffer pinned after freeGinBtreeStack() call.
*/
IncrBufferRefCount(entry->buffer);
@@ -377,10 +376,10 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
/*
* Keep page content in memory to prevent durable page locking
*/
- entry->list = (ItemPointerData *) palloc( BLCKSZ );
+ entry->list = (ItemPointerData *) palloc(BLCKSZ);
entry->nlist = GinPageGetOpaque(page)->maxoff;
- memcpy( entry->list, GinDataPageGetItem(page, FirstOffsetNumber),
- GinPageGetOpaque(page)->maxoff * sizeof(ItemPointerData) );
+ memcpy(entry->list, GinDataPageGetItem(page, FirstOffsetNumber),
+ GinPageGetOpaque(page)->maxoff * sizeof(ItemPointerData));
LockBuffer(entry->buffer, GIN_UNLOCK);
freeGinBtreeStack(gdi->stack);
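The copy-then-unlock step above is worth seeing in isolation: the posting items are snapshotted into backend-local memory so the page's content lock need not be held while the scan consumes them. A minimal sketch, assuming gin.h's GIN_UNLOCK mode and a caller that passes the item array and count already read from the page:

#include "postgres.h"
#include "access/gin.h"			/* GIN_UNLOCK */
#include "storage/bufmgr.h"
#include "storage/itemptr.h"

static ItemPointerData *
snapshot_posting_items(Buffer buf, ItemPointer src, int nitems)
{
	ItemPointerData *list = (ItemPointerData *) palloc(BLCKSZ);

	memcpy(list, src, nitems * sizeof(ItemPointerData));
	LockBuffer(buf, GIN_UNLOCK);	/* safe: we read from the copy now */
	return list;
}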
@@ -397,7 +396,7 @@ startScanEntry(Relation index, GinState *ginstate, GinScanEntry entry)
}
if (needUnlock)
- LockBuffer(stackEntry->buffer, GIN_UNLOCK);
+ LockBuffer(stackEntry->buffer, GIN_UNLOCK);
freeGinBtreeStack(stackEntry);
}
@@ -419,10 +418,10 @@ startScanKey(Relation index, GinState *ginstate, GinScanKey key)
if (GinFuzzySearchLimit > 0)
{
/*
- * If all of keys more than threshold we will try to reduce
- * result, we hope (and only hope, for intersection operation of
- * array our supposition isn't true), that total result will not
- * more than minimal predictNumberResult.
+ * If all the keys predict more matches than the threshold, we try to
+ * reduce the result: we hope (and only hope - for the array
+ * intersection operation our supposition isn't true) that the total
+ * result will not exceed the minimal predictNumberResult.
*/
for (i = 0; i < key->nentries; i++)
@@ -459,7 +458,7 @@ entryGetNextItem(Relation index, GinScanEntry entry)
Page page;
BlockNumber blkno;
- for(;;)
+ for (;;)
{
entry->offset++;
@@ -471,7 +470,7 @@ entryGetNextItem(Relation index, GinScanEntry entry)
LockBuffer(entry->buffer, GIN_SHARE);
page = BufferGetPage(entry->buffer);
- for(;;)
+ for (;;)
{
/*
* It's needed to go by right link. During that we should refind
@@ -501,20 +500,20 @@ entryGetNextItem(Relation index, GinScanEntry entry)
* Found position equal to or greater than stored
*/
entry->nlist = GinPageGetOpaque(page)->maxoff;
- memcpy( entry->list, GinDataPageGetItem(page, FirstOffsetNumber),
- GinPageGetOpaque(page)->maxoff * sizeof(ItemPointerData) );
+ memcpy(entry->list, GinDataPageGetItem(page, FirstOffsetNumber),
+ GinPageGetOpaque(page)->maxoff * sizeof(ItemPointerData));
LockBuffer(entry->buffer, GIN_UNLOCK);
- if ( !ItemPointerIsValid(&entry->curItem) ||
- compareItemPointers( &entry->curItem, entry->list + entry->offset - 1 ) == 0 )
+ if (!ItemPointerIsValid(&entry->curItem) ||
+ compareItemPointers(&entry->curItem, entry->list + entry->offset - 1) == 0)
{
/*
- * First pages are deleted or empty, or we found exact position,
- * so break inner loop and continue outer one.
+ * The first pages are deleted or empty, or we found the exact
+ * position, so break the inner loop and continue the outer one.
*/
- break;
+ break;
}
/*
@@ -543,7 +542,7 @@ entryGetItem(Relation index, GinScanEntry entry)
entry->isFinished = entry->master->isFinished;
entry->curItem = entry->master->curItem;
}
- else if ( entry->partialMatch )
+ else if (entry->partialMatch)
{
do
{
@@ -552,7 +551,7 @@ entryGetItem(Relation index, GinScanEntry entry)
{
entry->partialMatchResult = tbm_iterate(entry->partialMatchIterator);
- if ( entry->partialMatchResult == NULL )
+ if (entry->partialMatchResult == NULL)
{
ItemPointerSet(&entry->curItem, InvalidBlockNumber, InvalidOffsetNumber);
tbm_end_iterate(entry->partialMatchIterator);
@@ -562,22 +561,23 @@ entryGetItem(Relation index, GinScanEntry entry)
}
/*
- * reset counter to the beginning of entry->partialMatchResult.
- * Note: entry->offset is still greater than
- * partialMatchResult->ntuples if partialMatchResult is
- * lossy. So, on next call we will get next result from
- * TIDBitmap.
+ * reset counter to the beginning of
+ * entry->partialMatchResult. Note: entry->offset is still
+ * greater than partialMatchResult->ntuples if
+ * partialMatchResult is lossy. So, on the next call we will
+ * get the next result from TIDBitmap.
*/
entry->offset = 0;
}
- if ( entry->partialMatchResult->ntuples < 0 )
+ if (entry->partialMatchResult->ntuples < 0)
{
/*
* lossy result, so we need to check the whole page
*/
ItemPointerSetLossyPage(&entry->curItem,
entry->partialMatchResult->blockno);
+
/*
* We might as well fall out of the loop; we could not
* estimate number of results on this page to support correct
@@ -618,7 +618,7 @@ entryGetItem(Relation index, GinScanEntry entry)
* Sets key->curItem to new found heap item pointer for one scan key
* Returns isFinished, ie TRUE means we did NOT get a new item pointer!
* Also, *keyrecheck is set true if recheck is needed for this scan key.
- * Note: lossy page could be returned after items from the same page.
+ * Note: lossy page could be returned after items from the same page.
*/
static bool
keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
@@ -636,10 +636,10 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
{
/*
 * move forward from the previous value and set a new curItem, which is
- * minimal from entries->curItems. Lossy page is encoded by ItemPointer
- * with max value for offset (0xffff), so if there is an non-lossy entries
- * on lossy page they will returned too and after that the whole page.
- * That's not a problem for resulting tidbitmap.
+ * the minimum of entries->curItems. A lossy page is encoded by an
+ * ItemPointer with the maximum offset value (0xffff), so if there are
+ * non-lossy entries on a lossy page they will be returned too, and
+ * after that the whole page. That's not a problem for the resulting
+ * tidbitmap.
*/
ItemPointerSetMax(&key->curItem);
for (i = 0; i < key->nentries; i++)
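The lossy-page encoding the comment above relies on can be made concrete. A minimal sketch, assuming (as the comment states) that the maximum offset value 0xffff is reserved as the whole-page sentinel; the helper names are hypothetical:

#include "postgres.h"
#include "storage/itemptr.h"

#define WHOLE_PAGE_OFFSET	((OffsetNumber) 0xffff)	/* assumed sentinel */

static void
set_lossy_page(ItemPointer ptr, BlockNumber blkno)
{
	ItemPointerSet(ptr, blkno, WHOLE_PAGE_OFFSET);
}

static bool
is_lossy_page(ItemPointer ptr)
{
	return ItemPointerGetOffsetNumber(ptr) == WHOLE_PAGE_OFFSET;
}

Valid heap offsets are far below 0xffff, so a lossy pointer sorts after every exact pointer on the same page - which is exactly why the non-lossy entries come out first and the whole page follows.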
@@ -649,9 +649,9 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
if (key->entryRes[i])
{
/*
- * Move forward only entries which was the least
- * on previous call, key->entryRes[i] points that
- * current entry was a result of loop/call.
+ * Advance only those entries which were the least on the previous
+ * call; key->entryRes[i] indicates that the current entry was
+ * part of that call's result.
*/
if (entry->isFinished == FALSE && entryGetItem(index, entry) == FALSE)
{
@@ -685,10 +685,10 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
/*----------
* entryRes array is used for:
* - as an argument for consistentFn
- * - entry->curItem with corresponding key->entryRes[i] == false are
- * greater than key->curItem, so next loop/call they should be
- * renewed by entryGetItem(). So, we need to set up an array before
- * checking of lossy page.
+ * - entries whose key->entryRes[i] == false have entry->curItem
+ * greater than key->curItem, so on the next loop/call they must
+ * be refreshed by entryGetItem(). So, we need to set up the
+ * array before checking for a lossy page.
*----------
*/
for (i = 0; i < key->nentries; i++)
@@ -717,7 +717,7 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
return FALSE;
oldCtx = MemoryContextSwitchTo(tempCtx);
- res = DatumGetBool(FunctionCall6(&ginstate->consistentFn[key->attnum-1],
+ res = DatumGetBool(FunctionCall6(&ginstate->consistentFn[key->attnum - 1],
PointerGetDatum(key->entryRes),
UInt16GetDatum(key->strategy),
key->query,
@@ -745,35 +745,36 @@ keyGetItem(Relation index, GinState *ginstate, MemoryContext tempCtx,
static bool
scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
{
- OffsetNumber maxoff;
- Page page;
- IndexTuple itup;
+ OffsetNumber maxoff;
+ Page page;
+ IndexTuple itup;
- ItemPointerSetInvalid( &pos->item );
- for(;;)
+ ItemPointerSetInvalid(&pos->item);
+ for (;;)
{
page = BufferGetPage(pos->pendingBuffer);
maxoff = PageGetMaxOffsetNumber(page);
- if ( pos->firstOffset > maxoff )
+ if (pos->firstOffset > maxoff)
{
BlockNumber blkno = GinPageGetOpaque(page)->rightlink;
- if ( blkno == InvalidBlockNumber )
+
+ if (blkno == InvalidBlockNumber)
{
UnlockReleaseBuffer(pos->pendingBuffer);
- pos->pendingBuffer=InvalidBuffer;
+ pos->pendingBuffer = InvalidBuffer;
return false;
}
else
{
/*
- * Here we must prevent deletion of next page by
- * insertcleanup process, which may be trying to obtain
- * exclusive lock on current page. So, we lock next
- * page before releasing the current one
+ * Here we must prevent deletion of the next page by the insert
+ * cleanup process, which may be trying to obtain an exclusive
+ * lock on the current page; so we lock the next page before
+ * releasing the current one
*/
- Buffer tmpbuf = ReadBuffer(scan->indexRelation, blkno);
+ Buffer tmpbuf = ReadBuffer(scan->indexRelation, blkno);
LockBuffer(tmpbuf, GIN_SHARE);
UnlockReleaseBuffer(pos->pendingBuffer);
@@ -786,12 +787,12 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, pos->firstOffset));
pos->item = itup->t_tid;
- if ( GinPageHasFullRow(page) )
+ if (GinPageHasFullRow(page))
{
/*
* find itempointer to the next row
*/
- for(pos->lastOffset = pos->firstOffset+1; pos->lastOffset<=maxoff; pos->lastOffset++)
+ for (pos->lastOffset = pos->firstOffset + 1; pos->lastOffset <= maxoff; pos->lastOffset++)
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, pos->lastOffset));
if (!ItemPointerEquals(&pos->item, &itup->t_tid))
@@ -807,9 +808,9 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
}
/*
- * Now pos->firstOffset points to the first tuple of current heap row,
- * pos->lastOffset points to the first tuple of second heap row (or
- * to the end of page)
+ * Now pos->firstOffset points to the first tuple of the current
+ * heap row, and pos->lastOffset points to the first tuple of the
+ * next heap row (or to the end of the page)
*/
break;
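The locking order described above is classic hand-over-hand lock coupling: take the next page's lock before giving up the current one, so there is no window in which a concurrent insert-cleanup can delete the page out from under the scan. A minimal sketch, assuming gin.h's GIN_SHARE mode; step_right_locked is a hypothetical name:

#include "postgres.h"
#include "access/gin.h"			/* GIN_SHARE */
#include "storage/bufmgr.h"
#include "utils/rel.h"

static Buffer
step_right_locked(Relation index, Buffer cur, BlockNumber next_blkno)
{
	Buffer		next = ReadBuffer(index, next_blkno);

	LockBuffer(next, GIN_SHARE);	/* lock the next page first... */
	UnlockReleaseBuffer(cur);		/* ...only then let go of the current one */
	return next;
}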
@@ -830,23 +831,23 @@ static bool
matchPartialInPendingList(GinState *ginstate, Page page,
OffsetNumber off, OffsetNumber maxoff,
Datum value, OffsetNumber attrnum,
- Datum *datum, bool *datumExtracted,
+ Datum *datum, bool *datumExtracted,
StrategyNumber strategy,
Pointer extra_data)
{
- IndexTuple itup;
- int32 cmp;
+ IndexTuple itup;
+ int32 cmp;
- while ( off < maxoff )
+ while (off < maxoff)
{
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, off));
- if ( attrnum != gintuple_get_attrnum(ginstate, itup) )
+ if (attrnum != gintuple_get_attrnum(ginstate, itup))
return false;
- if (datumExtracted[ off-1 ] == false)
+ if (datumExtracted[off - 1] == false)
{
- datum[ off-1 ] = gin_index_getattr(ginstate, itup);
- datumExtracted[ off-1 ] = true;
+ datum[off - 1] = gin_index_getattr(ginstate, itup);
+ datumExtracted[off - 1] = true;
}
/*----------
@@ -856,9 +857,9 @@ matchPartialInPendingList(GinState *ginstate, Page page,
* case cmp < 0 => not match and continue scan
*----------
*/
- cmp = DatumGetInt32(FunctionCall4(&ginstate->comparePartialFn[attrnum-1],
+ cmp = DatumGetInt32(FunctionCall4(&ginstate->comparePartialFn[attrnum - 1],
value,
- datum[off-1],
+ datum[off - 1],
UInt16GetDatum(strategy),
PointerGetDatum(extra_data)));
if (cmp == 0)
@@ -882,12 +883,13 @@ matchPartialInPendingList(GinState *ginstate, Page page,
static bool
collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
{
- GinScanOpaque so = (GinScanOpaque) scan->opaque;
- OffsetNumber attrnum;
- Page page;
- IndexTuple itup;
- int i, j;
- bool hasMatch = false;
+ GinScanOpaque so = (GinScanOpaque) scan->opaque;
+ OffsetNumber attrnum;
+ Page page;
+ IndexTuple itup;
+ int i,
+ j;
+ bool hasMatch = false;
/*
* Resets entryRes
@@ -895,38 +897,40 @@ collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
for (i = 0; i < so->nkeys; i++)
{
GinScanKey key = so->keys + i;
- memset( key->entryRes, FALSE, key->nentries );
+
+ memset(key->entryRes, FALSE, key->nentries);
}
- for(;;)
+ for (;;)
{
- Datum datum[ BLCKSZ/sizeof(IndexTupleData) ];
- bool datumExtracted[ BLCKSZ/sizeof(IndexTupleData) ];
+ Datum datum[BLCKSZ / sizeof(IndexTupleData)];
+ bool datumExtracted[BLCKSZ / sizeof(IndexTupleData)];
- Assert( pos->lastOffset > pos->firstOffset );
- memset(datumExtracted + pos->firstOffset - 1, 0, sizeof(bool) * (pos->lastOffset - pos->firstOffset ));
+ Assert(pos->lastOffset > pos->firstOffset);
+ memset(datumExtracted + pos->firstOffset - 1, 0, sizeof(bool) * (pos->lastOffset - pos->firstOffset));
page = BufferGetPage(pos->pendingBuffer);
- for(i = 0; i < so->nkeys; i++)
+ for (i = 0; i < so->nkeys; i++)
{
- GinScanKey key = so->keys + i;
+ GinScanKey key = so->keys + i;
- for(j=0; j<key->nentries; j++)
+ for (j = 0; j < key->nentries; j++)
{
- OffsetNumber StopLow = pos->firstOffset,
- StopHigh = pos->lastOffset,
- StopMiddle;
- GinScanEntry entry = key->scanEntry + j;
+ OffsetNumber StopLow = pos->firstOffset,
+ StopHigh = pos->lastOffset,
+ StopMiddle;
+ GinScanEntry entry = key->scanEntry + j;
/* already true - don't do extra work */
- if ( key->entryRes[j] )
+ if (key->entryRes[j])
continue;
/*
- * Interested tuples are from pos->firstOffset to pos->lastOffset
- * and they are ordered by (attnum, Datum) as it's done in entry tree
- * So we could use binary search to prevent linear scanning
+ * The tuples of interest run from pos->firstOffset to
+ * pos->lastOffset and are ordered by (attnum, Datum), just as in
+ * the entry tree, so we can use binary search to avoid a linear
+ * scan
*/
while (StopLow < StopHigh)
{
@@ -941,53 +945,53 @@ collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
StopLow = StopMiddle + 1;
else
{
- int res;
+ int res;
- if (datumExtracted[ StopMiddle-1 ] == false)
+ if (datumExtracted[StopMiddle - 1] == false)
{
- datum[ StopMiddle-1 ] = gin_index_getattr(&so->ginstate, itup);
- datumExtracted[ StopMiddle-1 ] = true;
+ datum[StopMiddle - 1] = gin_index_getattr(&so->ginstate, itup);
+ datumExtracted[StopMiddle - 1] = true;
}
- res = compareEntries(&so->ginstate,
- entry->attnum,
- entry->entry,
- datum[ StopMiddle-1 ]);
+ res = compareEntries(&so->ginstate,
+ entry->attnum,
+ entry->entry,
+ datum[StopMiddle - 1]);
- if ( res == 0 )
+ if (res == 0)
{
/*
- * The exact match causes, so we just scan from
- * current position to find a partial match.
- * See comment above about tuple's ordering.
+ * An exact match was found, so we just scan from the
+ * current position to find a partial match. See the
+ * comment above about tuple ordering.
*/
- if ( entry->isPartialMatch )
+ if (entry->isPartialMatch)
key->entryRes[j] =
matchPartialInPendingList(&so->ginstate,
- page, StopMiddle,
+ page, StopMiddle,
pos->lastOffset,
entry->entry,
entry->attnum,
datum,
datumExtracted,
entry->strategy,
- entry->extra_data);
+ entry->extra_data);
else
key->entryRes[j] = true;
break;
}
- else if ( res < 0 )
+ else if (res < 0)
StopHigh = StopMiddle;
else
StopLow = StopMiddle + 1;
}
}
- if ( StopLow>=StopHigh && entry->isPartialMatch )
+ if (StopLow >= StopHigh && entry->isPartialMatch)
{
- /*
- * The exact match wasn't found, so we need to start
- * scan from first tuple greater then current entry
- * See comment above about tuple's ordering.
+ /*
+ * An exact match wasn't found, so we need to start the scan
+ * from the first tuple greater than the current entry. See
+ * the comment above about tuple ordering.
*/
key->entryRes[j] =
matchPartialInPendingList(&so->ginstate,
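The search in the hunks above is a textbook lower-bound binary search over the offset range. Stripped of the tuple access, it looks like this (a sketch; cmp_probe_to_tuple is a hypothetical comparator returning negative, zero, or positive as the probe sorts before, equal to, or after the tuple at the given offset):

#include "postgres.h"
#include "storage/off.h"

extern int	cmp_probe_to_tuple(OffsetNumber off);	/* hypothetical */

static OffsetNumber
pending_lower_bound(OffsetNumber lo, OffsetNumber hi)
{
	while (lo < hi)
	{
		OffsetNumber mid = lo + (hi - lo) / 2;
		int			c = cmp_probe_to_tuple(mid);

		if (c == 0)
			return mid;			/* exact match: scan onward from here */
		if (c < 0)
			hi = mid;			/* probe sorts before mid */
		else
			lo = mid + 1;		/* probe sorts after mid */
	}
	return lo;					/* first tuple greater than the probe */
}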
@@ -1007,7 +1011,7 @@ collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
pos->firstOffset = pos->lastOffset;
- if ( GinPageHasFullRow(page) )
+ if (GinPageHasFullRow(page))
{
/*
* We scan all values from one tuple, go to next one
@@ -1020,12 +1024,13 @@ collectDatumForItem(IndexScanDesc scan, pendingPosition *pos)
ItemPointerData item = pos->item;
/*
- * need to get next portion of tuples of row containing
- * on several pages
+ * need to get the next portion of tuples of a row that spans
+ * several pages
*/
- if ( scanGetCandidate(scan, pos) == false || !ItemPointerEquals(&pos->item, &item) )
- elog(ERROR,"Could not process tuple"); /* XXX should not be here ! */
+ if (scanGetCandidate(scan, pos) == false || !ItemPointerEquals(&pos->item, &item))
+ elog(ERROR, "Could not process tuple"); /* XXX should not be
+ * here ! */
}
}
@@ -1039,12 +1044,14 @@ static void
scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
{
GinScanOpaque so = (GinScanOpaque) scan->opaque;
- MemoryContext oldCtx;
- bool recheck, keyrecheck, match;
- int i;
- pendingPosition pos;
- Buffer metabuffer = ReadBuffer(scan->indexRelation, GIN_METAPAGE_BLKNO);
- BlockNumber blkno;
+ MemoryContext oldCtx;
+ bool recheck,
+ keyrecheck,
+ match;
+ int i;
+ pendingPosition pos;
+ Buffer metabuffer = ReadBuffer(scan->indexRelation, GIN_METAPAGE_BLKNO);
+ BlockNumber blkno;
*ntids = 0;
@@ -1052,39 +1059,38 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
blkno = GinPageGetMeta(BufferGetPage(metabuffer))->head;
/*
- * fetch head of list before unlocking metapage.
- * head page must be pinned to prevent deletion by vacuum process
+ * fetch the head of the list before unlocking the metapage; the head
+ * page must be pinned to prevent deletion by the vacuum process
*/
- if ( blkno == InvalidBlockNumber )
+ if (blkno == InvalidBlockNumber)
{
/* No pending list, so proceed with normal scan */
- UnlockReleaseBuffer( metabuffer );
+ UnlockReleaseBuffer(metabuffer);
return;
}
pos.pendingBuffer = ReadBuffer(scan->indexRelation, blkno);
LockBuffer(pos.pendingBuffer, GIN_SHARE);
pos.firstOffset = FirstOffsetNumber;
- UnlockReleaseBuffer( metabuffer );
+ UnlockReleaseBuffer(metabuffer);
/*
- * loop for each heap row. scanGetCandidate returns full row
- * or row's tuples from first page.
+ * loop over each heap row. scanGetCandidate returns the full row, or
+ * the row's tuples from its first page.
*/
- while( scanGetCandidate(scan, &pos) )
+ while (scanGetCandidate(scan, &pos))
{
-
/*
- * Check entries in tuple and setup entryRes array
- * If tuples of heap's row are placed on several pages
- * collectDatumForItem will read all of that pages.
+ * Check the entries in the tuple and set up the entryRes array. If
+ * the tuples of a heap row are placed on several pages,
+ * collectDatumForItem will read all of those pages.
*/
if (!collectDatumForItem(scan, &pos))
continue;
/*
- * Matching of entries of one row is finished,
- * so check row by consistent function.
+ * Matching the entries of one row is finished, so check the row
+ * with the consistent function.
*/
oldCtx = MemoryContextSwitchTo(so->tempCtx);
recheck = false;
@@ -1092,11 +1098,11 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
for (i = 0; i < so->nkeys; i++)
{
- GinScanKey key = so->keys + i;
+ GinScanKey key = so->keys + i;
keyrecheck = true;
- if (!DatumGetBool(FunctionCall6(&so->ginstate.consistentFn[key->attnum-1],
+ if (!DatumGetBool(FunctionCall6(&so->ginstate.consistentFn[key->attnum - 1],
PointerGetDatum(key->entryRes),
UInt16GetDatum(key->strategy),
key->query,
@@ -1114,7 +1120,7 @@ scanPendingInsert(IndexScanDesc scan, TIDBitmap *tbm, int64 *ntids)
MemoryContextSwitchTo(oldCtx);
MemoryContextReset(so->tempCtx);
- if ( match )
+ if (match)
{
tbm_add_tuples(tbm, &pos.item, 1, recheck);
(*ntids)++;
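The FunctionCall6 above invokes the opclass consistent function with the entryRes array, strategy, and query; judging from the six-argument call, the remaining slots carry the entry count, extra_data, and a recheck pointer. A skeleton of such a function under that assumed convention, with AND semantics as a placeholder body (all names hypothetical):

#include "postgres.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(my_gin_consistent);

Datum
my_gin_consistent(PG_FUNCTION_ARGS)
{
	bool	   *check = (bool *) PG_GETARG_POINTER(0);
	/* StrategyNumber strategy = PG_GETARG_UINT16(1); */
	/* Datum query = PG_GETARG_DATUM(2); */
	int32		nkeys = PG_GETARG_INT32(3);		/* assumed argument order */
	/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
	bool	   *recheck = (bool *) PG_GETARG_POINTER(5);
	int32		i;

	*recheck = false;
	for (i = 0; i < nkeys; i++)
		if (!check[i])
			PG_RETURN_BOOL(false);	/* demand every entry: AND semantics */
	PG_RETURN_BOOL(true);
}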
@@ -1137,10 +1143,10 @@ scanGetItem(IndexScanDesc scan, ItemPointerData *item, bool *recheck)
* We return recheck = true if any of the keyGetItem calls return
* keyrecheck = true. Note that because the second loop might advance
* some keys, this could theoretically be too conservative. In practice
- * though, we expect that a consistentFn's recheck result will depend
- * only on the operator and the query, so for any one key it should
- * stay the same regardless of advancing to new items. So it's not
- * worth working harder.
+ * though, we expect that a consistentFn's recheck result will depend only
+ * on the operator and the query, so for any one key it should stay the
+ * same regardless of advancing to new items. So it's not worth working
+ * harder.
*/
*recheck = false;
@@ -1165,13 +1171,13 @@ scanGetItem(IndexScanDesc scan, ItemPointerData *item, bool *recheck)
{
int cmp = compareItemPointers(item, &key->curItem);
- if ( cmp != 0 && (ItemPointerIsLossyPage(item) || ItemPointerIsLossyPage(&key->curItem)) )
+ if (cmp != 0 && (ItemPointerIsLossyPage(item) || ItemPointerIsLossyPage(&key->curItem)))
{
/*
* if one of ItemPointers points to the whole page then
* compare only page's number
*/
- if ( ItemPointerGetBlockNumber(item) == ItemPointerGetBlockNumber(&key->curItem) )
+ if (ItemPointerGetBlockNumber(item) == ItemPointerGetBlockNumber(&key->curItem))
cmp = 0;
else
cmp = (ItemPointerGetBlockNumber(item) > ItemPointerGetBlockNumber(&key->curItem)) ? 1 : -1;
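The fallback above reduces the comparison to block numbers whenever either pointer stands for a whole page. As a self-contained sketch (assuming, per the ginget.c convention, that offset 0xffff marks a whole-page pointer; helper names are hypothetical):

#include "postgres.h"
#include "storage/itemptr.h"

static bool
is_whole_page(ItemPointer p)
{
	return ItemPointerGetOffsetNumber(p) == (OffsetNumber) 0xffff;
}

static int
cmp_maybe_lossy(ItemPointer a, ItemPointer b, int exact_cmp)
{
	BlockNumber ba = ItemPointerGetBlockNumber(a);
	BlockNumber bb = ItemPointerGetBlockNumber(b);

	if (exact_cmp == 0 || (!is_whole_page(a) && !is_whole_page(b)))
		return exact_cmp;		/* both precise: the exact result stands */

	/* one side covers a whole page: only block numbers are comparable */
	if (ba == bb)
		return 0;
	return (ba > bb) ? 1 : -1;
}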
@@ -1205,7 +1211,7 @@ Datum
gingetbitmap(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
+ TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
int64 ntids;
if (GinIsNewKey(scan))
@@ -1217,15 +1223,14 @@ gingetbitmap(PG_FUNCTION_ARGS)
ntids = 0;
/*
- * First, scan the pending list and collect any matching entries into
- * the bitmap. After we scan a pending item, some other backend could
- * post it into the main index, and so we might visit it a second time
- * during the main scan. This is okay because we'll just re-set the
- * same bit in the bitmap. (The possibility of duplicate visits is a
- * major reason why GIN can't support the amgettuple API, however.)
- * Note that it would not do to scan the main index before the pending
- * list, since concurrent cleanup could then make us miss entries
- * entirely.
+ * First, scan the pending list and collect any matching entries into the
+ * bitmap. After we scan a pending item, some other backend could post it
+ * into the main index, and so we might visit it a second time during the
+ * main scan. This is okay because we'll just re-set the same bit in the
+ * bitmap. (The possibility of duplicate visits is a major reason why GIN
+ * can't support the amgettuple API, however.) Note that it would not do
+ * to scan the main index before the pending list, since concurrent
+ * cleanup could then make us miss entries entirely.
*/
scanPendingInsert(scan, tbm, &ntids);
@@ -1244,7 +1249,7 @@ gingetbitmap(PG_FUNCTION_ARGS)
if (!scanGetItem(scan, &iptr, &recheck))
break;
- if ( ItemPointerIsLossyPage(&iptr) )
+ if (ItemPointerIsLossyPage(&iptr))
tbm_add_page(tbm, ItemPointerGetBlockNumber(&iptr));
else
tbm_add_tuples(tbm, &iptr, 1, recheck);
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index ef3d4bbb03..2adaed43d4 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/gininsert.c,v 1.21 2009/06/06 02:39:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/gininsert.c,v 1.22 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -100,15 +100,15 @@ static IndexTuple
addItemPointersToTuple(Relation index, GinState *ginstate, GinBtreeStack *stack,
IndexTuple old, ItemPointerData *items, uint32 nitem, bool isBuild)
{
- Datum key = gin_index_getattr(ginstate, old);
- OffsetNumber attnum = gintuple_get_attrnum(ginstate, old);
- IndexTuple res = GinFormTuple(ginstate, attnum, key,
- NULL, nitem + GinGetNPosting(old));
+ Datum key = gin_index_getattr(ginstate, old);
+ OffsetNumber attnum = gintuple_get_attrnum(ginstate, old);
+ IndexTuple res = GinFormTuple(ginstate, attnum, key,
+ NULL, nitem + GinGetNPosting(old));
if (res)
{
/* good, small enough */
- uint32 newnitem;
+ uint32 newnitem;
newnitem = MergeItemPointers(GinGetPosting(res),
GinGetPosting(old), GinGetNPosting(old),
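MergeItemPointers, called above, is a plain merge of two TID arrays already sorted in item-pointer order. A sketch of the operation (duplicate handling in the real routine may differ; this version keeps both copies on a tie):

#include "postgres.h"
#include "storage/itemptr.h"

static uint32
merge_tids(ItemPointerData *dst,
		   ItemPointerData *a, uint32 na,
		   ItemPointerData *b, uint32 nb)
{
	uint32		i = 0,
				j = 0,
				n = 0;

	while (i < na && j < nb)
	{
		if (ItemPointerCompare(&a[i], &b[j]) <= 0)
			dst[n++] = a[i++];
		else
			dst[n++] = b[j++];
	}
	while (i < na)
		dst[n++] = a[i++];
	while (j < nb)
		dst[n++] = b[j++];
	return n;					/* number of items written to dst */
}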
@@ -236,15 +236,15 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
{
GinBuildState *buildstate = (GinBuildState *) state;
MemoryContext oldCtx;
- int i;
+ int i;
oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx);
- for(i=0; i<buildstate->ginstate.origTupdesc->natts;i++)
- if ( !isnull[i] )
- buildstate->indtuples += ginHeapTupleBulkInsert(buildstate,
- (OffsetNumber)(i+1), values[i],
- &htup->t_self);
+ for (i = 0; i < buildstate->ginstate.origTupdesc->natts; i++)
+ if (!isnull[i])
+ buildstate->indtuples += ginHeapTupleBulkInsert(buildstate,
+ (OffsetNumber) (i + 1), values[i],
+ &htup->t_self);
/* If we've maxed out our available memory, dump everything to the index */
/* Also dump if the tree seems to be getting too unbalanced */
@@ -254,7 +254,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
ItemPointerData *list;
Datum entry;
uint32 nlist;
- OffsetNumber attnum;
+ OffsetNumber attnum;
while ((list = ginGetEntry(&buildstate->accum, &attnum, &entry, &nlist)) != NULL)
{
@@ -279,7 +279,8 @@ ginbuild(PG_FUNCTION_ARGS)
IndexBuildResult *result;
double reltuples;
GinBuildState buildstate;
- Buffer RootBuffer, MetaBuffer;
+ Buffer RootBuffer,
+ MetaBuffer;
ItemPointerData *list;
Datum entry;
uint32 nlist;
@@ -316,7 +317,7 @@ ginbuild(PG_FUNCTION_ARGS)
rdata.next = NULL;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX, &rdata);
-
+
page = BufferGetPage(RootBuffer);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
@@ -420,7 +421,7 @@ gininsert(PG_FUNCTION_ARGS)
MemoryContext oldCtx;
MemoryContext insertCtx;
uint32 res = 0;
- int i;
+ int i;
insertCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin insert temporary context",
@@ -432,24 +433,24 @@ gininsert(PG_FUNCTION_ARGS)
initGinState(&ginstate, index);
- if ( GinGetUseFastUpdate(index) )
+ if (GinGetUseFastUpdate(index))
{
- GinTupleCollector collector;
+ GinTupleCollector collector;
memset(&collector, 0, sizeof(GinTupleCollector));
- for(i=0; i<ginstate.origTupdesc->natts;i++)
- if ( !isnull[i] )
+ for (i = 0; i < ginstate.origTupdesc->natts; i++)
+ if (!isnull[i])
res += ginHeapTupleFastCollect(index, &ginstate, &collector,
- (OffsetNumber)(i+1), values[i], ht_ctid);
+ (OffsetNumber) (i + 1), values[i], ht_ctid);
ginHeapTupleFastInsert(index, &ginstate, &collector);
}
else
{
- for(i=0; i<ginstate.origTupdesc->natts;i++)
- if ( !isnull[i] )
- res += ginHeapTupleInsert(index, &ginstate,
- (OffsetNumber)(i+1), values[i], ht_ctid);
+ for (i = 0; i < ginstate.origTupdesc->natts; i++)
+ if (!isnull[i])
+ res += ginHeapTupleInsert(index, &ginstate,
+ (OffsetNumber) (i + 1), values[i], ht_ctid);
}
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index a3d1135708..bda97033ad 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.23 2009/04/05 11:32:01 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginscan.c,v 1.24 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@ ginbeginscan(PG_FUNCTION_ARGS)
static void
fillScanKey(GinState *ginstate, GinScanKey key, OffsetNumber attnum, Datum query,
- Datum *entryValues, bool *partial_matches, uint32 nEntryValues,
+ Datum *entryValues, bool *partial_matches, uint32 nEntryValues,
StrategyNumber strategy, Pointer *extra_data)
{
uint32 i,
@@ -68,8 +68,8 @@ fillScanKey(GinState *ginstate, GinScanKey key, OffsetNumber attnum, Datum query
key->scanEntry[i].strategy = strategy;
key->scanEntry[i].list = NULL;
key->scanEntry[i].nlist = 0;
- key->scanEntry[i].isPartialMatch = ( ginstate->canPartialMatch[attnum - 1] && partial_matches )
- ? partial_matches[i] : false;
+ key->scanEntry[i].isPartialMatch = (ginstate->canPartialMatch[attnum - 1] && partial_matches)
+ ? partial_matches[i] : false;
/* link to the equals entry in current scan key */
key->scanEntry[i].master = NULL;
@@ -172,12 +172,12 @@ newScanKey(IndexScanDesc scan)
ScanKey skey = &scankey[i];
Datum *entryValues;
int32 nEntryValues = 0;
- bool *partial_matches = NULL;
- Pointer *extra_data = NULL;
+ bool *partial_matches = NULL;
+ Pointer *extra_data = NULL;
/*
- * Assume, that GIN-indexable operators are strict, so
- * nothing could be found
+ * Assume that GIN-indexable operators are strict, so nothing can
+ * be found
*/
if (skey->sk_flags & SK_ISNULL)
{
@@ -196,8 +196,8 @@ newScanKey(IndexScanDesc scan)
if (nEntryValues < 0)
{
/*
- * extractQueryFn signals that nothing can match, so we can
- * just set isVoidRes flag. No need to examine any more keys.
+ * extractQueryFn signals that nothing can match, so we can just
+ * set isVoidRes flag. No need to examine any more keys.
*/
so->isVoidRes = true;
break;
@@ -206,10 +206,10 @@ newScanKey(IndexScanDesc scan)
if (entryValues == NULL || nEntryValues == 0)
{
/*
- * extractQueryFn signals that everything matches. This would
- * require a full scan, which we can't do, but perhaps there
- * is another scankey that provides a restriction to use. So
- * we keep going and check only at the end.
+ * extractQueryFn signals that everything matches. This would
+ * require a full scan, which we can't do, but perhaps there is
+ * another scankey that provides a restriction to use. So we keep
+ * going and check only at the end.
*/
continue;
}
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index e0951a6a4f..3834ae3b56 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.21 2009/03/24 20:17:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginutil.c,v 1.22 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -16,7 +16,7 @@
#include "access/genam.h"
#include "access/gin.h"
#include "access/reloptions.h"
-#include "catalog/pg_type.h"
+#include "catalog/pg_type.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/indexfsm.h"
@@ -25,44 +25,44 @@
void
initGinState(GinState *state, Relation index)
{
- int i;
+ int i;
state->origTupdesc = index->rd_att;
state->oneCol = (index->rd_att->natts == 1) ? true : false;
- for(i=0;i<index->rd_att->natts;i++)
+ for (i = 0; i < index->rd_att->natts; i++)
{
- state->tupdesc[i] = CreateTemplateTupleDesc(2,false);
+ state->tupdesc[i] = CreateTemplateTupleDesc(2, false);
- TupleDescInitEntry( state->tupdesc[i], (AttrNumber) 1, NULL,
- INT2OID, -1, 0);
- TupleDescInitEntry( state->tupdesc[i], (AttrNumber) 2, NULL,
- index->rd_att->attrs[i]->atttypid,
- index->rd_att->attrs[i]->atttypmod,
- index->rd_att->attrs[i]->attndims
- );
+ TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 1, NULL,
+ INT2OID, -1, 0);
+ TupleDescInitEntry(state->tupdesc[i], (AttrNumber) 2, NULL,
+ index->rd_att->attrs[i]->atttypid,
+ index->rd_att->attrs[i]->atttypmod,
+ index->rd_att->attrs[i]->attndims
+ );
fmgr_info_copy(&(state->compareFn[i]),
- index_getprocinfo(index, i+1, GIN_COMPARE_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, i + 1, GIN_COMPARE_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->extractValueFn[i]),
- index_getprocinfo(index, i+1, GIN_EXTRACTVALUE_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, i + 1, GIN_EXTRACTVALUE_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->extractQueryFn[i]),
- index_getprocinfo(index, i+1, GIN_EXTRACTQUERY_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC),
+ CurrentMemoryContext);
fmgr_info_copy(&(state->consistentFn[i]),
- index_getprocinfo(index, i+1, GIN_CONSISTENT_PROC),
- CurrentMemoryContext);
+ index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC),
+ CurrentMemoryContext);
/*
* Check opclass capability to do partial match.
*/
- if ( index_getprocid(index, i+1, GIN_COMPARE_PARTIAL_PROC) != InvalidOid )
+ if (index_getprocid(index, i + 1, GIN_COMPARE_PARTIAL_PROC) != InvalidOid)
{
fmgr_info_copy(&(state->comparePartialFn[i]),
- index_getprocinfo(index, i+1, GIN_COMPARE_PARTIAL_PROC),
+ index_getprocinfo(index, i + 1, GIN_COMPARE_PARTIAL_PROC),
CurrentMemoryContext);
state->canPartialMatch[i] = true;
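The loop above builds, for each indexed column, a two-attribute descriptor: a leading int2 holding the column number, then the key typed like the column itself. Pulled out on its own (a sketch; make_gin_entry_tupdesc is a hypothetical name):

#include "postgres.h"
#include "access/tupdesc.h"
#include "catalog/pg_attribute.h"
#include "catalog/pg_type.h"

static TupleDesc
make_gin_entry_tupdesc(Form_pg_attribute att)
{
	TupleDesc	td = CreateTemplateTupleDesc(2, false);

	/* attribute 1: the index column number, always int2 */
	TupleDescInitEntry(td, (AttrNumber) 1, NULL, INT2OID, -1, 0);
	/* attribute 2: the key, typed like the underlying column */
	TupleDescInitEntry(td, (AttrNumber) 2, NULL,
					   att->atttypid, att->atttypmod, att->attndims);
	return td;
}

This is why gintuple_get_attrnum below can read the first attribute with any of these descriptors: attribute 1 is int16 in every one of them.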
@@ -82,21 +82,21 @@ gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple)
{
OffsetNumber colN = FirstOffsetNumber;
- if ( !ginstate->oneCol )
+ if (!ginstate->oneCol)
{
- Datum res;
- bool isnull;
+ Datum res;
+ bool isnull;
/*
- * First attribute is always int16, so we can safely use any
- * tuple descriptor to obtain first attribute of tuple
+ * The first attribute is always int16, so we can safely use any
+ * tuple descriptor to obtain the first attribute of a tuple
*/
res = index_getattr(tuple, FirstOffsetNumber, ginstate->tupdesc[0],
&isnull);
Assert(!isnull);
colN = DatumGetUInt16(res);
- Assert( colN >= FirstOffsetNumber && colN <= ginstate->origTupdesc->natts );
+ Assert(colN >= FirstOffsetNumber && colN <= ginstate->origTupdesc->natts);
}
return colN;
@@ -108,10 +108,10 @@ gintuple_get_attrnum(GinState *ginstate, IndexTuple tuple)
Datum
gin_index_getattr(GinState *ginstate, IndexTuple tuple)
{
- bool isnull;
- Datum res;
+ bool isnull;
+ Datum res;
- if ( ginstate->oneCol )
+ if (ginstate->oneCol)
{
/*
* Single column index doesn't store attribute numbers in tuples
@@ -122,8 +122,8 @@ gin_index_getattr(GinState *ginstate, IndexTuple tuple)
else
{
/*
- * Since the datum type depends on which index column it's from,
- * we must be careful to use the right tuple descriptor here.
+ * Since the datum type depends on which index column it's from, we
+ * must be careful to use the right tuple descriptor here.
*/
OffsetNumber colN = gintuple_get_attrnum(ginstate, tuple);
@@ -216,8 +216,8 @@ GinInitBuffer(Buffer b, uint32 f)
void
GinInitMetabuffer(Buffer b)
{
- GinMetaPageData *metadata;
- Page page = BufferGetPage(b);
+ GinMetaPageData *metadata;
+ Page page = BufferGetPage(b);
GinInitPage(page, GIN_META, BufferGetPageSize(b));
@@ -234,7 +234,7 @@ compareEntries(GinState *ginstate, OffsetNumber attnum, Datum a, Datum b)
{
return DatumGetInt32(
FunctionCall2(
- &ginstate->compareFn[attnum-1],
+ &ginstate->compareFn[attnum - 1],
a, b
)
);
@@ -242,12 +242,12 @@ compareEntries(GinState *ginstate, OffsetNumber attnum, Datum a, Datum b)
int
compareAttEntries(GinState *ginstate, OffsetNumber attnum_a, Datum a,
- OffsetNumber attnum_b, Datum b)
+ OffsetNumber attnum_b, Datum b)
{
- if ( attnum_a == attnum_b )
- return compareEntries( ginstate, attnum_a, a, b);
+ if (attnum_a == attnum_b)
+ return compareEntries(ginstate, attnum_a, a, b);
- return ( attnum_a < attnum_b ) ? -1 : 1;
+ return (attnum_a < attnum_b) ? -1 : 1;
}
typedef struct
@@ -275,7 +275,7 @@ extractEntriesS(GinState *ginstate, OffsetNumber attnum, Datum value, int32 *nen
Datum *entries;
entries = (Datum *) DatumGetPointer(FunctionCall2(
- &ginstate->extractValueFn[attnum-1],
+ &ginstate->extractValueFn[attnum - 1],
value,
PointerGetDatum(nentries)
));
@@ -288,7 +288,7 @@ extractEntriesS(GinState *ginstate, OffsetNumber attnum, Datum value, int32 *nen
{
cmpEntriesData arg;
- arg.cmpDatumFunc = &ginstate->compareFn[attnum-1];
+ arg.cmpDatumFunc = &ginstate->compareFn[attnum - 1];
arg.needUnique = needUnique;
qsort_arg(entries, *nentries, sizeof(Datum),
(qsort_arg_comparator) cmpEntries, (void *) &arg);
@@ -348,7 +348,7 @@ ginoptions(PG_FUNCTION_ARGS)
rdopts = allocateReloptStruct(sizeof(GinOptions), options, numoptions);
fillRelOptions((void *) rdopts, sizeof(GinOptions), options, numoptions,
- validate, tab, lengthof(tab));
+ validate, tab, lengthof(tab));
pfree(options);
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 934bf7c362..7f2ff9896f 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.29 2009/06/06 22:13:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginvacuum.c,v 1.30 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
@@ -533,8 +533,8 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3
if (GinGetNPosting(itup) != newN)
{
- Datum value;
- OffsetNumber attnum;
+ Datum value;
+ OffsetNumber attnum;
/*
* Some ItemPointers was deleted, so we should remake our
@@ -724,9 +724,9 @@ ginvacuumcleanup(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(stats);
}
- /*
- * Set up all-zero stats and cleanup pending inserts
- * if ginbulkdelete wasn't called
+ /*
+ * Set up all-zero stats and clean up pending inserts if ginbulkdelete
+ * wasn't called
*/
if (stats == NULL)
{
@@ -758,7 +758,7 @@ ginvacuumcleanup(PG_FUNCTION_ARGS)
if (needLock)
UnlockRelationForExtension(index, ExclusiveLock);
- totFreePages = 0;
+ totFreePages = 0;
for (blkno = GIN_ROOT_BLKNO + 1; blkno < npages; blkno++)
{
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index 03cdc1129c..1f008b727f 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.18 2009/03/24 20:17:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gin/ginxlog.c,v 1.19 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -71,7 +71,8 @@ static void
ginRedoCreateIndex(XLogRecPtr lsn, XLogRecord *record)
{
RelFileNode *node = (RelFileNode *) XLogRecGetData(record);
- Buffer RootBuffer, MetaBuffer;
+ Buffer RootBuffer,
+ MetaBuffer;
Page page;
MetaBuffer = XLogReadBuffer(*node, GIN_METAPAGE_BLKNO, true);
@@ -446,7 +447,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
static void
ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
{
- ginxlogUpdateMeta *data = (ginxlogUpdateMeta*) XLogRecGetData(record);
+ ginxlogUpdateMeta *data = (ginxlogUpdateMeta *) XLogRecGetData(record);
Buffer metabuffer;
Page metapage;
@@ -455,39 +456,41 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
if (!XLByteLE(lsn, PageGetLSN(metapage)))
{
- memcpy( GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
+ memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
PageSetTLI(metapage, ThisTimeLineID);
MarkBufferDirty(metabuffer);
}
- if ( data->ntuples > 0 )
+ if (data->ntuples > 0)
{
/*
* insert into tail page
*/
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
- Buffer buffer = XLogReadBuffer(data->node, data->metadata.tail, false);
- Page page = BufferGetPage(buffer);
+ Buffer buffer = XLogReadBuffer(data->node, data->metadata.tail, false);
+ Page page = BufferGetPage(buffer);
- if ( !XLByteLE(lsn, PageGetLSN(page)))
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
- OffsetNumber l, off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
- int i, tupsize;
- IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
-
- for(i=0; i<data->ntuples; i++)
+ OffsetNumber l,
+ off = (PageIsEmpty(page)) ? FirstOffsetNumber :
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ int i,
+ tupsize;
+ IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
+
+ for (i = 0; i < data->ntuples; i++)
{
tupsize = IndexTupleSize(tuples);
- l = PageAddItem(page, (Item)tuples, tupsize, off, false, false);
+ l = PageAddItem(page, (Item) tuples, tupsize, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page");
- tuples = (IndexTuple)( ((char*)tuples) + tupsize );
+ tuples = (IndexTuple) (((char *) tuples) + tupsize);
}
/*
@@ -502,16 +505,16 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
UnlockReleaseBuffer(buffer);
}
}
- else if ( data->prevTail != InvalidBlockNumber )
+ else if (data->prevTail != InvalidBlockNumber)
{
/*
* New tail
*/
- Buffer buffer = XLogReadBuffer(data->node, data->prevTail, false);
- Page page = BufferGetPage(buffer);
+ Buffer buffer = XLogReadBuffer(data->node, data->prevTail, false);
+ Page page = BufferGetPage(buffer);
- if ( !XLByteLE(lsn, PageGetLSN(page)))
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
GinPageGetOpaque(page)->rightlink = data->newRightlink;
@@ -528,12 +531,14 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
static void
ginRedoInsertListPage(XLogRecPtr lsn, XLogRecord *record)
{
- ginxlogInsertListPage *data = (ginxlogInsertListPage*) XLogRecGetData(record);
- Buffer buffer;
- Page page;
- OffsetNumber l, off = FirstOffsetNumber;
- int i, tupsize;
- IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogInsertListPage));
+ ginxlogInsertListPage *data = (ginxlogInsertListPage *) XLogRecGetData(record);
+ Buffer buffer;
+ Page page;
+ OffsetNumber l,
+ off = FirstOffsetNumber;
+ int i,
+ tupsize;
+ IndexTuple tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogInsertListPage));
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
@@ -543,7 +548,7 @@ ginRedoInsertListPage(XLogRecPtr lsn, XLogRecord *record)
GinInitBuffer(buffer, GIN_LIST);
GinPageGetOpaque(page)->rightlink = data->rightlink;
- if ( data->rightlink == InvalidBlockNumber )
+ if (data->rightlink == InvalidBlockNumber)
{
/* tail of sublist */
GinPageSetFullRow(page);
@@ -554,16 +559,16 @@ ginRedoInsertListPage(XLogRecPtr lsn, XLogRecord *record)
GinPageGetOpaque(page)->maxoff = 0;
}
- for(i=0; i<data->ntuples; i++)
+ for (i = 0; i < data->ntuples; i++)
{
tupsize = IndexTupleSize(tuples);
- l = PageAddItem(page, (Item)tuples, tupsize, off, false, false);
+ l = PageAddItem(page, (Item) tuples, tupsize, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page");
- tuples = (IndexTuple)( ((char*)tuples) + tupsize );
+ tuples = (IndexTuple) (((char *) tuples) + tupsize);
}
PageSetLSN(page, lsn);
@@ -576,28 +581,28 @@ ginRedoInsertListPage(XLogRecPtr lsn, XLogRecord *record)
static void
ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
{
- ginxlogDeleteListPages *data = (ginxlogDeleteListPages*) XLogRecGetData(record);
+ ginxlogDeleteListPages *data = (ginxlogDeleteListPages *) XLogRecGetData(record);
Buffer metabuffer;
Page metapage;
- int i;
+ int i;
metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false);
metapage = BufferGetPage(metabuffer);
if (!XLByteLE(lsn, PageGetLSN(metapage)))
{
- memcpy( GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
+ memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
PageSetTLI(metapage, ThisTimeLineID);
MarkBufferDirty(metabuffer);
}
- for(i=0; i<data->ndeleted; i++)
+ for (i = 0; i < data->ndeleted; i++)
{
- Buffer buffer = XLogReadBuffer(data->node,data->toDelete[i],false);
- Page page = BufferGetPage(buffer);
+ Buffer buffer = XLogReadBuffer(data->node, data->toDelete[i], false);
+ Page page = BufferGetPage(buffer);
- if ( !XLByteLE(lsn, PageGetLSN(page)))
+ if (!XLByteLE(lsn, PageGetLSN(page)))
{
GinPageGetOpaque(page)->flags = GIN_DELETED;
@@ -645,7 +650,7 @@ gin_redo(XLogRecPtr lsn, XLogRecord *record)
case XLOG_GIN_INSERT_LISTPAGE:
ginRedoInsertListPage(lsn, record);
break;
- case XLOG_GIN_DELETE_LISTPAGE:
+ case XLOG_GIN_DELETE_LISTPAGE:
ginRedoDeleteListPages(lsn, record);
break;
default:
@@ -705,15 +710,15 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec)
break;
case XLOG_GIN_UPDATE_META_PAGE:
appendStringInfo(buf, "Update metapage, ");
- desc_node(buf, ((ginxlogUpdateMeta *) rec)->node, ((ginxlogUpdateMeta *) rec)->metadata.tail);
+ desc_node(buf, ((ginxlogUpdateMeta *) rec)->node, ((ginxlogUpdateMeta *) rec)->metadata.tail);
break;
case XLOG_GIN_INSERT_LISTPAGE:
appendStringInfo(buf, "Insert new list page, ");
- desc_node(buf, ((ginxlogInsertListPage *) rec)->node, ((ginxlogInsertListPage *) rec)->blkno);
+ desc_node(buf, ((ginxlogInsertListPage *) rec)->node, ((ginxlogInsertListPage *) rec)->blkno);
break;
case XLOG_GIN_DELETE_LISTPAGE:
appendStringInfo(buf, "Delete list pages (%d), ", ((ginxlogDeleteListPages *) rec)->ndeleted);
- desc_node(buf, ((ginxlogDeleteListPages *) rec)->node, ((ginxlogDeleteListPages *) rec)->metadata.head);
+ desc_node(buf, ((ginxlogDeleteListPages *) rec)->node, ((ginxlogDeleteListPages *) rec)->metadata.head);
break;
default:
elog(PANIC, "gin_desc: unknown op code %u", info);
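Every redo routine in this file follows the same LSN interlock: apply the record only if the page's LSN shows it has not been applied yet, then stamp the page. That is what keeps replay idempotent if recovery is restarted. The shared shape, as a sketch (apply_record stands in for the record-specific work; the real routines also set the TLI and mark the buffer dirty):

#include "postgres.h"
#include "access/xlog.h"
#include "storage/bufpage.h"

extern void apply_record(Page page);	/* hypothetical per-record work */

static void
redo_if_needed(Page page, XLogRecPtr lsn)
{
	if (XLByteLE(lsn, PageGetLSN(page)))
		return;					/* already applied: nothing to redo */

	apply_record(page);
	PageSetLSN(page, lsn);		/* stamp the page so a re-replay skips it */
}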
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 9701d30e38..e7ca8f6476 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.80 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.81 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ gistgettuple(PG_FUNCTION_ARGS)
so = (GISTScanOpaque) scan->opaque;
- if (dir != ForwardScanDirection)
+ if (dir != ForwardScanDirection)
elog(ERROR, "GiST doesn't support other scan directions than forward");
/*
@@ -101,8 +101,8 @@ Datum
gistgetbitmap(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
- int64 ntids;
+ TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
+ int64 ntids;
ntids = gistnext(scan, tbm);
@@ -136,10 +136,10 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
so = (GISTScanOpaque) scan->opaque;
- if ( so->qual_ok == false )
+ if (so->qual_ok == false)
return 0;
- if ( so->curbuf == InvalidBuffer )
+ if (so->curbuf == InvalidBuffer)
{
if (ItemPointerIsValid(&so->curpos) == false)
{
@@ -155,7 +155,7 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
stk->block = GIST_ROOT_BLKNO;
pgstat_count_index_scan(scan->indexRelation);
- }
+ }
else
{
/* scan is finished */
@@ -164,25 +164,25 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
}
/*
- * check stored pointers from last visit
+ * check stored pointers from last visit
*/
- if ( so->nPageData > 0 )
+ if (so->nPageData > 0)
{
/*
* gistgetmulti never should go here
*/
- Assert( tbm == NULL );
+ Assert(tbm == NULL);
- if ( so->curPageData < so->nPageData )
+ if (so->curPageData < so->nPageData)
{
- scan->xs_ctup.t_self = so->pageData[ so->curPageData ].heapPtr;
- scan->xs_recheck = so->pageData[ so->curPageData ].recheck;
+ scan->xs_ctup.t_self = so->pageData[so->curPageData].heapPtr;
+ scan->xs_recheck = so->pageData[so->curPageData].recheck;
ItemPointerSet(&so->curpos,
- BufferGetBlockNumber(so->curbuf),
- so->pageData[ so->curPageData ].pageOffset);
+ BufferGetBlockNumber(so->curbuf),
+ so->pageData[so->curPageData].pageOffset);
- so->curPageData ++;
+ so->curPageData++;
return 1;
}
@@ -227,7 +227,7 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
if (!XLogRecPtrIsInvalid(so->stack->parentlsn) &&
XLByteLT(so->stack->parentlsn, opaque->nsn) &&
opaque->rightlink != InvalidBlockNumber /* sanity check */ &&
- (so->stack->next == NULL || so->stack->next->block != opaque->rightlink) /* check if already
+ (so->stack->next == NULL || so->stack->next->block != opaque->rightlink) /* check if already
added */ )
{
/* detect page split, follow right link to add pages */
@@ -272,12 +272,12 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
if (!OffsetNumberIsValid(n))
{
/*
- * If we was called from gistgettuple and current buffer contains
- * something matched then make a recursive call - it will return
- * ItemPointer from so->pageData. But we save buffer pinned to
- * support tuple's killing
+ * If we were called from gistgettuple and the current buffer
+ * contains matches, then make a recursive call - it will return
+ * an ItemPointer from so->pageData. But we keep the buffer
+ * pinned to support killing tuples
*/
- if ( !tbm && so->nPageData > 0 )
+ if (!tbm && so->nPageData > 0)
{
LockBuffer(so->curbuf, GIST_UNLOCK);
return gistnext(scan, NULL);
@@ -324,12 +324,12 @@ gistnext(IndexScanDesc scan, TIDBitmap *tbm)
ntids++;
if (tbm != NULL)
tbm_add_tuples(tbm, &it->t_tid, 1, scan->xs_recheck);
- else
+ else
{
- so->pageData[ so->nPageData ].heapPtr = it->t_tid;
- so->pageData[ so->nPageData ].pageOffset = n;
- so->pageData[ so->nPageData ].recheck = scan->xs_recheck;
- so->nPageData ++;
+ so->pageData[so->nPageData].heapPtr = it->t_tid;
+ so->pageData[so->nPageData].pageOffset = n;
+ so->pageData[so->nPageData].recheck = scan->xs_recheck;
+ so->nPageData++;
}
}
}
@@ -437,8 +437,8 @@ gistindex_keytest(IndexTuple tuple,
/*
* Call the Consistent function to evaluate the test. The
* arguments are the index datum (as a GISTENTRY*), the comparison
- * datum, the comparison operator's strategy number and
- * subtype from pg_amop, and the recheck flag.
+ * datum, the comparison operator's strategy number and subtype
+ * from pg_amop, and the recheck flag.
*
* (Presently there's no need to pass the subtype since it'll
* always be zero, but might as well pass it for possible future
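The calling convention spelled out in that comment maps onto a GiST consistent support function shaped like the following skeleton (hypothetical names; key_matches stands in for the datatype-specific test):

#include "postgres.h"
#include "fmgr.h"
#include "access/gist.h"

extern bool key_matches(Datum key, Datum query, StrategyNumber s);	/* hypothetical */

PG_FUNCTION_INFO_V1(my_consistent);

Datum
my_consistent(PG_FUNCTION_ARGS)
{
	GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
	Datum		query = PG_GETARG_DATUM(1);
	StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
	/* Oid subtype = PG_GETARG_OID(3);	-- presently always zero */
	bool	   *recheck = (bool *) PG_GETARG_POINTER(4);

	*recheck = true;			/* conservative: let the executor recheck */
	PG_RETURN_BOOL(key_matches(entry->key, query, strategy));
}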
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 396b93ab97..ef44380e77 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.16 2009/04/06 14:27:27 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistproc.c,v 1.17 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,6 +86,7 @@ gist_box_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
BOX *query = PG_GETARG_BOX_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
@@ -279,11 +280,11 @@ chooseLR(GIST_SPLITVEC *v,
static void
fallbackSplit(GistEntryVector *entryvec, GIST_SPLITVEC *v)
{
- OffsetNumber i,
- maxoff;
- BOX *unionL = NULL,
- *unionR = NULL;
- int nbytes;
+ OffsetNumber i,
+ maxoff;
+ BOX *unionL = NULL,
+ *unionR = NULL;
+ int nbytes;
maxoff = entryvec->n - 1;
@@ -294,7 +295,7 @@ fallbackSplit(GistEntryVector *entryvec, GIST_SPLITVEC *v)
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- BOX * cur = DatumGetBoxP(entryvec->vector[i].key);
+ BOX *cur = DatumGetBoxP(entryvec->vector[i].key);
if (i <= (maxoff - FirstOffsetNumber + 1) / 2)
{
@@ -767,6 +768,7 @@ gist_poly_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
POLYGON *query = PG_GETARG_POLYGON_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
bool result;
@@ -843,6 +845,7 @@ gist_circle_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
CIRCLE *query = PG_GETARG_CIRCLE_P(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
BOX bbox;
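
The gistproc.c hunks above each add a blank line after the standard argument unpacking of a GiST consistent function: entry, query, strategy, the (currently always-zero) subtype, and the recheck output flag. As a hedged sketch of that calling convention, here is a minimal consistent function for boxes; my_box_consistent is a hypothetical name, and it ignores the strategy number in favor of a fixed overlap test, which a real opclass must not do:

    #include "postgres.h"

    #include "access/gist.h"
    #include "access/skey.h"
    #include "fmgr.h"
    #include "utils/geo_decls.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(my_box_consistent);

    Datum
    my_box_consistent(PG_FUNCTION_ARGS)
    {
        GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
        BOX        *query = PG_GETARG_BOX_P(1);

        /* StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); */
        /* Oid subtype = PG_GETARG_OID(3); always zero at present */
        bool       *recheck = (bool *) PG_GETARG_POINTER(4);
        BOX        *key = DatumGetBoxP(entry->key);

        /* sketch only: always test overlap instead of dispatching on strategy */
        *recheck = false;       /* claim the overlap test is exact */

        PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(box_overlap,
                                                        PointerGetDatum(key),
                                                        PointerGetDatum(query))));
    }

Setting *recheck = false claims the answer is exact; lossy opclasses leave it true so the executor re-checks the qualifier against the heap tuple.
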
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 68924332b7..aed3e95b4e 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.75 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.76 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -94,11 +94,13 @@ gistrescan(PG_FUNCTION_ARGS)
* Next, if any of keys is a NULL and that key is not marked with
* SK_SEARCHNULL then nothing can be found.
*/
- for (i = 0; i < scan->numberOfKeys; i++) {
+ for (i = 0; i < scan->numberOfKeys; i++)
+ {
scan->keyData[i].sk_func = so->giststate->consistentFn[scan->keyData[i].sk_attno - 1];
- if ( scan->keyData[i].sk_flags & SK_ISNULL ) {
- if ( (scan->keyData[i].sk_flags & SK_SEARCHNULL) == 0 )
+ if (scan->keyData[i].sk_flags & SK_ISNULL)
+ {
+ if ((scan->keyData[i].sk_flags & SK_SEARCHNULL) == 0)
so->qual_ok = false;
}
}
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index 0d11e7c9ab..c5e5f6f13a 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.9 2009/06/10 20:02:15 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.10 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -281,7 +281,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC
}
/*
- * Trivial picksplit implementaion. Function called only
+ * Trivial picksplit implementation.  Function called only
 * if the user-defined picksplit puts all keys on one page.
 * That is a bug in the user-defined picksplit, but we'd like
 * to "fix" that.
@@ -289,10 +289,10 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC
static void
genericPickSplit(GISTSTATE *giststate, GistEntryVector *entryvec, GIST_SPLITVEC *v, int attno)
{
- OffsetNumber i,
- maxoff;
- int nbytes;
- GistEntryVector *evec;
+ OffsetNumber i,
+ maxoff;
+ int nbytes;
+ GistEntryVector *evec;
maxoff = entryvec->n - 1;
@@ -320,21 +320,21 @@ genericPickSplit(GISTSTATE *giststate, GistEntryVector *entryvec, GIST_SPLITVEC
* Form unions of each page
*/
- evec = palloc( sizeof(GISTENTRY) * entryvec->n + GEVHDRSZ );
+ evec = palloc(sizeof(GISTENTRY) * entryvec->n + GEVHDRSZ);
evec->n = v->spl_nleft;
- memcpy(evec->vector, entryvec->vector + FirstOffsetNumber,
- sizeof(GISTENTRY) * evec->n);
- v->spl_ldatum = FunctionCall2(&giststate->unionFn[attno],
- PointerGetDatum(evec),
- PointerGetDatum(&nbytes));
+ memcpy(evec->vector, entryvec->vector + FirstOffsetNumber,
+ sizeof(GISTENTRY) * evec->n);
+ v->spl_ldatum = FunctionCall2(&giststate->unionFn[attno],
+ PointerGetDatum(evec),
+ PointerGetDatum(&nbytes));
evec->n = v->spl_nright;
- memcpy(evec->vector, entryvec->vector + FirstOffsetNumber + v->spl_nleft,
- sizeof(GISTENTRY) * evec->n);
- v->spl_rdatum = FunctionCall2(&giststate->unionFn[attno],
- PointerGetDatum(evec),
- PointerGetDatum(&nbytes));
+ memcpy(evec->vector, entryvec->vector + FirstOffsetNumber + v->spl_nleft,
+ sizeof(GISTENTRY) * evec->n);
+ v->spl_rdatum = FunctionCall2(&giststate->unionFn[attno],
+ PointerGetDatum(evec),
+ PointerGetDatum(&nbytes));
}
/*
@@ -365,17 +365,17 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec
PointerGetDatum(entryvec),
PointerGetDatum(sv));
- if ( sv->spl_nleft == 0 || sv->spl_nright == 0 )
+ if (sv->spl_nleft == 0 || sv->spl_nright == 0)
{
ereport(DEBUG1,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("picksplit method for %d column of index \"%s\" failed",
- attno+1, RelationGetRelationName(r)),
+					 errmsg("picksplit method for column %d of index \"%s\" failed",
+							attno + 1, RelationGetRelationName(r)),
errhint("The index is not optimal. To optimize it, contact a developer, or try to use the column as the second one in the CREATE INDEX command.")));
/*
- * Reinit GIST_SPLITVEC. Although that fields are not used
- * by genericPickSplit(), let us set up it for further processing
+	 * Reinit GIST_SPLITVEC. Although those fields are not used by
+	 * genericPickSplit(), let us set them up for further processing
*/
sv->spl_ldatum_exists = (v->spl_lisnull[attno]) ? false : true;
sv->spl_rdatum_exists = (v->spl_risnull[attno]) ? false : true;
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index fa1e3088ad..78eb378725 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.33 2009/01/05 17:14:28 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.34 2009/06/11 14:48:53 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
@@ -43,7 +43,8 @@ gistfillbuffer(Page page, IndexTuple *itup, int len, OffsetNumber off)
for (i = 0; i < len; i++)
{
- Size sz = IndexTupleSize(itup[i]);
+ Size sz = IndexTupleSize(itup[i]);
+
l = PageAddItem(page, (Item) itup[i], sz, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to GiST index page, item %d out of %d, size %d bytes",
diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c
index 833e6c574e..975f9d8c56 100644
--- a/src/backend/access/gist/gistvacuum.c
+++ b/src/backend/access/gist/gistvacuum.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.44 2009/06/06 22:13:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.45 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -510,7 +510,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS)
Relation rel = info->index;
BlockNumber npages,
blkno;
- BlockNumber totFreePages;
+ BlockNumber totFreePages;
BlockNumber lastBlock = GIST_ROOT_BLKNO,
lastFilledBlock = GIST_ROOT_BLKNO;
bool needLock;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 4c1cd5ceda..49b6594f1e 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.111 2009/06/06 22:13:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.112 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -52,7 +52,7 @@ hashbuild(PG_FUNCTION_ARGS)
Relation index = (Relation) PG_GETARG_POINTER(1);
IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
IndexBuildResult *result;
- BlockNumber relpages;
+ BlockNumber relpages;
double reltuples;
uint32 num_buckets;
HashBuildState buildstate;
@@ -76,12 +76,12 @@ hashbuild(PG_FUNCTION_ARGS)
* (assuming their hash codes are pretty random) there will be no locality
* of access to the index, and if the index is bigger than available RAM
* then we'll thrash horribly. To prevent that scenario, we can sort the
- * tuples by (expected) bucket number. However, such a sort is useless
+ * tuples by (expected) bucket number. However, such a sort is useless
* overhead when the index does fit in RAM. We choose to sort if the
* initial index size exceeds NBuffers.
*
- * NOTE: this test will need adjustment if a bucket is ever different
- * from one page.
+ * NOTE: this test will need adjustment if a bucket is ever different from
+ * one page.
*/
if (num_buckets >= (uint32) NBuffers)
buildstate.spool = _h_spoolinit(index, num_buckets);
@@ -285,7 +285,7 @@ Datum
hashgetbitmap(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
+ TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
HashScanOpaque so = (HashScanOpaque) scan->opaque;
bool res;
int64 ntids = 0;
@@ -294,7 +294,7 @@ hashgetbitmap(PG_FUNCTION_ARGS)
while (res)
{
- bool add_tuple;
+ bool add_tuple;
/*
* Skip killed tuples if asked to.
@@ -312,7 +312,7 @@ hashgetbitmap(PG_FUNCTION_ARGS)
add_tuple = true;
/* Save tuple ID, and continue scanning */
- if (add_tuple)
+ if (add_tuple)
{
/* Note we mark the tuple ID as requiring recheck */
tbm_add_tuples(tbm, &scan->xs_ctup.t_self, 1, true);
@@ -481,7 +481,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
* each bucket.
*/
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
- metap = HashPageGetMeta(BufferGetPage(metabuf));
+ metap = HashPageGetMeta(BufferGetPage(metabuf));
orig_maxbucket = metap->hashm_maxbucket;
orig_ntuples = metap->hashm_ntuples;
memcpy(&local_metapage, metap, sizeof(local_metapage));
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 4561a09481..3242e2713e 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.58 2009/02/09 21:18:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.59 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -225,35 +225,35 @@ hashvarlena(PG_FUNCTION_ARGS)
* are sometimes the same for one pair and different for another pair.
* This was tested for:
* * pairs that differed by one bit, by two bits, in any combination
- * of top bits of (a,b,c), or in any combination of bottom bits of
- * (a,b,c).
+ * of top bits of (a,b,c), or in any combination of bottom bits of
+ * (a,b,c).
* * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
- * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
- * is commonly produced by subtraction) look like a single 1-bit
- * difference.
+ * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ *	  is commonly produced by subtraction) looks like a single 1-bit
+ * difference.
* * the base values were pseudorandom, all zero but one bit set, or
- * all zero plus a counter that starts at zero.
- *
+ * all zero plus a counter that starts at zero.
+ *
* This does not achieve avalanche. There are input bits of (a,b,c)
* that fail to affect some output bits of (a,b,c), especially of a. The
* most thoroughly mixed value is c, but it doesn't really even achieve
- * avalanche in c.
- *
+ * avalanche in c.
+ *
* This allows some parallelism. Read-after-writes are good at doubling
* the number of bits affected, so the goal of mixing pulls in the opposite
- * direction from the goal of parallelism. I did what I could. Rotates
+ * direction from the goal of parallelism. I did what I could. Rotates
* seem to cost as much as shifts on every machine I could lay my hands on,
* and rotates are much kinder to the top and bottom bits, so I used rotates.
*----------
*/
#define mix(a,b,c) \
{ \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c,16); c += b; \
- b -= a; b ^= rot(a,19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
}
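
For reference, rot() in the macro above is the plain 32-bit left rotate, and one mixing round can be exercised outside the backend. A minimal standalone sketch, using uint32_t for the backend's uint32 and the exact rotate constants from the reindented macro:

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit left rotate, as used by the mix() macro above */
    #define rot(x, k) (((uint32_t) (x) << (k)) | ((uint32_t) (x) >> (32 - (k))))

    /* the same mixing steps as the macro, written as a function */
    static void
    mix3(uint32_t *a, uint32_t *b, uint32_t *c)
    {
        *a -= *c;  *a ^= rot(*c, 4);   *c += *b;
        *b -= *a;  *b ^= rot(*a, 6);   *a += *c;
        *c -= *b;  *c ^= rot(*b, 8);   *b += *a;
        *a -= *c;  *a ^= rot(*c, 16);  *c += *b;
        *b -= *a;  *b ^= rot(*a, 19);  *a += *c;
        *c -= *b;  *c ^= rot(*b, 4);   *b += *a;
    }

    int
    main(void)
    {
        uint32_t a = 1, b = 1, c = 1;

        mix3(&a, &b, &c);       /* one round scrambles all three words */
        printf("%08x %08x %08x\n", (unsigned) a, (unsigned) b, (unsigned) c);
        return 0;
    }

As the surrounding comment notes, rotates cost about the same as shifts on the tested machines but treat the top and bottom bits more evenly, which is why they were chosen.
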
/*----------
@@ -262,20 +262,20 @@ hashvarlena(PG_FUNCTION_ARGS)
* Pairs of (a,b,c) values differing in only a few bits will usually
* produce values of c that look totally different. This was tested for
* * pairs that differed by one bit, by two bits, in any combination
- * of top bits of (a,b,c), or in any combination of bottom bits of
- * (a,b,c).
+ * of top bits of (a,b,c), or in any combination of bottom bits of
+ * (a,b,c).
* * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
- * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
- * is commonly produced by subtraction) look like a single 1-bit
- * difference.
+ * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ *	  is commonly produced by subtraction) looks like a single 1-bit
+ * difference.
* * the base values were pseudorandom, all zero but one bit set, or
- * all zero plus a counter that starts at zero.
- *
+ * all zero plus a counter that starts at zero.
+ *
 * The use of separate functions for mix() and final() allows for a
 * substantial performance increase since final() does not need to
 * do well in reverse, but it does need to affect all output bits.
* mix(), on the other hand, does not need to affect all output
- * bits (affecting 32 bits is enough). The original hash function had
+ * bits (affecting 32 bits is enough). The original hash function had
* a single mixing operation that had to satisfy both sets of requirements
* and was slower as a result.
*----------
@@ -374,9 +374,9 @@ hash_any(register const unsigned char *k, register int keylen)
/* fall through */
case 1:
a += ((uint32) k[0] << 24);
- /* case 0: nothing left to add */
+ /* case 0: nothing left to add */
}
-#else /* !WORDS_BIGENDIAN */
+#else /* !WORDS_BIGENDIAN */
switch (len)
{
case 11:
@@ -413,9 +413,9 @@ hash_any(register const unsigned char *k, register int keylen)
/* fall through */
case 1:
a += k[0];
- /* case 0: nothing left to add */
+ /* case 0: nothing left to add */
}
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
}
else
{
@@ -428,11 +428,11 @@ hash_any(register const unsigned char *k, register int keylen)
a += (k[3] + ((uint32) k[2] << 8) + ((uint32) k[1] << 16) + ((uint32) k[0] << 24));
b += (k[7] + ((uint32) k[6] << 8) + ((uint32) k[5] << 16) + ((uint32) k[4] << 24));
c += (k[11] + ((uint32) k[10] << 8) + ((uint32) k[9] << 16) + ((uint32) k[8] << 24));
-#else /* !WORDS_BIGENDIAN */
+#else /* !WORDS_BIGENDIAN */
a += (k[0] + ((uint32) k[1] << 8) + ((uint32) k[2] << 16) + ((uint32) k[3] << 24));
b += (k[4] + ((uint32) k[5] << 8) + ((uint32) k[6] << 16) + ((uint32) k[7] << 24));
c += (k[8] + ((uint32) k[9] << 8) + ((uint32) k[10] << 16) + ((uint32) k[11] << 24));
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
mix(a, b, c);
k += 12;
len -= 12;
@@ -465,9 +465,9 @@ hash_any(register const unsigned char *k, register int keylen)
a += ((uint32) k[1] << 16);
case 1:
a += ((uint32) k[0] << 24);
- /* case 0: nothing left to add */
+ /* case 0: nothing left to add */
}
-#else /* !WORDS_BIGENDIAN */
+#else /* !WORDS_BIGENDIAN */
switch (len) /* all the case statements fall through */
{
case 11:
@@ -493,9 +493,9 @@ hash_any(register const unsigned char *k, register int keylen)
a += ((uint32) k[1] << 8);
case 1:
a += k[0];
- /* case 0: nothing left to add */
+ /* case 0: nothing left to add */
}
-#endif /* WORDS_BIGENDIAN */
+#endif /* WORDS_BIGENDIAN */
}
final(a, b, c);
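
The fall-through switch ladders above fold the 1 to 11 leftover key bytes into (a, b, c) before final(); each case adds one byte at its proper shift and drops into the next. A standalone sketch of the little-endian, byte-at-a-time variant (function and variable names are illustrative, not the backend's):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * Fold the last len (0..11) bytes of k into a/b/c, little-endian style:
     * bytes 0-3 go into a, 4-7 into b, 8-11 into c, each shifted into place.
     */
    static void
    fold_tail_le(const unsigned char *k, size_t len,
                 uint32_t *a, uint32_t *b, uint32_t *c)
    {
        switch (len)            /* all cases fall through */
        {
            case 11: *c += (uint32_t) k[10] << 16;
            case 10: *c += (uint32_t) k[9] << 8;
            case 9:  *c += k[8];
            case 8:  *b += (uint32_t) k[7] << 24;
            case 7:  *b += (uint32_t) k[6] << 16;
            case 6:  *b += (uint32_t) k[5] << 8;
            case 5:  *b += k[4];
            case 4:  *a += (uint32_t) k[3] << 24;
            case 3:  *a += (uint32_t) k[2] << 16;
            case 2:  *a += (uint32_t) k[1] << 8;
            case 1:  *a += k[0];
                /* case 0: nothing left to add */
        }
    }

    int
    main(void)
    {
        const unsigned char key[] = "abcdefghij";   /* 10 leftover bytes */
        uint32_t a = 0, b = 0, c = 0;

        fold_tail_le(key, sizeof(key) - 1, &a, &b, &c);
        printf("%08x %08x %08x\n", (unsigned) a, (unsigned) b, (unsigned) c);
        return 0;
    }
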
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index f47d213298..bd1f930e06 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.79 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.80 2009/06/11 14:48:53 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -361,8 +361,8 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* Choose the number of initial bucket pages to match the fill factor
* given the estimated number of tuples. We round up the result to the
- * next power of 2, however, and always force at least 2 bucket pages.
- * The upper limit is determined by considerations explained in
+ * next power of 2, however, and always force at least 2 bucket pages. The
+ * upper limit is determined by considerations explained in
* _hash_expandtable().
*/
dnumbuckets = num_tuples / ffactor;
@@ -421,8 +421,8 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* We initialize the index with N buckets, 0 .. N-1, occupying physical
- * blocks 1 to N. The first freespace bitmap page is in block N+1.
- * Since N is a power of 2, we can set the masks this way:
+ * blocks 1 to N. The first freespace bitmap page is in block N+1. Since
+ * N is a power of 2, we can set the masks this way:
*/
metap->hashm_maxbucket = metap->hashm_lowmask = num_buckets - 1;
metap->hashm_highmask = (num_buckets << 1) - 1;
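
A worked instance of the mask setup above, assuming num_buckets = 4 (it is always a power of 2 at this point): buckets 0..3 exist, lowmask covers exactly those, and highmask already covers the next doubling.

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t num_buckets = 4;                   /* always a power of 2 here */

        /* the same three assignments as in _hash_metapinit() above */
        uint32_t maxbucket = num_buckets - 1;       /* 3 */
        uint32_t lowmask = num_buckets - 1;         /* 0x3 */
        uint32_t highmask = (num_buckets << 1) - 1; /* 0x7 */

        printf("maxbucket=%u lowmask=%#x highmask=%#x\n",
               (unsigned) maxbucket, (unsigned) lowmask, (unsigned) highmask);
        return 0;
    }
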
@@ -438,8 +438,8 @@ _hash_metapinit(Relation rel, double num_tuples)
/*
* Release buffer lock on the metapage while we initialize buckets.
* Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
- * won't accomplish anything. It's a bad idea to hold buffer locks
- * for long intervals in any case, since that can block the bgwriter.
+ * won't accomplish anything. It's a bad idea to hold buffer locks for
+ * long intervals in any case, since that can block the bgwriter.
*/
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
@@ -552,8 +552,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
* than a disk block then this would be an independent constraint.
*
- * If you change this, see also the maximum initial number of buckets
- * in _hash_metapinit().
+ * If you change this, see also the maximum initial number of buckets in
+ * _hash_metapinit().
*/
if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
goto fail;
@@ -839,8 +839,8 @@ _hash_splitbucket(Relation rel,
}
/*
- * Fetch the item's hash key (conveniently stored in the item)
- * and determine which bucket it now belongs in.
+ * Fetch the item's hash key (conveniently stored in the item) and
+ * determine which bucket it now belongs in.
*/
itup = (IndexTuple) PageGetItem(opage, PageGetItemId(opage, ooffnum));
bucket = _hash_hashkey2bucket(_hash_get_indextuple_hashkey(itup),
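
The bucket computation above routes each key through _hash_hashkey2bucket() using the masks initialized in _hash_metapinit(). A sketch of the mapping rule those masks imply (an approximation of the hashutil.c helper, not a verbatim copy): mask with highmask first, and if that overshoots the buckets that exist so far, fall back to lowmask.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t Bucket;

    /*
     * Map a 32-bit hash key onto an existing bucket.  Masking with highmask
     * may overshoot while a table doubling is still in progress; in that case
     * the key still belongs to its pre-split bucket, recovered via lowmask.
     */
    static Bucket
    hashkey_to_bucket(uint32_t hashkey, Bucket maxbucket,
                      uint32_t highmask, uint32_t lowmask)
    {
        Bucket bucket = hashkey & highmask;

        if (bucket > maxbucket)
            bucket = bucket & lowmask;
        return bucket;
    }

    int
    main(void)
    {
        /* 4 buckets so far (max 3), mid-doubling masks 0x7 / 0x3 */
        printf("%u\n", (unsigned) hashkey_to_bucket(0xdeadbeef, 3, 0x7, 0x3));
        return 0;
    }
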
diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c
index 8cd371e902..1b0ce2c739 100644
--- a/src/backend/access/hash/hashscan.c
+++ b/src/backend/access/hash/hashscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.46 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.47 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ ReleaseResources_hash(void)
/*
* Release all HashScanList items belonging to the current ResourceOwner.
* Note that we do not release the underlying IndexScanDesc; that's in
- * executor memory and will go away on its own (in fact quite possibly
- * has gone away already, so we mustn't try to touch it here).
+ * executor memory and will go away on its own (in fact quite possibly has
+ * gone away already, so we mustn't try to touch it here).
*
* Note: this should be a no-op during normal query shutdown. However, in
* an abort situation ExecutorEnd is not called and so there may be open
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index 5a9763fe9a..fec2f5d78a 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.56 2009/05/05 19:36:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.57 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -312,15 +312,15 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
for (;;)
{
/*
- * check if we're still in the range of items with
- * the target hash key
+ * check if we're still in the range of items with the
+ * target hash key
*/
if (offnum <= maxoff)
{
Assert(offnum >= FirstOffsetNumber);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup))
- break; /* yes, so exit for-loop */
+ break; /* yes, so exit for-loop */
}
/*
@@ -353,15 +353,15 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
for (;;)
{
/*
- * check if we're still in the range of items with
- * the target hash key
+ * check if we're still in the range of items with the
+ * target hash key
*/
if (offnum >= FirstOffsetNumber)
{
Assert(offnum <= maxoff);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
if (so->hashso_sk_hash == _hash_get_indextuple_hashkey(itup))
- break; /* yes, so exit for-loop */
+ break; /* yes, so exit for-loop */
}
/*
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 6ba8362a9e..00e7dc5f5d 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -8,7 +8,7 @@
* thrashing. We use tuplesort.c to sort the given index tuples into order.
*
* Note: if the number of rows in the table has been underestimated,
- * bucket splits may occur during the index build. In that case we'd
+ * bucket splits may occur during the index build. In that case we'd
* be inserting into two or more buckets for each possible masked-off
* hash code value. That's no big problem though, since we'll still have
* plenty of locality of access.
@@ -18,7 +18,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsort.c,v 1.2 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsort.c,v 1.3 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,13 +52,13 @@ _h_spoolinit(Relation index, uint32 num_buckets)
hspool->index = index;
/*
- * Determine the bitmask for hash code values. Since there are currently
+ * Determine the bitmask for hash code values. Since there are currently
* num_buckets buckets in the index, the appropriate mask can be computed
* as follows.
*
- * Note: at present, the passed-in num_buckets is always a power of 2,
- * so we could just compute num_buckets - 1. We prefer not to assume
- * that here, though.
+ * Note: at present, the passed-in num_buckets is always a power of 2, so
+ * we could just compute num_buckets - 1. We prefer not to assume that
+ * here, though.
*/
hash_mask = (((uint32) 1) << _hash_log2(num_buckets)) - 1;
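
To make the computation above concrete: _hash_log2(n) is the smallest i such that (1 << i) >= n, so num_buckets = 1000 gives a shift of 10 and hash_mask = 1023. A standalone sketch, with ceil_log2 standing in for the backend helper:

    #include <stdint.h>
    #include <stdio.h>

    /* smallest i such that (1 << i) >= num, i.e. ceil(log2(num)) */
    static uint32_t
    ceil_log2(uint32_t num)
    {
        uint32_t i, limit;

        for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
            ;
        return i;
    }

    int
    main(void)
    {
        uint32_t num_buckets = 1000;
        uint32_t hash_mask = (((uint32_t) 1) << ceil_log2(num_buckets)) - 1;

        printf("hash_mask = %u\n", (unsigned) hash_mask);   /* 1023 */
        return 0;
    }
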
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 42e79376f8..ba1d6fbbf5 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.59 2009/01/05 17:14:28 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.60 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,10 +29,10 @@ bool
_hash_checkqual(IndexScanDesc scan, IndexTuple itup)
{
/*
- * Currently, we can't check any of the scan conditions since we do
- * not have the original index entry value to supply to the sk_func.
- * Always return true; we expect that hashgettuple already set the
- * recheck flag to make the main indexscan code do it.
+ * Currently, we can't check any of the scan conditions since we do not
+ * have the original index entry value to supply to the sk_func. Always
+ * return true; we expect that hashgettuple already set the recheck flag
+ * to make the main indexscan code do it.
*/
#ifdef NOT_USED
TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
@@ -240,8 +240,8 @@ _hash_get_indextuple_hashkey(IndexTuple itup)
char *attp;
/*
- * We assume the hash key is the first attribute and can't be null,
- * so this can be done crudely but very very cheaply ...
+ * We assume the hash key is the first attribute and can't be null, so
+ * this can be done crudely but very very cheaply ...
*/
attp = (char *) itup + IndexInfoFindDataOffset(itup->t_info);
return *((uint32 *) attp);
@@ -253,10 +253,10 @@ _hash_get_indextuple_hashkey(IndexTuple itup)
IndexTuple
_hash_form_tuple(Relation index, Datum *values, bool *isnull)
{
- IndexTuple itup;
- uint32 hashkey;
- Datum hashkeydatum;
- TupleDesc hashdesc;
+ IndexTuple itup;
+ uint32 hashkey;
+ Datum hashkeydatum;
+ TupleDesc hashdesc;
if (isnull[0])
hashkeydatum = (Datum) 0;
@@ -280,14 +280,14 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull)
*
* Returns the offset of the first index entry having hashkey >= hash_value,
* or the page's max offset plus one if hash_value is greater than all
- * existing hash keys in the page. This is the appropriate place to start
+ * existing hash keys in the page. This is the appropriate place to start
* a search, or to insert a new item.
*/
OffsetNumber
_hash_binsearch(Page page, uint32 hash_value)
{
- OffsetNumber upper;
- OffsetNumber lower;
+ OffsetNumber upper;
+ OffsetNumber lower;
/* Loop invariant: lower <= desired place <= upper */
upper = PageGetMaxOffsetNumber(page) + 1;
@@ -295,9 +295,9 @@ _hash_binsearch(Page page, uint32 hash_value)
while (upper > lower)
{
- OffsetNumber off;
- IndexTuple itup;
- uint32 hashkey;
+ OffsetNumber off;
+ IndexTuple itup;
+ uint32 hashkey;
off = (upper + lower) / 2;
Assert(OffsetNumberIsValid(off));
@@ -324,8 +324,8 @@ _hash_binsearch(Page page, uint32 hash_value)
OffsetNumber
_hash_binsearch_last(Page page, uint32 hash_value)
{
- OffsetNumber upper;
- OffsetNumber lower;
+ OffsetNumber upper;
+ OffsetNumber lower;
/* Loop invariant: lower <= desired place <= upper */
upper = PageGetMaxOffsetNumber(page);
@@ -333,9 +333,9 @@ _hash_binsearch_last(Page page, uint32 hash_value)
while (upper > lower)
{
- IndexTuple itup;
- OffsetNumber off;
- uint32 hashkey;
+ IndexTuple itup;
+ OffsetNumber off;
+ uint32 hashkey;
off = (upper + lower + 1) / 2;
Assert(OffsetNumberIsValid(off));
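
Both loops above keep the stated invariant, lower <= desired place <= upper, and shrink the window until it closes; _hash_binsearch() probes at (upper + lower) / 2 to find the first entry >= hash_value, while _hash_binsearch_last() rounds the probe up to find the last entry <= hash_value. The same first-match search over a plain sorted array, as a self-contained sketch:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /*
     * Return the index of the first element >= key, or n if key is greater
     * than every element -- the contract of _hash_binsearch(), with a sorted
     * C array standing in for the index page.
     */
    static size_t
    lower_bound(const uint32_t *arr, size_t n, uint32_t key)
    {
        size_t lower = 0;
        size_t upper = n;       /* invariant: lower <= answer <= upper */

        while (upper > lower)
        {
            size_t mid = (upper + lower) / 2;

            if (arr[mid] >= key)
                upper = mid;        /* answer is at mid or to its left */
            else
                lower = mid + 1;    /* answer is strictly right of mid */
        }
        return lower;
    }

    int
    main(void)
    {
        const uint32_t keys[] = {10, 20, 20, 30};

        printf("%zu\n", lower_bound(keys, 4, 20));  /* prints 1 */
        return 0;
    }

Rounding the probe down here (and up in the _last variant) is what guarantees the window shrinks on every iteration.
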
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1f6edf6caf..2e45c041a6 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.276 2009/06/10 18:54:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.277 2009/06/11 14:48:53 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -69,7 +69,7 @@
/* GUC variable */
-bool synchronize_seqscans = true;
+bool synchronize_seqscans = true;
static HeapScanDesc heap_beginscan_internal(Relation relation,
@@ -116,9 +116,9 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
* strategy and enable synchronized scanning (see syncscan.c). Although
* the thresholds for these features could be different, we make them the
* same so that there are only two behaviors to tune rather than four.
- * (However, some callers need to be able to disable one or both of
- * these behaviors, independently of the size of the table; also there
- * is a GUC variable that can disable synchronized scanning.)
+ * (However, some callers need to be able to disable one or both of these
+ * behaviors, independently of the size of the table; also there is a GUC
+ * variable that can disable synchronized scanning.)
*
* During a rescan, don't make a new strategy object if we don't have to.
*/
@@ -146,8 +146,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan)
if (is_rescan)
{
/*
- * If rescan, keep the previous startblock setting so that rewinding
- * a cursor doesn't generate surprising results. Reset the syncscan
+ * If rescan, keep the previous startblock setting so that rewinding a
+ * cursor doesn't generate surprising results. Reset the syncscan
* setting, though.
*/
scan->rs_syncscan = (allow_sync && synchronize_seqscans);
@@ -1793,7 +1793,7 @@ void
FreeBulkInsertState(BulkInsertState bistate)
{
if (bistate->current_buf != InvalidBuffer)
- ReleaseBuffer(bistate->current_buf);
+ ReleaseBuffer(bistate->current_buf);
FreeAccessStrategy(bistate->strategy);
pfree(bistate);
}
@@ -1977,7 +1977,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/* Clear the bit in the visibility map if necessary */
if (all_visible_cleared)
- visibilitymap_clear(relation,
+ visibilitymap_clear(relation,
ItemPointerGetBlockNumber(&(heaptup->t_self)));
/*
@@ -3437,8 +3437,8 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * Don't update the visibility map here. Locking a tuple doesn't
- * change visibility info.
+ * Don't update the visibility map here. Locking a tuple doesn't change
+ * visibility info.
*/
/*
@@ -4115,11 +4115,11 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record, bool clean_move)
nowunused, nunused,
clean_move);
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
/*
- * Note: we don't worry about updating the page's prunability hints.
- * At worst this will cause an extra prune cycle to occur soon.
+ * Note: we don't worry about updating the page's prunability hints. At
+ * worst this will cause an extra prune cycle to occur soon.
*/
PageSetLSN(page, lsn);
@@ -4217,17 +4217,18 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
OffsetNumber offnum;
ItemId lp = NULL;
HeapTupleHeader htup;
- BlockNumber blkno;
+ BlockNumber blkno;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+
visibilitymap_clear(reln, blkno);
FreeFakeRelcacheEntry(reln);
}
@@ -4294,17 +4295,18 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
xl_heap_header xlhdr;
uint32 newlen;
Size freespace;
- BlockNumber blkno;
+ BlockNumber blkno;
blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+
visibilitymap_clear(reln, blkno);
FreeFakeRelcacheEntry(reln);
}
@@ -4361,7 +4363,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
if (offnum == InvalidOffsetNumber)
elog(PANIC, "heap_insert_redo: failed to add tuple");
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@@ -4374,8 +4376,8 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
/*
* If the page is running low on free space, update the FSM as well.
- * Arbitrarily, our definition of "low" is less than 20%. We can't do
- * much better than that without knowing the fill-factor for the table.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
*
* XXX: We don't get here if the page was restored from full page image.
* We don't bother to update the FSM in that case, it doesn't need to be
@@ -4410,12 +4412,13 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
Size freespace;
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+
visibilitymap_clear(reln,
ItemPointerGetBlockNumber(&xlrec->target.tid));
FreeFakeRelcacheEntry(reln);
@@ -4504,12 +4507,13 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
newt:;
/*
- * The visibility map always needs to be updated, even if the heap page
- * is already up-to-date.
+ * The visibility map always needs to be updated, even if the heap page is
+ * already up-to-date.
*/
if (xlrec->new_all_visible_cleared)
{
- Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+ Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
+
visibilitymap_clear(reln, ItemPointerGetBlockNumber(&xlrec->newtid));
FreeFakeRelcacheEntry(reln);
}
@@ -4595,7 +4599,7 @@ newsame:;
if (xlrec->new_all_visible_cleared)
PageClearAllVisible(page);
- freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
+ freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@@ -4604,8 +4608,8 @@ newsame:;
/*
* If the page is running low on free space, update the FSM as well.
- * Arbitrarily, our definition of "low" is less than 20%. We can't do
- * much better than that without knowing the fill-factor for the table.
+ * Arbitrarily, our definition of "low" is less than 20%. We can't do much
+ * better than that without knowing the fill-factor for the table.
*
* However, don't update the FSM on HOT updates, because after crash
* recovery, either the old or the new tuple will certainly be dead and
@@ -4619,7 +4623,7 @@ newsame:;
*/
if (!hot_update && freespace < BLCKSZ / 5)
XLogRecordPageWithFreeSpace(xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
+ ItemPointerGetBlockNumber(&(xlrec->newtid)), freespace);
}
static void
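
The "less than 20%" rule in the hunks above is literal integer arithmetic on the page size: with the default 8K BLCKSZ, the FSM update fires when fewer than 1638 bytes remain. A one-line check of that figure:

    #include <stdio.h>

    #define BLCKSZ 8192             /* default PostgreSQL page size */

    int
    main(void)
    {
        /* the "low on free space" cutoff used above */
        printf("update FSM when freespace < %d bytes\n", BLCKSZ / 5);   /* 1638 */
        return 0;
    }
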
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 1d70e6c01a..7ed8612357 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.75 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.76 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
BulkInsertState bistate)
{
- Buffer buffer;
+ Buffer buffer;
/* If not bulk-insert, exactly like ReadBuffer */
if (!bistate)
@@ -118,7 +118,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* happen if space is freed in that page after heap_update finds there's not
* enough there). In that case, the page will be pinned and locked only once.
*
- * We normally use FSM to help us find free space. However,
+ * We normally use FSM to help us find free space. However,
* if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
* the end of the relation if the tuple won't fit on the current target page.
* This can save some cycles when we know the relation is new and doesn't
@@ -133,7 +133,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
* for additional constraints needed for safe usage of this behavior.)
*
* The caller can also provide a BulkInsertState object to optimize many
- * insertions into the same relation. This keeps a pin on the current
+ * insertions into the same relation. This keeps a pin on the current
* insertion target page (to save pin/unpin cycles) and also passes a
* BULKWRITE buffer selection strategy object to the buffer manager.
* Passing NULL for bistate selects the default behavior.
@@ -186,7 +186,7 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We first try to put the tuple on the same page we last inserted a tuple
- * on, as cached in the BulkInsertState or relcache entry. If that
+ * on, as cached in the BulkInsertState or relcache entry. If that
* doesn't work, we ask the Free Space Map to locate a suitable page.
* Since the FSM's info might be out of date, we have to be prepared to
 * loop around and retry multiple times. (To ensure this isn't an infinite
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 4b445c7ad8..71ea689d0e 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.17 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.18 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
typedef struct
{
TransactionId new_prune_xid; /* new prune hint value for page */
- int nredirected; /* numbers of entries in arrays below */
+ int nredirected; /* numbers of entries in arrays below */
int ndead;
int nunused;
/* arrays that accumulate indexes of items to be changed */
@@ -159,21 +159,21 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
/*
* Our strategy is to scan the page and make lists of items to change,
- * then apply the changes within a critical section. This keeps as
- * much logic as possible out of the critical section, and also ensures
- * that WAL replay will work the same as the normal case.
+ * then apply the changes within a critical section. This keeps as much
+ * logic as possible out of the critical section, and also ensures that
+ * WAL replay will work the same as the normal case.
*
- * First, inform inval.c that upcoming CacheInvalidateHeapTuple calls
- * are nontransactional.
+ * First, inform inval.c that upcoming CacheInvalidateHeapTuple calls are
+ * nontransactional.
*/
if (redirect_move)
BeginNonTransactionalInvalidation();
/*
- * Initialize the new pd_prune_xid value to zero (indicating no
- * prunable tuples). If we find any tuples which may soon become
- * prunable, we will save the lowest relevant XID in new_prune_xid.
- * Also initialize the rest of our working state.
+ * Initialize the new pd_prune_xid value to zero (indicating no prunable
+ * tuples). If we find any tuples which may soon become prunable, we will
+ * save the lowest relevant XID in new_prune_xid. Also initialize the rest
+ * of our working state.
*/
prstate.new_prune_xid = InvalidTransactionId;
prstate.nredirected = prstate.ndead = prstate.nunused = 0;
@@ -204,9 +204,9 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
}
/*
- * Send invalidation messages for any tuples we are about to move.
- * It is safe to do this now, even though we could theoretically still
- * fail before making the actual page update, because a useless cache
+ * Send invalidation messages for any tuples we are about to move. It is
+ * safe to do this now, even though we could theoretically still fail
+ * before making the actual page update, because a useless cache
* invalidation doesn't hurt anything. Also, no one else can reload the
* tuples while we have exclusive buffer lock, so it's not too early to
* send the invals. This avoids sending the invals while inside the
@@ -222,9 +222,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
{
/*
- * Apply the planned item changes, then repair page fragmentation,
- * and update the page's hint bit about whether it has free line
- * pointers.
+ * Apply the planned item changes, then repair page fragmentation, and
+ * update the page's hint bit about whether it has free line pointers.
*/
heap_page_prune_execute(buffer,
prstate.redirected, prstate.nredirected,
@@ -268,8 +267,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
{
/*
* If we didn't prune anything, but have found a new value for the
- * pd_prune_xid field, update it and mark the buffer dirty.
- * This is treated as a non-WAL-logged hint.
+ * pd_prune_xid field, update it and mark the buffer dirty. This is
+ * treated as a non-WAL-logged hint.
*
* Also clear the "page is full" flag if it is set, since there's no
* point in repeating the prune/defrag process until something else
@@ -334,8 +333,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
* OldestXmin is the cutoff XID used to identify dead tuples.
*
* We don't actually change the page here, except perhaps for hint-bit updates
- * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
- * prstate showing the changes to be made. Items to be redirected are added
+ * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in
+ * prstate showing the changes to be made. Items to be redirected are added
* to the redirected[] array (two entries per redirection); items to be set to
* LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED
* state are added to nowunused[].
@@ -598,19 +597,19 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum,
else if (redirect_move && ItemIdIsRedirected(rootlp))
{
/*
- * If we desire to eliminate LP_REDIRECT items by moving tuples,
- * make a redirection entry for each redirected root item; this
- * will cause heap_page_prune_execute to actually do the move.
- * (We get here only when there are no DEAD tuples in the chain;
- * otherwise the redirection entry was made above.)
+ * If we desire to eliminate LP_REDIRECT items by moving tuples, make
+ * a redirection entry for each redirected root item; this will cause
+ * heap_page_prune_execute to actually do the move. (We get here only
+ * when there are no DEAD tuples in the chain; otherwise the
+ * redirection entry was made above.)
*/
heap_prune_record_redirect(prstate, rootoffnum, chainitems[1]);
redirect_target = chainitems[1];
}
/*
- * If we are going to implement a redirect by moving tuples, we have
- * to issue a cache invalidation against the redirection target tuple,
+ * If we are going to implement a redirect by moving tuples, we have to
+ * issue a cache invalidation against the redirection target tuple,
* because its CTID will be effectively changed by the move. Note that
* CacheInvalidateHeapTuple only queues the request, it doesn't send it;
* if we fail before reaching EndNonTransactionalInvalidation, nothing
@@ -693,7 +692,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
* buffer, and is inside a critical section.
*
* This is split out because it is also used by heap_xlog_clean()
- * to replay the WAL record when needed after a crash. Note that the
+ * to replay the WAL record when needed after a crash. Note that the
* arguments are identical to those of log_heap_clean().
*/
void
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index f6a16b8492..6f00c24845 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -96,7 +96,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.17 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.18 2009/06/11 14:48:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -577,7 +577,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
HEAP_INSERT_SKIP_FSM |
(state->rs_use_wal ?
- 0 : HEAP_INSERT_SKIP_WAL));
+ 0 : HEAP_INSERT_SKIP_WAL));
else
heaptup = tup;
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 35019a543c..a3203085b7 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.92 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.93 2009/06/11 14:48:54 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1073,8 +1073,8 @@ toast_compress_datum(Datum value)
Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(value)));
/*
- * No point in wasting a palloc cycle if value size is out of the
- * allowed range for compression
+ * No point in wasting a palloc cycle if value size is out of the allowed
+ * range for compression
*/
if (valsize < PGLZ_strategy_default->min_input_size ||
valsize > PGLZ_strategy_default->max_input_size)
@@ -1087,10 +1087,10 @@ toast_compress_datum(Datum value)
* because it might be satisfied with having saved as little as one byte
* in the compressed data --- which could turn into a net loss once you
* consider header and alignment padding. Worst case, the compressed
- * format might require three padding bytes (plus header, which is included
- * in VARSIZE(tmp)), whereas the uncompressed format would take only one
- * header byte and no padding if the value is short enough. So we insist
- * on a savings of more than 2 bytes to ensure we have a gain.
+ * format might require three padding bytes (plus header, which is
+ * included in VARSIZE(tmp)), whereas the uncompressed format would take
+ * only one header byte and no padding if the value is short enough. So
+ * we insist on a savings of more than 2 bytes to ensure we have a gain.
*/
if (pglz_compress(VARDATA_ANY(DatumGetPointer(value)), valsize,
(PGLZ_Header *) tmp, PGLZ_strategy_default) &&
@@ -1130,7 +1130,7 @@ toast_save_datum(Relation rel, Datum value, int options)
struct
{
struct varlena hdr;
- char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */
+ char data[TOAST_MAX_CHUNK_SIZE]; /* make struct big enough */
int32 align_it; /* ensure struct is aligned well enough */
} chunk_data;
int32 chunk_size;
@@ -1295,8 +1295,8 @@ toast_delete_datum(Relation rel, Datum value)
/*
* Find all the chunks. (We don't actually care whether we see them in
- * sequence or not, but since we've already locked the index we might
- * as well use systable_beginscan_ordered.)
+ * sequence or not, but since we've already locked the index we might as
+ * well use systable_beginscan_ordered.)
*/
toastscan = systable_beginscan_ordered(toastrel, toastidx,
SnapshotToast, 1, &toastkey);
@@ -1598,7 +1598,7 @@ toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length)
*/
nextidx = startchunk;
toastscan = systable_beginscan_ordered(toastrel, toastidx,
- SnapshotToast, nscankeys, toastkey);
+ SnapshotToast, nscankeys, toastkey);
while ((ttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
{
/*
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 2b5a0704ed..d425e772b9 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -8,10 +8,10 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.3 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.4 2009/06/11 14:48:54 momjian Exp $
*
* INTERFACE ROUTINES
- * visibilitymap_clear - clear a bit in the visibility map
+ * visibilitymap_clear - clear a bit in the visibility map
* visibilitymap_pin - pin a map page for setting a bit
* visibilitymap_set - set a bit in a previously pinned page
* visibilitymap_test - test if a bit is set
@@ -144,7 +144,7 @@ visibilitymap_clear(Relation rel, BlockNumber heapBlk)
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
- return; /* nothing to do */
+ return; /* nothing to do */
LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);
map = PageGetContents(BufferGetPage(mapBuffer));
@@ -295,10 +295,11 @@ void
visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
{
BlockNumber newnblocks;
+
/* last remaining block, byte, and bit */
BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
- uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
- uint8 truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);
+ uint32 truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
+ uint8 truncBit = HEAPBLK_TO_MAPBIT(nheapblocks);
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
@@ -315,14 +316,14 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
* Unless the new size is exactly at a visibility map page boundary, the
* tail bits in the last remaining map page, representing truncated heap
* blocks, need to be cleared. This is not only tidy, but also necessary
- * because we don't get a chance to clear the bits if the heap is
- * extended again.
+ * because we don't get a chance to clear the bits if the heap is extended
+ * again.
*/
if (truncByte != 0 || truncBit != 0)
{
- Buffer mapBuffer;
- Page page;
- char *map;
+ Buffer mapBuffer;
+ Page page;
+ char *map;
newnblocks = truncBlock + 1;
@@ -344,11 +345,8 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
/*
* Mask out the unwanted bits of the last remaining byte.
*
- * ((1 << 0) - 1) = 00000000
- * ((1 << 1) - 1) = 00000001
- * ...
- * ((1 << 6) - 1) = 00111111
- * ((1 << 7) - 1) = 01111111
+	 * ((1 << 0) - 1) = 00000000
+	 * ((1 << 1) - 1) = 00000001
+	 * ...
+	 * ((1 << 6) - 1) = 00111111
+	 * ((1 << 7) - 1) = 01111111
*/
map[truncByte] &= (1 << truncBit) - 1;
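
Each visibility-map byte covers eight heap pages, so truncating the heap to nheapblocks usually leaves a partial final byte whose upper bits describe pages that no longer exist; the mask above keeps only bits 0 .. truncBit - 1. A sketch of the block/byte/bit arithmetic behind the HEAPBLK_TO_* macros, with HEAPBLOCKS_PER_PAGE pinned to its default-8K value (the real macro derives it from BLCKSZ and the page header size):

    #include <stdint.h>
    #include <stdio.h>

    #define HEAPBLOCKS_PER_BYTE 8
    #define HEAPBLOCKS_PER_PAGE 65344   /* (8192 - 24) * 8 for default 8K pages */

    /* locate the map page, the byte within it, and the bit within that byte */
    static void
    vm_position(uint32_t heapBlk,
                uint32_t *mapBlock, uint32_t *mapByte, uint8_t *mapBit)
    {
        *mapBlock = heapBlk / HEAPBLOCKS_PER_PAGE;
        *mapByte = (heapBlk % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE;
        *mapBit = heapBlk % HEAPBLOCKS_PER_BYTE;
    }

    /* keep bits for surviving heap pages only */
    static uint8_t
    mask_final_byte(uint8_t byte, uint8_t truncBit)
    {
        return byte & ((1 << truncBit) - 1);
    }

    int
    main(void)
    {
        uint32_t mapBlock, mapByte;
        uint8_t  mapBit;

        vm_position(100000, &mapBlock, &mapByte, &mapBit);
        printf("block=%u byte=%u bit=%u mask(0xFF,3)=%#x\n",
               (unsigned) mapBlock, (unsigned) mapByte, (unsigned) mapBit,
               (unsigned) mask_final_byte(0xFF, 3));
        return 0;
    }
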
@@ -368,8 +366,8 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
rel->rd_istemp);
/*
- * Need to invalidate the relcache entry, because rd_vm_nblocks
- * seen by other backends is no longer valid.
+ * Need to invalidate the relcache entry, because rd_vm_nblocks seen by
+ * other backends is no longer valid.
*/
if (!InRecovery)
CacheInvalidateRelcache(rel);
@@ -386,7 +384,7 @@ visibilitymap_truncate(Relation rel, BlockNumber nheapblocks)
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
- Buffer buf;
+ Buffer buf;
RelationOpenSmgr(rel);
@@ -433,20 +431,20 @@ static void
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
BlockNumber vm_nblocks_now;
- Page pg;
+ Page pg;
pg = (Page) palloc(BLCKSZ);
PageInit(pg, BLCKSZ, 0);
/*
- * We use the relation extension lock to lock out other backends trying
- * to extend the visibility map at the same time. It also locks out
- * extension of the main fork, unnecessarily, but extending the
- * visibility map happens seldom enough that it doesn't seem worthwhile to
- * have a separate lock tag type for it.
+ * We use the relation extension lock to lock out other backends trying to
+ * extend the visibility map at the same time. It also locks out extension
+ * of the main fork, unnecessarily, but extending the visibility map
+ * happens seldom enough that it doesn't seem worthwhile to have a
+ * separate lock tag type for it.
*
- * Note that another backend might have extended or created the
- * relation before we get the lock.
+ * Note that another backend might have extended or created the relation
+ * before we get the lock.
*/
LockRelationForExtension(rel, ExclusiveLock);
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 9193ee40eb..a79c392071 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.73 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.74 2009/06/11 14:48:54 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -197,7 +197,7 @@ systable_beginscan(Relation heapRelation,
/* Change attribute numbers to be index column numbers. */
for (i = 0; i < nkeys; i++)
{
- int j;
+ int j;
for (j = 0; j < irel->rd_index->indnatts; j++)
{
@@ -241,12 +241,13 @@ systable_getnext(SysScanDesc sysscan)
if (sysscan->irel)
{
htup = index_getnext(sysscan->iscan, ForwardScanDirection);
+
/*
- * We currently don't need to support lossy index operators for
- * any system catalog scan. It could be done here, using the
- * scan keys to drive the operator calls, if we arranged to save
- * the heap attnums during systable_beginscan(); this is practical
- * because we still wouldn't need to support indexes on expressions.
+ * We currently don't need to support lossy index operators for any
+ * system catalog scan. It could be done here, using the scan keys to
+ * drive the operator calls, if we arranged to save the heap attnums
+ * during systable_beginscan(); this is practical because we still
+ * wouldn't need to support indexes on expressions.
*/
if (htup && sysscan->iscan->xs_recheck)
elog(ERROR, "system catalog scans with lossy index conditions are not implemented");
@@ -326,7 +327,7 @@ systable_endscan(SysScanDesc sysscan)
* index order. Also, for largely historical reasons, the index to use
* is opened and locked by the caller, not here.
*
- * Currently we do not support non-index-based scans here. (In principle
+ * Currently we do not support non-index-based scans here. (In principle
* we could do a heapscan and sort, but the uses are in places that
* probably don't need to still work with corrupted catalog indexes.)
* For the moment, therefore, these functions are merely the thinnest of
@@ -360,7 +361,7 @@ systable_beginscan_ordered(Relation heapRelation,
/* Change attribute numbers to be index column numbers. */
for (i = 0; i < nkeys; i++)
{
- int j;
+ int j;
for (j = 0; j < indexRelation->rd_index->indnatts; j++)
{
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 197fa3b041..32623965c7 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.113 2009/03/24 20:17:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.114 2009/06/11 14:48:54 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -21,7 +21,7 @@
* index_markpos - mark a scan position
* index_restrpos - restore a scan position
* index_getnext - get the next tuple from a scan
- * index_getbitmap - get all tuples from a scan
+ * index_getbitmap - get all tuples from a scan
* index_bulk_delete - bulk deletion of index tuples
* index_vacuum_cleanup - post-deletion cleanup of an index
* index_getprocid - get a support procedure OID
@@ -461,9 +461,9 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
/*
* The AM's gettuple proc finds the next index entry matching the
- * scan keys, and puts the TID in xs_ctup.t_self (ie, *tid).
- * It should also set scan->xs_recheck, though we pay no
- * attention to that here.
+ * scan keys, and puts the TID in xs_ctup.t_self (ie, *tid). It
+ * should also set scan->xs_recheck, though we pay no attention to
+ * that here.
*/
found = DatumGetBool(FunctionCall2(procedure,
PointerGetDatum(scan),
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index b0d14319b7..a06faa2020 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.169 2009/01/01 17:23:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.170 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -374,7 +374,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
* removing any LP_DEAD tuples.
*
* On entry, *buf and *offsetptr point to the first legal position
- * where the new tuple could be inserted. The caller should hold an
+ * where the new tuple could be inserted. The caller should hold an
* exclusive lock on *buf. *offsetptr can also be set to
* InvalidOffsetNumber, in which case the function will search for the
* right location within the page if needed. On exit, they point to the
@@ -951,7 +951,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
if (sopaque->btpo_prev != ropaque->btpo_prev)
elog(PANIC, "right sibling's left-link doesn't match: "
- "block %u links to %u instead of expected %u in index \"%s\"",
+ "block %u links to %u instead of expected %u in index \"%s\"",
ropaque->btpo_next, sopaque->btpo_prev, ropaque->btpo_prev,
RelationGetRelationName(rel));
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 55d947e9f2..2b76e7cd45 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.170 2009/06/06 22:13:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.171 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -290,7 +290,7 @@ Datum
btgetbitmap(PG_FUNCTION_ARGS)
{
IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
+ TIDBitmap *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
BTScanOpaque so = (BTScanOpaque) scan->opaque;
int64 ntids = 0;
ItemPointer heapTid;
@@ -579,9 +579,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During a non-FULL vacuum it's quite possible for us to be fooled by
* concurrent page splits into double-counting some index tuples, so
- * disbelieve any total that exceeds the underlying heap's count ...
- * if we know that accurately. Otherwise this might just make matters
- * worse.
+ * disbelieve any total that exceeds the underlying heap's count ... if we
+ * know that accurately. Otherwise this might just make matters worse.
*/
if (!info->vacuum_full && !info->estimated_count)
{
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 3de7d50e01..d132d6bdee 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.54 2009/01/20 18:59:37 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.55 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -338,8 +338,8 @@ btree_xlog_split(bool onleft, bool isroot,
_bt_restore_page(rpage, datapos, datalen);
/*
- * On leaf level, the high key of the left page is equal to the
- * first key on the right page.
+ * On leaf level, the high key of the left page is equal to the first key
+ * on the right page.
*/
if (xlrec->level == 0)
{
@@ -936,7 +936,7 @@ btree_xlog_cleanup(void)
buf = XLogReadBuffer(action->node, action->delblk, false);
if (BufferIsValid(buf))
{
- Relation reln;
+ Relation reln;
reln = CreateFakeRelcacheEntry(action->node);
if (_bt_pagedel(reln, buf, NULL, true) == 0)
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 368d2c9d1a..8544725abb 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -26,7 +26,7 @@
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.52 2009/01/20 18:59:37 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.53 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,7 +81,7 @@ static bool CLOGPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
static void WriteTruncateXlogRec(int pageno);
static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
- TransactionId *subxids, XidStatus status,
+ TransactionId *subxids, XidStatus status,
XLogRecPtr lsn, int pageno);
static void TransactionIdSetStatusBit(TransactionId xid, XidStatus status,
XLogRecPtr lsn, int slotno);
@@ -112,10 +112,10 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* the same CLOG page as xid. If they all are, then the lock will be grabbed
* only once, and the status will be set to committed directly. Otherwise
* we must
- * 1. set sub-committed all subxids that are not on the same page as the
- * main xid
- * 2. atomically set committed the main xid and the subxids on the same page
- * 3. go over the first bunch again and set them committed
+ * 1. set sub-committed all subxids that are not on the same page as the
+ * main xid
+ * 2. atomically set committed the main xid and the subxids on the same page
+ * 3. go over the first bunch again and set them committed
* Note that as far as concurrent checkers are concerned, main transaction
* commit as a whole is still atomic.
*
@@ -126,13 +126,13 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
* page2: set t2,t3 as sub-committed
* page3: set t4 as sub-committed
* 2. update page1:
- * set t1 as sub-committed,
+ * set t1 as sub-committed,
* then set t as committed,
 *		then set t1 as committed
* 3. update pages2-3:
* page2: set t2,t3 as committed
* page3: set t4 as committed
- *
+ *
* NB: this is a low-level routine and is NOT the preferred entry point
* for most uses; functions in transam.c are the intended callers.
*
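The three-step protocol described above condenses to a small amount of control flow. A toy sketch, with a flat array standing in for the real paged clog buffers and the per-page locking elided down to a comment (none of these names are the real clog.c identifiers):

    /* Toy model of the protocol above; not the real clog.c code. */
    enum xstatus { IN_PROGRESS, SUBCOMMITTED, COMMITTED };
    static enum xstatus clog_toy[1024];          /* one slot per xid */
    #define PAGE_OF(xid) ((xid) / 32)            /* 32 xids per "page" */

    static void
    set_tree_committed(unsigned xid, int nsub, const unsigned *sub)
    {
        int i;

        /* 1. sub-commit the subxids that live on a different page */
        for (i = 0; i < nsub; i++)
            if (PAGE_OF(sub[i]) != PAGE_OF(xid))
                clog_toy[sub[i]] = SUBCOMMITTED;

        /* 2. commit xid and its same-page subxids; the real code does
         *    this under a single per-page lock, making it atomic */
        clog_toy[xid] = COMMITTED;
        for (i = 0; i < nsub; i++)
            if (PAGE_OF(sub[i]) == PAGE_OF(xid))
                clog_toy[sub[i]] = COMMITTED;

        /* 3. revisit the off-page subxids and mark them committed */
        for (i = 0; i < nsub; i++)
            if (PAGE_OF(sub[i]) != PAGE_OF(xid))
                clog_toy[sub[i]] = COMMITTED;
    }

A checker that reads a subxid mid-way sees SUBCOMMITTED and falls back to asking about the parent, which is why the commit still looks atomic as a whole.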
@@ -142,16 +142,17 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids,
*/
void
TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
- TransactionId *subxids, XidStatus status, XLogRecPtr lsn)
+ TransactionId *subxids, XidStatus status, XLogRecPtr lsn)
{
- int pageno = TransactionIdToPage(xid); /* get page of parent */
- int i;
+ int pageno = TransactionIdToPage(xid); /* get page of parent */
+ int i;
Assert(status == TRANSACTION_STATUS_COMMITTED ||
status == TRANSACTION_STATUS_ABORTED);
/*
- * See how many subxids, if any, are on the same page as the parent, if any.
+	 * See how many subxids, if any, are on the same page as the
+	 * parent.
*/
for (i = 0; i < nsubxids; i++)
{
@@ -172,14 +173,14 @@ TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
}
else
{
- int nsubxids_on_first_page = i;
+ int nsubxids_on_first_page = i;
/*
* If this is a commit then we care about doing this correctly (i.e.
- * using the subcommitted intermediate status). By here, we know we're
- * updating more than one page of clog, so we must mark entries that
- * are *not* on the first page so that they show as subcommitted before
- * we then return to update the status to fully committed.
+ * using the subcommitted intermediate status). By here, we know
+ * we're updating more than one page of clog, so we must mark entries
+ * that are *not* on the first page so that they show as subcommitted
+ * before we then return to update the status to fully committed.
*
* To avoid touching the first page twice, skip marking subcommitted
* for the subxids on that first page.
@@ -217,13 +218,13 @@ static void
set_status_by_pages(int nsubxids, TransactionId *subxids,
XidStatus status, XLogRecPtr lsn)
{
- int pageno = TransactionIdToPage(subxids[0]);
- int offset = 0;
- int i = 0;
+ int pageno = TransactionIdToPage(subxids[0]);
+ int offset = 0;
+ int i = 0;
while (i < nsubxids)
{
- int num_on_page = 0;
+ int num_on_page = 0;
while (TransactionIdToPage(subxids[i]) == pageno && i < nsubxids)
{
@@ -251,7 +252,7 @@ TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
XLogRecPtr lsn, int pageno)
{
int slotno;
- int i;
+ int i;
Assert(status == TRANSACTION_STATUS_COMMITTED ||
status == TRANSACTION_STATUS_ABORTED ||
@@ -275,9 +276,9 @@ TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
*
* If we update more than one xid on this page while it is being written
* out, we might find that some of the bits go to disk and others don't.
- * If we are updating commits on the page with the top-level xid that could
- * break atomicity, so we subcommit the subxids first before we mark the
- * top-level commit.
+ * If we are updating commits on the page with the top-level xid that
+ * could break atomicity, so we subcommit the subxids first before we mark
+ * the top-level commit.
*/
if (TransactionIdIsValid(xid))
{
@@ -336,7 +337,7 @@ TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, i
curval == TRANSACTION_STATUS_COMMITTED)
return;
- /*
+ /*
* Current state change should be from 0 or subcommitted to target state
* or we should already be there when replaying changes during recovery.
*/
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 4685ccdf10..4a43579c40 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.52 2009/04/23 00:23:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.53 2009/06/11 14:48:54 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -233,7 +233,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("prepared transactions are disabled"),
- errhint("Set max_prepared_transactions to a nonzero value.")));
+ errhint("Set max_prepared_transactions to a nonzero value.")));
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index ac330411aa..2b6a222477 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.273 2009/05/13 20:27:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.274 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -456,7 +456,7 @@ GetCurrentSubTransactionId(void)
*
* "used" must be TRUE if the caller intends to use the command ID to mark
* inserted/updated/deleted tuples. FALSE means the ID is being fetched
- * for read-only purposes (ie, as a snapshot validity cutoff). See
+ * for read-only purposes (ie, as a snapshot validity cutoff). See
* CommandCounterIncrement() for discussion.
*/
CommandId
@@ -566,7 +566,8 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
*/
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
- int low, high;
+ int low,
+ high;
if (s->state == TRANS_ABORT)
continue;
@@ -579,8 +580,8 @@ TransactionIdIsCurrentTransactionId(TransactionId xid)
high = s->nChildXids - 1;
while (low <= high)
{
- int middle;
- TransactionId probe;
+ int middle;
+ TransactionId probe;
middle = low + (high - low) / 2;
probe = s->childXids[middle];
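The declarations being reflowed here belong to a plain binary search over the sorted childXids array. A self-contained sketch of the same pattern (plain unsigned comparison for brevity; the backend's real xid comparisons go through wraparound-aware routines):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    /* Return true if xid occurs in the sorted array xids[0..n-1]. */
    static bool
    xid_in_sorted_array(TransactionId xid, const TransactionId *xids, int n)
    {
        int low = 0;
        int high = n - 1;

        while (low <= high)
        {
            int middle = low + (high - low) / 2;  /* overflow-safe midpoint */
            TransactionId probe = xids[middle];

            if (probe == xid)
                return true;
            else if (probe < xid)
                low = middle + 1;
            else
                high = middle - 1;
        }
        return false;
    }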
@@ -604,33 +605,31 @@ void
CommandCounterIncrement(void)
{
/*
- * If the current value of the command counter hasn't been "used" to
- * mark tuples, we need not increment it, since there's no need to
- * distinguish a read-only command from others. This helps postpone
- * command counter overflow, and keeps no-op CommandCounterIncrement
- * operations cheap.
+ * If the current value of the command counter hasn't been "used" to mark
+ * tuples, we need not increment it, since there's no need to distinguish
+ * a read-only command from others. This helps postpone command counter
+ * overflow, and keeps no-op CommandCounterIncrement operations cheap.
*/
if (currentCommandIdUsed)
{
currentCommandId += 1;
- if (currentCommandId == FirstCommandId) /* check for overflow */
+ if (currentCommandId == FirstCommandId) /* check for overflow */
{
currentCommandId -= 1;
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("cannot have more than 2^32-1 commands in a transaction")));
+ errmsg("cannot have more than 2^32-1 commands in a transaction")));
}
currentCommandIdUsed = false;
/* Propagate new command ID into static snapshots */
SnapshotSetCommandId(currentCommandId);
-
+
/*
- * Make any catalog changes done by the just-completed command
- * visible in the local syscache. We obviously don't need to do
- * this after a read-only command. (But see hacks in inval.c
- * to make real sure we don't think a command that queued inval
- * messages was read-only.)
+ * Make any catalog changes done by the just-completed command visible
+ * in the local syscache. We obviously don't need to do this after a
+ * read-only command. (But see hacks in inval.c to make real sure we
+ * don't think a command that queued inval messages was read-only.)
*/
AtCommit_LocalCache();
}
@@ -638,11 +637,11 @@ CommandCounterIncrement(void)
/*
* Make any other backends' catalog changes visible to me.
*
- * XXX this is probably in the wrong place: CommandCounterIncrement
- * should be purely a local operation, most likely. However fooling
- * with this will affect asynchronous cross-backend interactions,
- * which doesn't seem like a wise thing to do in late beta, so save
- * improving this for another day - tgl 2007-11-30
+ * XXX this is probably in the wrong place: CommandCounterIncrement should
+ * be purely a local operation, most likely. However fooling with this
+ * will affect asynchronous cross-backend interactions, which doesn't seem
+ * like a wise thing to do in late beta, so save improving this for
+ * another day - tgl 2007-11-30
*/
AtStart_Cache();
}
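Taken together, the two reflowed comments above describe the contract of CommandCounterIncrement(): a command's own changes become visible to the next command, and other backends' committed catalog changes are absorbed as a side effect. Schematically (the two step functions are hypothetical stand-ins for any catalog-modifying commands):

    create_some_catalog_entry();    /* hypothetical: writes catalog tuples */
    CommandCounterIncrement();      /* make those tuples visible ...       */
    use_that_catalog_entry();       /* ... to the command that needs them  */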
@@ -1086,14 +1085,14 @@ AtSubCommit_childXids(void)
/* Allocate or enlarge the parent array if necessary */
if (s->parent->maxChildXids < new_nChildXids)
{
- int new_maxChildXids;
- TransactionId *new_childXids;
+ int new_maxChildXids;
+ TransactionId *new_childXids;
/*
* Make it 2x what's needed right now, to avoid having to enlarge it
- * repeatedly. But we can't go above MaxAllocSize. (The latter
- * limit is what ensures that we don't need to worry about integer
- * overflow here or in the calculation of new_nChildXids.)
+ * repeatedly. But we can't go above MaxAllocSize. (The latter limit
+ * is what ensures that we don't need to worry about integer overflow
+ * here or in the calculation of new_nChildXids.)
*/
new_maxChildXids = Min(new_nChildXids * 2,
(int) (MaxAllocSize / sizeof(TransactionId)));
@@ -1111,13 +1110,13 @@ AtSubCommit_childXids(void)
*/
if (s->parent->childXids == NULL)
new_childXids =
- MemoryContextAlloc(TopTransactionContext,
+ MemoryContextAlloc(TopTransactionContext,
new_maxChildXids * sizeof(TransactionId));
else
- new_childXids = repalloc(s->parent->childXids,
- new_maxChildXids * sizeof(TransactionId));
+ new_childXids = repalloc(s->parent->childXids,
+ new_maxChildXids * sizeof(TransactionId));
- s->parent->childXids = new_childXids;
+ s->parent->childXids = new_childXids;
s->parent->maxChildXids = new_maxChildXids;
}
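The enlargement policy in this hunk, double what is needed but never exceed MaxAllocSize, is a reusable pattern. A standalone sketch with an illustrative helper name and a fixed cap standing in for MaxAllocSize:

    #include <stdlib.h>

    #define ALLOC_CAP ((size_t) 0x3fffffff)   /* stand-in for MaxAllocSize */

    /* Grow an array of elemsz-byte items to hold at least need items,
     * requesting 2x to amortize repeated enlargements, capped so the byte
     * count cannot overflow. Assumes need <= ALLOC_CAP / elemsz, which the
     * caller's own limit guarantees; caller checks for NULL. */
    static void *
    grow_capped(void *buf, size_t *cap, size_t elemsz, size_t need)
    {
        size_t newcap;

        if (*cap >= need)
            return buf;                       /* already big enough */

        newcap = need * 2;
        if (newcap > ALLOC_CAP / elemsz)
            newcap = ALLOC_CAP / elemsz;      /* never past the cap */

        *cap = newcap;
        return realloc(buf, newcap * elemsz);
    }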
@@ -1126,9 +1125,9 @@ AtSubCommit_childXids(void)
*
* Note: We rely on the fact that the XID of a child always follows that
* of its parent. By copying the XID of this subtransaction before the
- * XIDs of its children, we ensure that the array stays ordered. Likewise,
- * all XIDs already in the array belong to subtransactions started and
- * subcommitted before us, so their XIDs must precede ours.
+ * XIDs of its children, we ensure that the array stays ordered.
+ * Likewise, all XIDs already in the array belong to subtransactions
+ * started and subcommitted before us, so their XIDs must precede ours.
*/
s->parent->childXids[s->parent->nChildXids] = s->transactionId;
@@ -1801,15 +1800,15 @@ PrepareTransaction(void)
/* NOTIFY and flatfiles will be handled below */
/*
- * Don't allow PREPARE TRANSACTION if we've accessed a temporary table
- * in this transaction. Having the prepared xact hold locks on another
+ * Don't allow PREPARE TRANSACTION if we've accessed a temporary table in
+ * this transaction. Having the prepared xact hold locks on another
* backend's temp table seems a bad idea --- for instance it would prevent
- * the backend from exiting. There are other problems too, such as how
- * to clean up the source backend's local buffers and ON COMMIT state
- * if the prepared xact includes a DROP of a temp table.
+ * the backend from exiting. There are other problems too, such as how to
+ * clean up the source backend's local buffers and ON COMMIT state if the
+ * prepared xact includes a DROP of a temp table.
*
- * We must check this after executing any ON COMMIT actions, because
- * they might still access a temp relation.
+ * We must check this after executing any ON COMMIT actions, because they
+ * might still access a temp relation.
*
* XXX In principle this could be relaxed to allow some useful special
* cases, such as a temp table created and dropped all within the
@@ -2021,8 +2020,8 @@ AbortTransaction(void)
/*
* Reset user ID which might have been changed transiently. We need this
* to clean up in case control escaped out of a SECURITY DEFINER function
- * or other local change of CurrentUserId; therefore, the prior value
- * of SecurityDefinerContext also needs to be restored.
+ * or other local change of CurrentUserId; therefore, the prior value of
+ * SecurityDefinerContext also needs to be restored.
*
* (Note: it is not necessary to restore session authorization or role
* settings here because those can only be changed via GUC, and GUC will
@@ -3749,8 +3748,8 @@ CommitSubTransaction(void)
/* Must CCI to ensure commands of subtransaction are seen as done */
CommandCounterIncrement();
- /*
- * Prior to 8.4 we marked subcommit in clog at this point. We now only
+ /*
+ * Prior to 8.4 we marked subcommit in clog at this point. We now only
* perform that step, if required, as part of the atomic update of the
* whole transaction tree at top level commit or abort.
*/
@@ -3868,8 +3867,8 @@ AbortSubTransaction(void)
s->state = TRANS_ABORT;
/*
- * Reset user ID which might have been changed transiently. (See notes
- * in AbortTransaction.)
+ * Reset user ID which might have been changed transiently. (See notes in
+ * AbortTransaction.)
*/
SetUserIdAndContext(s->prevUser, s->prevSecDefCxt);
@@ -4089,7 +4088,7 @@ ShowTransactionStateRec(TransactionState s)
if (s->nChildXids > 0)
{
- int i;
+ int i;
appendStringInfo(&buf, "%u", s->childXids[0]);
for (i = 1; i < s->nChildXids; i++)
@@ -4241,7 +4240,7 @@ xact_redo_commit(xl_xact_commit *xlrec, TransactionId xid)
for (i = 0; i < xlrec->nrels; i++)
{
SMgrRelation srel = smgropen(xlrec->xnodes[i]);
- ForkNumber fork;
+ ForkNumber fork;
for (fork = 0; fork <= MAX_FORKNUM; fork++)
{
@@ -4284,7 +4283,7 @@ xact_redo_abort(xl_xact_abort *xlrec, TransactionId xid)
for (i = 0; i < xlrec->nrels; i++)
{
SMgrRelation srel = smgropen(xlrec->xnodes[i]);
- ForkNumber fork;
+ ForkNumber fork;
for (fork = 0; fork <= MAX_FORKNUM; fork++)
{
@@ -4353,7 +4352,8 @@ xact_desc_commit(StringInfo buf, xl_xact_commit *xlrec)
appendStringInfo(buf, "; rels:");
for (i = 0; i < xlrec->nrels; i++)
{
- char *path = relpath(xlrec->xnodes[i], MAIN_FORKNUM);
+ char *path = relpath(xlrec->xnodes[i], MAIN_FORKNUM);
+
appendStringInfo(buf, " %s", path);
pfree(path);
}
@@ -4380,7 +4380,8 @@ xact_desc_abort(StringInfo buf, xl_xact_abort *xlrec)
appendStringInfo(buf, "; rels:");
for (i = 0; i < xlrec->nrels; i++)
{
- char *path = relpath(xlrec->xnodes[i], MAIN_FORKNUM);
+ char *path = relpath(xlrec->xnodes[i], MAIN_FORKNUM);
+
appendStringInfo(buf, " %s", path);
pfree(path);
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index b6171c78c0..d521fa6e7b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.342 2009/06/02 06:18:06 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.343 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -69,7 +69,7 @@ bool XLogArchiveMode = false;
char *XLogArchiveCommand = NULL;
bool fullPageWrites = true;
bool log_checkpoints = false;
-int sync_method = DEFAULT_SYNC_METHOD;
+int sync_method = DEFAULT_SYNC_METHOD;
#ifdef WAL_DEBUG
bool XLOG_DEBUG = false;
@@ -122,7 +122,7 @@ CheckpointStatsData CheckpointStats;
TimeLineID ThisTimeLineID = 0;
/*
- * Are we doing recovery from XLOG?
+ * Are we doing recovery from XLOG?
*
* This is only ever true in the startup process, even if the system is still
 * in recovery. Prior to 8.4, all activity during recovery was carried out
@@ -336,9 +336,8 @@ typedef struct XLogCtlData
bool SharedRecoveryInProgress;
/*
- * During recovery, we keep a copy of the latest checkpoint record
- * here. Used by the background writer when it wants to create
- * a restartpoint.
+ * During recovery, we keep a copy of the latest checkpoint record here.
+ * Used by the background writer when it wants to create a restartpoint.
*
* Protected by info_lck.
*/
@@ -422,8 +421,9 @@ static XLogRecPtr ReadRecPtr; /* start of last record read */
static XLogRecPtr EndRecPtr; /* end+1 of last record read */
static XLogRecord *nextRecord = NULL;
static TimeLineID lastPageTLI = 0;
-static XLogRecPtr minRecoveryPoint; /* local copy of ControlFile->minRecoveryPoint */
-static bool updateMinRecoveryPoint = true;
+static XLogRecPtr minRecoveryPoint; /* local copy of
+ * ControlFile->minRecoveryPoint */
+static bool updateMinRecoveryPoint = true;
static bool InRedo = false;
@@ -432,6 +432,7 @@ static bool InRedo = false;
*/
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t shutdown_requested = false;
+
/*
* Flag set when executing a restore command, to tell SIGTERM signal handler
* that it's safe to just proc_exit.
@@ -482,6 +483,7 @@ static void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
static void WriteControlFile(void);
static void ReadControlFile(void);
static char *str_time(pg_time_t tnow);
+
#ifdef WAL_DEBUG
static void xlog_outrec(StringInfo buf, XLogRecord *record);
#endif
@@ -490,7 +492,7 @@ static void pg_start_backup_callback(int code, Datum arg);
static bool read_backup_label(XLogRecPtr *checkPointLoc,
XLogRecPtr *minRecoveryLoc);
static void rm_redo_error_callback(void *arg);
-static int get_sync_bit(int method);
+static int get_sync_bit(int method);
/*
@@ -1260,9 +1262,9 @@ XLogArchiveIsBusy(const char *xlog)
return false;
/*
- * Check to see if the WAL file has been removed by checkpoint,
- * which implies it has already been archived, and explains why we
- * can't see a status file for it.
+ * Check to see if the WAL file has been removed by checkpoint, which
+ * implies it has already been archived, and explains why we can't see a
+ * status file for it.
*/
snprintf(archiveStatusPath, MAXPGPATH, XLOGDIR "/%s", xlog);
if (stat(archiveStatusPath, &stat_buf) != 0 &&
@@ -1775,8 +1777,8 @@ XLogSetAsyncCommitLSN(XLogRecPtr asyncCommitLSN)
* Advance minRecoveryPoint in control file.
*
* If we crash during recovery, we must reach this point again before the
- * database is consistent.
- *
+ * database is consistent.
+ *
* If 'force' is true, 'lsn' argument is ignored. Otherwise, minRecoveryPoint
 * is only updated if it's not already greater than or equal to 'lsn'.
*/
@@ -1802,7 +1804,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr newMinRecoveryPoint;
+ XLogRecPtr newMinRecoveryPoint;
/*
* To avoid having to update the control file too often, we update it
@@ -2567,7 +2569,7 @@ XLogFileClose(void)
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * the OS to release any cached pages. But do not do so if WAL archiving
+ * the OS to release any cached pages. But do not do so if WAL archiving
* is active, because archiver process could use the cache to read the WAL
* segment. Also, don't bother with it if we are using O_DIRECT, since
* the kernel is presumably not caching in that case.
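Where the platform provides it, the advice described here is given with posix_fadvise(); a minimal sketch of that call, stripped of the configure tests and of the archiving/O_DIRECT conditions the comment lists:

    #include <fcntl.h>

    /* Hint to the kernel that this file's cached pages can be dropped. */
    static void
    advise_dontneed(int fd)
    {
    #ifdef POSIX_FADV_DONTNEED
        (void) posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
    #else
        (void) fd;              /* no-op where the advice is unavailable */
    #endif
    }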
@@ -2663,19 +2665,19 @@ RestoreArchivedFile(char *path, const char *xlogfname,
/*
* Calculate the archive file cutoff point for use during log shipping
- * replication. All files earlier than this point can be deleted
- * from the archive, though there is no requirement to do so.
+ * replication. All files earlier than this point can be deleted from the
+ * archive, though there is no requirement to do so.
*
* We initialise this with the filename of an InvalidXLogRecPtr, which
- * will prevent the deletion of any WAL files from the archive
- * because of the alphabetic sorting property of WAL filenames.
+ * will prevent the deletion of any WAL files from the archive because of
+ * the alphabetic sorting property of WAL filenames.
*
* Once we have successfully located the redo pointer of the checkpoint
* from which we start recovery we never request a file prior to the redo
- * pointer of the last restartpoint. When redo begins we know that we
- * have successfully located it, so there is no need for additional
- * status flags to signify the point when we can begin deleting WAL files
- * from the archive.
+ * pointer of the last restartpoint. When redo begins we know that we have
+ * successfully located it, so there is no need for additional status
+ * flags to signify the point when we can begin deleting WAL files from
+ * the archive.
*/
if (InRedo)
{
@@ -2821,9 +2823,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* On SIGTERM, assume we have received a fast shutdown request, and exit
* cleanly. It's pure chance whether we receive the SIGTERM first, or the
* child process. If we receive it first, the signal handler will call
- * proc_exit, otherwise we do it here. If we or the child process
- * received SIGTERM for any other reason than a fast shutdown request,
- * postmaster will perform an immediate shutdown when it sees us exiting
+ * proc_exit, otherwise we do it here. If we or the child process received
+ * SIGTERM for any reason other than a fast shutdown request, postmaster
+ * will perform an immediate shutdown when it sees us exiting
* unexpectedly.
*
* Per the Single Unix Spec, shells report exit status > 128 when a called
@@ -2871,19 +2873,19 @@ ExecuteRecoveryEndCommand(void)
/*
* Calculate the archive file cutoff point for use during log shipping
- * replication. All files earlier than this point can be deleted
- * from the archive, though there is no requirement to do so.
+ * replication. All files earlier than this point can be deleted from the
+ * archive, though there is no requirement to do so.
*
* We initialise this with the filename of an InvalidXLogRecPtr, which
- * will prevent the deletion of any WAL files from the archive
- * because of the alphabetic sorting property of WAL filenames.
+ * will prevent the deletion of any WAL files from the archive because of
+ * the alphabetic sorting property of WAL filenames.
*
* Once we have successfully located the redo pointer of the checkpoint
* from which we start recovery we never request a file prior to the redo
- * pointer of the last restartpoint. When redo begins we know that we
- * have successfully located it, so there is no need for additional
- * status flags to signify the point when we can begin deleting WAL files
- * from the archive.
+ * pointer of the last restartpoint. When redo begins we know that we have
+ * successfully located it, so there is no need for additional status
+ * flags to signify the point when we can begin deleting WAL files from
+ * the archive.
*/
if (InRedo)
{
@@ -2948,14 +2950,14 @@ ExecuteRecoveryEndCommand(void)
{
/*
* If the failure was due to any sort of signal, it's best to punt and
- * abort recovery. See also detailed comments on signals in
+ * abort recovery. See also detailed comments on signals in
* RestoreArchivedFile().
*/
signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125;
ereport(signaled ? FATAL : WARNING,
(errmsg("recovery_end_command \"%s\": return code %d",
- xlogRecoveryEndCmd, rc)));
+ xlogRecoveryEndCmd, rc)));
}
}
@@ -3101,12 +3103,12 @@ static void
ValidateXLOGDirectoryStructure(void)
{
char path[MAXPGPATH];
- struct stat stat_buf;
+ struct stat stat_buf;
/* Check for pg_xlog; if it doesn't exist, error out */
if (stat(XLOGDIR, &stat_buf) != 0 ||
!S_ISDIR(stat_buf.st_mode))
- ereport(FATAL,
+ ereport(FATAL,
(errmsg("required WAL directory \"%s\" does not exist",
XLOGDIR)));
@@ -3116,7 +3118,7 @@ ValidateXLOGDirectoryStructure(void)
{
/* Check for weird cases where it exists but isn't a directory */
if (!S_ISDIR(stat_buf.st_mode))
- ereport(FATAL,
+ ereport(FATAL,
(errmsg("required WAL directory \"%s\" does not exist",
path)));
}
@@ -3125,7 +3127,7 @@ ValidateXLOGDirectoryStructure(void)
ereport(LOG,
(errmsg("creating missing WAL directory \"%s\"", path)));
if (mkdir(path, 0700) < 0)
- ereport(FATAL,
+ ereport(FATAL,
(errmsg("could not create missing directory \"%s\": %m",
path)));
}
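The structure of these checks reads more easily without the ereport scaffolding: stat() the path, distinguish "missing" from "exists but is not a directory", and create only where that is safe. A condensed sketch (this collapses both cases into one hypothetical helper; the real function refuses to create pg_xlog itself and only re-creates archive_status):

    #include <sys/stat.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Require dir to exist as a directory, creating it if wholly absent. */
    static void
    require_dir(const char *dir)
    {
        struct stat st;

        if (stat(dir, &st) == 0)
        {
            if (!S_ISDIR(st.st_mode))
            {
                fprintf(stderr, "\"%s\" exists but is not a directory\n", dir);
                exit(1);
            }
        }
        else if (mkdir(dir, 0700) < 0)
        {
            perror(dir);
            exit(1);
        }
    }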
@@ -3187,7 +3189,7 @@ CleanupBackupHistory(void)
* ignoring them as already applied, but that's not a huge drawback.
*
* If 'cleanup' is true, a cleanup lock is used when restoring blocks.
- * Otherwise, a normal exclusive lock is used. At the moment, that's just
+ * Otherwise, a normal exclusive lock is used. At the moment, that's just
* pro forma, because there can't be any regular backends in the system
* during recovery. The 'cleanup' argument applies to all backup blocks
* in the WAL record, that suffices for now.
@@ -4283,8 +4285,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x),"
- " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).",
- ControlFile->pg_control_version, ControlFile->pg_control_version,
+ " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).",
+ ControlFile->pg_control_version, ControlFile->pg_control_version,
PG_CONTROL_VERSION, PG_CONTROL_VERSION),
errhint("This could be a problem of mismatched byte ordering. It looks like you need to initdb.")));
@@ -4309,8 +4311,8 @@ ReadControlFile(void)
/*
* Do compatibility checking immediately. If the database isn't
- * compatible with the backend executable, we want to abort before we
- * can possibly do any damage.
+ * compatible with the backend executable, we want to abort before we can
+ * possibly do any damage.
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
ereport(FATAL,
@@ -4402,14 +4404,14 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without USE_FLOAT4_BYVAL"
- " but the server was compiled with USE_FLOAT4_BYVAL."),
+ " but the server was compiled with USE_FLOAT4_BYVAL."),
errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->float4ByVal != false)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with USE_FLOAT4_BYVAL"
- " but the server was compiled without USE_FLOAT4_BYVAL."),
+ errdetail("The database cluster was initialized with USE_FLOAT4_BYVAL"
+ " but the server was compiled without USE_FLOAT4_BYVAL."),
errhint("It looks like you need to recompile or initdb.")));
#endif
@@ -4418,14 +4420,14 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without USE_FLOAT8_BYVAL"
- " but the server was compiled with USE_FLOAT8_BYVAL."),
+ " but the server was compiled with USE_FLOAT8_BYVAL."),
errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->float8ByVal != false)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with USE_FLOAT8_BYVAL"
- " but the server was compiled without USE_FLOAT8_BYVAL."),
+ errdetail("The database cluster was initialized with USE_FLOAT8_BYVAL"
+ " but the server was compiled without USE_FLOAT8_BYVAL."),
errhint("It looks like you need to recompile or initdb.")));
#endif
}
@@ -4848,9 +4850,9 @@ readRecoveryCommandFile(void)
* does nothing if a recovery_target is not also set
*/
if (!parse_bool(tok2, &recoveryTargetInclusive))
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"recovery_target_inclusive\" requires a Boolean value")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("parameter \"recovery_target_inclusive\" requires a Boolean value")));
ereport(LOG,
(errmsg("recovery_target_inclusive = %s", tok2)));
}
@@ -5204,8 +5206,8 @@ StartupXLOG(void)
/*
* Verify that pg_xlog and pg_xlog/archive_status exist. In cases where
- * someone has performed a copy for PITR, these directories may have
- * been excluded and need to be re-created.
+ * someone has performed a copy for PITR, these directories may have been
+ * excluded and need to be re-created.
*/
ValidateXLOGDirectoryStructure();
@@ -5437,6 +5439,7 @@ StartupXLOG(void)
bool recoveryApply = true;
bool reachedMinRecoveryPoint = false;
ErrorContextCallback errcontext;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -5454,7 +5457,7 @@ StartupXLOG(void)
else
ereport(LOG,
(errmsg("redo starts at %X/%X, consistency will be reached at %X/%X",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
minRecoveryPoint.xlogid, minRecoveryPoint.xrecoff)));
/*
@@ -5512,17 +5515,17 @@ StartupXLOG(void)
proc_exit(1);
/*
- * Have we reached our safe starting point? If so, we can
- * tell postmaster that the database is consistent now.
+ * Have we reached our safe starting point? If so, we can tell
+ * postmaster that the database is consistent now.
*/
- if (!reachedMinRecoveryPoint &&
- XLByteLE(minRecoveryPoint, EndRecPtr))
+ if (!reachedMinRecoveryPoint &&
+ XLByteLE(minRecoveryPoint, EndRecPtr))
{
reachedMinRecoveryPoint = true;
if (InArchiveRecovery)
{
ereport(LOG,
- (errmsg("consistent recovery state reached")));
+ (errmsg("consistent recovery state reached")));
if (IsUnderPostmaster)
SendPostmasterSignal(PMSIGNAL_RECOVERY_CONSISTENT);
}
@@ -5554,9 +5557,8 @@ StartupXLOG(void)
}
/*
- * Update shared replayEndRecPtr before replaying this
- * record, so that XLogFlush will update minRecoveryPoint
- * correctly.
+ * Update shared replayEndRecPtr before replaying this record,
+ * so that XLogFlush will update minRecoveryPoint correctly.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = EndRecPtr;
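The "use volatile pointer to prevent code rearrangement" idiom that recurs in these hunks amounts to: acquire the spinlock, store through a volatile-qualified pointer so the compiler cannot move the store out of the locked region, release. A schematic version, with a pthreads spinlock standing in for SpinLockAcquire/SpinLockRelease and invented type names:

    #include <pthread.h>

    typedef struct { unsigned xlogid; unsigned xrecoff; } RecPtr;

    typedef struct
    {
        pthread_spinlock_t lck;
        RecPtr replay_end;          /* read by other processes */
    } SharedState;

    static void
    publish_replay_end(SharedState *state, RecPtr end)
    {
        volatile SharedState *s = state;    /* keep the store in place */

        pthread_spin_lock(&state->lck);
        s->replay_end = end;
        pthread_spin_unlock(&state->lck);
    }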
@@ -5819,9 +5821,9 @@ bool
RecoveryInProgress(void)
{
/*
- * We check shared state each time only until we leave recovery mode.
- * We can't re-enter recovery, so we rely on the local state variable
- * after that.
+ * We check shared state each time only until we leave recovery mode. We
+ * can't re-enter recovery, so we rely on the local state variable after
+ * that.
*/
if (!LocalRecoveryInProgress)
return false;
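The shape of that optimization is a one-way latch: a process-local flag shadows shared state, and once the shared answer has flipped to its final value the local copy is trusted forever. A schematic sketch, where shared_in_recovery() is a hypothetical shared-memory read:

    #include <stdbool.h>

    extern bool shared_in_recovery(void);   /* hypothetical shm read */

    static bool local_in_recovery = true;   /* process-local shadow */

    static bool
    recovery_in_progress(void)
    {
        /* Recovery can end but never restart within this process's
         * lifetime, so a false answer can be cached forever. */
        if (!local_in_recovery)
            return false;

        local_in_recovery = shared_in_recovery();
        return local_in_recovery;
    }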
@@ -6114,11 +6116,11 @@ ShutdownXLOG(int code, Datum arg)
static void
LogCheckpointStart(int flags, bool restartpoint)
{
- char *msg;
+ char *msg;
/*
- * XXX: This is hopelessly untranslatable. We could call gettext_noop
- * for the main message, but what about all the flags?
+ * XXX: This is hopelessly untranslatable. We could call gettext_noop for
+ * the main message, but what about all the flags?
*/
if (restartpoint)
msg = "restartpoint starting:%s%s%s%s%s%s";
@@ -6561,7 +6563,7 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
/*
* This is used during WAL recovery to establish a point from which recovery
- * can roll forward without replaying the entire recovery log. This function
+ * can roll forward without replaying the entire recovery log. This function
* is called each time a checkpoint record is read from XLOG. It is stored
* in shared memory, so that it can be used as a restartpoint later on.
*/
@@ -6569,6 +6571,7 @@ static void
RecoveryRestartPoint(const CheckPoint *checkPoint)
{
int rmid;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -6592,8 +6595,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
- * Copy the checkpoint record to shared memory, so that bgwriter can
- * use it the next time it wants to perform a restartpoint.
+ * Copy the checkpoint record to shared memory, so that bgwriter can use
+ * it the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
@@ -6613,8 +6616,9 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
bool
CreateRestartPoint(int flags)
{
- XLogRecPtr lastCheckPointRecPtr;
- CheckPoint lastCheckPoint;
+ XLogRecPtr lastCheckPointRecPtr;
+ CheckPoint lastCheckPoint;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -6630,14 +6634,14 @@ CreateRestartPoint(int flags)
memcpy(&lastCheckPoint, &XLogCtl->lastCheckPoint, sizeof(CheckPoint));
SpinLockRelease(&xlogctl->info_lck);
- /*
+ /*
* Check that we're still in recovery mode. It's ok if we exit recovery
* mode after this check, the restart point is valid anyway.
*/
if (!RecoveryInProgress())
{
ereport(DEBUG2,
- (errmsg("skipping restartpoint, recovery has already ended")));
+ (errmsg("skipping restartpoint, recovery has already ended")));
LWLockRelease(CheckpointLock);
return false;
}
@@ -6653,16 +6657,17 @@ CreateRestartPoint(int flags)
* possible in case of immediate shutdown, though.
*
* We don't explicitly advance minRecoveryPoint when we do create a
- * restartpoint. It's assumed that flushing the buffers will do that
- * as a side-effect.
+ * restartpoint. It's assumed that flushing the buffers will do that as a
+ * side-effect.
*/
if (XLogRecPtrIsInvalid(lastCheckPointRecPtr) ||
XLByteLE(lastCheckPoint.redo, ControlFile->checkPointCopy.redo))
{
- XLogRecPtr InvalidXLogRecPtr = {0, 0};
+ XLogRecPtr InvalidXLogRecPtr = {0, 0};
+
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
- lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff)));
+ lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
LWLockRelease(CheckpointLock);
@@ -6694,9 +6699,9 @@ CreateRestartPoint(int flags)
LWLockRelease(ControlFileLock);
/*
- * Currently, there is no need to truncate pg_subtrans during recovery.
- * If we did do that, we will need to have called StartupSUBTRANS()
- * already and then TruncateSUBTRANS() would go here.
+	 * Currently, there is no need to truncate pg_subtrans during recovery. If
+	 * we did, we would need to have called StartupSUBTRANS() already, and
+	 * TruncateSUBTRANS() would go here.
*/
/* All real work is done, but log before releasing lock. */
@@ -6705,12 +6710,12 @@ CreateRestartPoint(int flags)
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
- lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff)));
+ lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff)));
if (recoveryLastXTime)
ereport((log_checkpoints ? LOG : DEBUG2),
- (errmsg("last completed transaction was at log time %s",
- timestamptz_to_str(recoveryLastXTime))));
+ (errmsg("last completed transaction was at log time %s",
+ timestamptz_to_str(recoveryLastXTime))));
LWLockRelease(CheckpointLock);
return true;
@@ -6828,9 +6833,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
(int) checkPoint.ThisTimeLineID))
ereport(PANIC,
(errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- checkPoint.ThisTimeLineID, ThisTimeLineID)));
- /* Following WAL records should be run with new TLI */
- ThisTimeLineID = checkPoint.ThisTimeLineID;
+ checkPoint.ThisTimeLineID, ThisTimeLineID)));
+ /* Following WAL records should be run with new TLI */
+ ThisTimeLineID = checkPoint.ThisTimeLineID;
}
RecoveryRestartPoint(&checkPoint);
@@ -6948,12 +6953,12 @@ get_sync_bit(int method)
switch (method)
{
- /*
- * enum values for all sync options are defined even if they are not
- * supported on the current platform. But if not, they are not
- * included in the enum option array, and therefore will never be seen
- * here.
- */
+ /*
+ * enum values for all sync options are defined even if they are
+ * not supported on the current platform. But if not, they are
+ * not included in the enum option array, and therefore will never
+ * be seen here.
+ */
case SYNC_METHOD_FSYNC:
case SYNC_METHOD_FSYNC_WRITETHROUGH:
case SYNC_METHOD_FDATASYNC:
@@ -6969,7 +6974,7 @@ get_sync_bit(int method)
default:
/* can't happen (unless we are out of sync with option array) */
elog(ERROR, "unrecognized wal_sync_method: %d", method);
- return 0; /* silence warning */
+ return 0; /* silence warning */
}
}
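What the switch computes is the extra open(2) flag each sync method implies, zero for the methods that sync explicitly after writing. A condensed sketch of that mapping, with invented enum names in place of the real SYNC_METHOD_* values:

    #include <fcntl.h>

    enum sync_method { SM_FSYNC, SM_FDATASYNC, SM_OPEN_SYNC, SM_OPEN_DSYNC };

    /* Extra open(2) flag implied by the chosen WAL sync method. */
    static int
    sync_open_bit(enum sync_method m)
    {
        switch (m)
        {
            case SM_FSYNC:         /* these sync explicitly after writing, */
            case SM_FDATASYNC:     /* so opening needs no special flag     */
                return 0;
    #ifdef O_SYNC
            case SM_OPEN_SYNC:
                return O_SYNC;     /* every write is synchronous */
    #endif
    #ifdef O_DSYNC
            case SM_OPEN_DSYNC:
                return O_DSYNC;    /* write-through for data, not metadata */
    #endif
            default:
                return 0;          /* unreachable if enum and array agree */
        }
    }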
@@ -7146,8 +7151,8 @@ pg_start_backup(PG_FUNCTION_ARGS)
* have different checkpoint positions and hence different history
* file names, even if nothing happened in between.
*
- * We use CHECKPOINT_IMMEDIATE only if requested by user (via
- * passing fast = true). Otherwise this can take awhile.
+ * We use CHECKPOINT_IMMEDIATE only if requested by user (via passing
+		 * fast = true). Otherwise this can take a while.
*/
RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
(fast ? CHECKPOINT_IMMEDIATE : 0));
@@ -7376,9 +7381,9 @@ pg_stop_backup(PG_FUNCTION_ARGS)
* property of the WAL files ensures any earlier WAL files are safely
* archived as well.
*
- * We wait forever, since archive_command is supposed to work and
- * we assume the admin wanted his backup to work completely. If you
- * don't wish to wait, you can set statement_timeout.
+ * We wait forever, since archive_command is supposed to work and we
+ * assume the admin wanted his backup to work completely. If you don't
+ * wish to wait, you can set statement_timeout.
*/
XLByteToPrevSeg(stoppoint, _logId, _logSeg);
XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
@@ -7399,7 +7404,7 @@ pg_stop_backup(PG_FUNCTION_ARGS)
if (++waits >= seconds_before_warning)
{
- seconds_before_warning *= 2; /* This wraps in >10 years... */
+ seconds_before_warning *= 2; /* This wraps in >10 years... */
ereport(WARNING,
(errmsg("pg_stop_backup still waiting for archive to complete (%d seconds elapsed)",
waits)));
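The waiting policy here, poll once a second and double the warning interval each time a warning fires, keeps the log quiet during long archiver stalls. A self-contained sketch of the same loop (names are illustrative):

    #include <stdio.h>
    #include <unistd.h>

    /* Poll done() once a second, warning at 60s, 120s, 240s, ... */
    static void
    wait_with_doubling_warnings(int (*done)(void))
    {
        int waits = 0;
        int warn_after = 60;

        while (!done())
        {
            sleep(1);
            if (++waits >= warn_after)
            {
                warn_after *= 2;    /* wraps only after >10 years */
                fprintf(stderr, "still waiting (%d seconds elapsed)\n",
                        waits);
            }
        }
    }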
@@ -7775,7 +7780,7 @@ CancelBackup(void)
ereport(LOG,
(errmsg("online backup mode cancelled"),
errdetail("\"%s\" was renamed to \"%s\".",
- BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
+ BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
}
else
{
@@ -7783,12 +7788,12 @@ CancelBackup(void)
(errcode_for_file_access(),
errmsg("online backup mode was not cancelled"),
errdetail("Could not rename \"%s\" to \"%s\": %m.",
- BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
+ BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
}
}
/* ------------------------------------------------------
- * Startup Process main entry point and signal handlers
+ * Startup Process main entry point and signal handlers
* ------------------------------------------------------
*/
@@ -7818,8 +7823,8 @@ startupproc_quickdie(SIGNAL_ARGS)
* system reset cycle if some idiot DBA sends a manual SIGQUIT to a random
* backend. This is necessary precisely because we don't clean up our
* shared memory state. (The "dead man switch" mechanism in pmsignal.c
- * should ensure the postmaster sees this as a crash, too, but no harm
- * in being doubly sure.)
+ * should ensure the postmaster sees this as a crash, too, but no harm in
+ * being doubly sure.)
*/
exit(2);
}
@@ -7858,10 +7863,10 @@ StartupProcessMain(void)
/*
* Properly accept or ignore signals the postmaster might send us
*/
- pqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */
- pqsignal(SIGINT, SIG_IGN); /* ignore query cancel */
- pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */
- pqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */
+ pqsignal(SIGHUP, StartupProcSigHupHandler); /* reload config file */
+ pqsignal(SIGINT, SIG_IGN); /* ignore query cancel */
+ pqsignal(SIGTERM, StartupProcShutdownHandler); /* request shutdown */
+ pqsignal(SIGQUIT, startupproc_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, SIG_IGN);
@@ -7881,13 +7886,13 @@ StartupProcessMain(void)
*/
PG_SETMASK(&UnBlockSig);
- StartupXLOG();
+ StartupXLOG();
BuildFlatFiles(false);
/*
- * Exit normally. Exit code 0 tells postmaster that we completed
- * recovery successfully.
+ * Exit normally. Exit code 0 tells postmaster that we completed recovery
+ * successfully.
*/
proc_exit(0);
}
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 458af10ca1..8c6e339bf4 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.67 2009/01/20 18:59:37 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.68 2009/06/11 14:48:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,8 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
*/
if (log_min_messages <= DEBUG1 || client_min_messages <= DEBUG1)
{
- char *path = relpath(node, forkno);
+ char *path = relpath(node, forkno);
+
if (present)
elog(DEBUG1, "page %u of relation %s is uninitialized",
blkno, path);
@@ -132,7 +133,8 @@ forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
{
if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
{
- char *path = relpath(hentry->key.node, forkno);
+ char *path = relpath(hentry->key.node, forkno);
+
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
pfree(path);
@@ -164,7 +166,8 @@ forget_invalid_pages_db(Oid dbid)
{
if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
{
- char *path = relpath(hentry->key.node, hentry->key.forkno);
+ char *path = relpath(hentry->key.node, hentry->key.forkno);
+
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
pfree(path);
@@ -197,7 +200,8 @@ XLogCheckInvalidPages(void)
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
- char *path = relpath(hentry->key.node, hentry->key.forkno);
+ char *path = relpath(hentry->key.node, hentry->key.forkno);
+
if (hentry->present)
elog(WARNING, "page %u of relation %s was uninitialized",
hentry->key.blkno, path);
@@ -237,7 +241,8 @@ XLogCheckInvalidPages(void)
Buffer
XLogReadBuffer(RelFileNode rnode, BlockNumber blkno, bool init)
{
- Buffer buf;
+ Buffer buf;
+
buf = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno,
init ? RBM_ZERO : RBM_NORMAL);
if (BufferIsValid(buf))
@@ -344,8 +349,8 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
*/
typedef struct
{
- RelationData reldata; /* Note: this must be first */
- FormData_pg_class pgc;
+ RelationData reldata; /* Note: this must be first */
+ FormData_pg_class pgc;
} FakeRelCacheEntryData;
typedef FakeRelCacheEntryData *FakeRelCacheEntry;
@@ -354,10 +359,10 @@ typedef FakeRelCacheEntryData *FakeRelCacheEntry;
* Create a fake relation cache entry for a physical relation
*
* It's often convenient to use the same functions in XLOG replay as in the
- * main codepath, but those functions typically work with a relcache entry.
- * We don't have a working relation cache during XLOG replay, but this
- * function can be used to create a fake relcache entry instead. Only the
- * fields related to physical storage, like rd_rel, are initialized, so the
+ * main codepath, but those functions typically work with a relcache entry.
+ * We don't have a working relation cache during XLOG replay, but this
+ * function can be used to create a fake relcache entry instead. Only the
+ * fields related to physical storage, like rd_rel, are initialized, so the
* fake entry is only usable in low-level operations like ReadBuffer().
*
* Caller must free the returned entry with FreeFakeRelcacheEntry().
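This patch itself shows the intended usage pattern, in the btree_xlog_cleanup() hunk earlier in the diff: create the fake entry, perform the low-level operation, free it. Schematically:

    /* Replay-time usage; node identifies the physical relation. */
    Relation reln = CreateFakeRelcacheEntry(node);

    /* ... physical-storage-only work, e.g. _bt_pagedel(reln, buf, ...) ... */

    FreeFakeRelcacheEntry(reln);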
@@ -366,7 +371,7 @@ Relation
CreateFakeRelcacheEntry(RelFileNode rnode)
{
FakeRelCacheEntry fakeentry;
- Relation rel;
+ Relation rel;
/* Allocate the Relation struct and all related space in one block. */
fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
@@ -381,9 +386,9 @@ CreateFakeRelcacheEntry(RelFileNode rnode)
/*
* We set up the lockRelId in case anything tries to lock the dummy
* relation. Note that this is fairly bogus since relNode may be
- * different from the relation's OID. It shouldn't really matter
- * though, since we are presumably running by ourselves and can't have
- * any lock conflicts ...
+ * different from the relation's OID. It shouldn't really matter though,
+ * since we are presumably running by ourselves and can't have any lock
+ * conflicts ...
*/
rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
@@ -427,10 +432,9 @@ XLogDropDatabase(Oid dbid)
{
/*
* This is unnecessarily heavy-handed, as it will close SMgrRelation
- * objects for other databases as well. DROP DATABASE occurs seldom
- * enough that it's not worth introducing a variant of smgrclose for
- * just this purpose. XXX: Or should we rather leave the smgr entries
- * dangling?
+ * objects for other databases as well. DROP DATABASE occurs seldom enough
+ * that it's not worth introducing a variant of smgrclose for just this
+ * purpose. XXX: Or should we rather leave the smgr entries dangling?
*/
smgrcloseall();