Diffstat (limited to 'src/backend/nodes')
-rw-r--r--  src/backend/nodes/bitmapset.c   |   8
-rw-r--r--  src/backend/nodes/copyfuncs.c   |  19
-rw-r--r--  src/backend/nodes/equalfuncs.c  |  61
-rw-r--r--  src/backend/nodes/list.c        |  24
-rw-r--r--  src/backend/nodes/makefuncs.c   |  15
-rw-r--r--  src/backend/nodes/outfuncs.c    |  27
-rw-r--r--  src/backend/nodes/print.c       |   6
-rw-r--r--  src/backend/nodes/read.c        |  21
-rw-r--r--  src/backend/nodes/readfuncs.c   |  50
-rw-r--r--  src/backend/nodes/tidbitmap.c   | 108
10 files changed, 168 insertions, 171 deletions
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index d74ba6189e..916833df0d 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -14,7 +14,7 @@ * Copyright (c) 2003-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.9 2005/06/15 16:24:07 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/bitmapset.c,v 1.10 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -769,7 +769,7 @@ bms_first_member(Bitmapset *a) * * Note: we must ensure that any two bitmapsets that are bms_equal() will * hash to the same value; in practice this means that trailing all-zero - * words cannot affect the result. The circular-shift-and-XOR hash method + * words cannot affect the result. The circular-shift-and-XOR hash method * used here has this property, so long as we work from back to front. * * Note: you might wonder why we bother with the circular shift; at first @@ -779,7 +779,7 @@ bms_first_member(Bitmapset *a) * multiword bitmapsets is "a JOIN b JOIN c JOIN d ...", which gives rise * to rangetables in which base tables and JOIN nodes alternate; so * bitmapsets of base table RT indexes tend to use only odd-numbered or only - * even-numbered bits. A straight longitudinal XOR would preserve this + * even-numbered bits. A straight longitudinal XOR would preserve this * property, leading to a much smaller set of possible outputs than if * we include a shift. */ @@ -791,7 +791,7 @@ bms_hash_value(const Bitmapset *a) if (a == NULL || a->nwords <= 0) return 0; /* All empty sets hash to 0 */ - for (wordnum = a->nwords; --wordnum > 0; ) + for (wordnum = a->nwords; --wordnum > 0;) { result ^= a->words[wordnum]; if (result & ((bitmapword) 1 << (BITS_PER_BITMAPWORD - 1))) diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 9c21c2f977..4a90b10b27 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -15,7 +15,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.315 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.316 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -154,7 +154,7 @@ _copyAppend(Append *from) static BitmapAnd * _copyBitmapAnd(BitmapAnd *from) { - BitmapAnd *newnode = makeNode(BitmapAnd); + BitmapAnd *newnode = makeNode(BitmapAnd); /* * copy node superclass fields @@ -175,7 +175,7 @@ _copyBitmapAnd(BitmapAnd *from) static BitmapOr * _copyBitmapOr(BitmapOr *from) { - BitmapOr *newnode = makeNode(BitmapOr); + BitmapOr *newnode = makeNode(BitmapOr); /* * copy node superclass fields @@ -269,7 +269,7 @@ _copyIndexScan(IndexScan *from) static BitmapIndexScan * _copyBitmapIndexScan(BitmapIndexScan *from) { - BitmapIndexScan *newnode = makeNode(BitmapIndexScan); + BitmapIndexScan *newnode = makeNode(BitmapIndexScan); /* * copy node superclass fields @@ -294,7 +294,7 @@ _copyBitmapIndexScan(BitmapIndexScan *from) static BitmapHeapScan * _copyBitmapHeapScan(BitmapHeapScan *from) { - BitmapHeapScan *newnode = makeNode(BitmapHeapScan); + BitmapHeapScan *newnode = makeNode(BitmapHeapScan); /* * copy node superclass fields @@ -1262,8 +1262,7 @@ _copyRestrictInfo(RestrictInfo *from) COPY_SCALAR_FIELD(right_sortop); /* - * Do not copy pathkeys, since they'd not be canonical in a copied 
- * query + * Do not copy pathkeys, since they'd not be canonical in a copied query */ newnode->left_pathkey = NIL; newnode->right_pathkey = NIL; @@ -1791,7 +1790,7 @@ _copyFuncWithArgs(FuncWithArgs *from) static GrantRoleStmt * _copyGrantRoleStmt(GrantRoleStmt *from) { - GrantRoleStmt *newnode = makeNode(GrantRoleStmt); + GrantRoleStmt *newnode = makeNode(GrantRoleStmt); COPY_NODE_FIELD(granted_roles); COPY_NODE_FIELD(grantee_roles); @@ -2906,8 +2905,8 @@ copyObject(void *from) break; /* - * Lists of integers and OIDs don't need to be deep-copied, so - * we perform a shallow copy via list_copy() + * Lists of integers and OIDs don't need to be deep-copied, so we + * perform a shallow copy via list_copy() */ case T_IntList: case T_OidList: diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 326eb9c62a..9baa79dd93 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -18,7 +18,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.252 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.253 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -135,8 +135,7 @@ _equalConst(Const *a, Const *b) /* * We treat all NULL constants of the same type as equal. Someday this - * might need to change? But datumIsEqual doesn't work on nulls, - * so... + * might need to change? But datumIsEqual doesn't work on nulls, so... */ if (a->constisnull) return true; @@ -202,8 +201,8 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b) COMPARE_SCALAR_FIELD(funcretset); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->funcformat != b->funcformat && a->funcformat != COERCE_DONTCARE && @@ -222,9 +221,9 @@ _equalOpExpr(OpExpr *a, OpExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -245,9 +244,9 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -268,9 +267,9 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. 
This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -354,8 +353,8 @@ _equalRelabelType(RelabelType *a, RelabelType *b) COMPARE_SCALAR_FIELD(resulttypmod); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->relabelformat != b->relabelformat && a->relabelformat != COERCE_DONTCARE && @@ -372,8 +371,8 @@ _equalConvertRowtypeExpr(ConvertRowtypeExpr *a, ConvertRowtypeExpr *b) COMPARE_SCALAR_FIELD(resulttype); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->convertformat != b->convertformat && a->convertformat != COERCE_DONTCARE && @@ -430,8 +429,8 @@ _equalRowExpr(RowExpr *a, RowExpr *b) COMPARE_SCALAR_FIELD(row_typeid); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->row_format != b->row_format && a->row_format != COERCE_DONTCARE && @@ -467,9 +466,9 @@ _equalNullIfExpr(NullIfExpr *a, NullIfExpr *b) /* * Special-case opfuncid: it is allowable for it to differ if one node - * contains zero and the other doesn't. This just means that the one - * node isn't as far along in the parse/plan pipeline and hasn't had - * the opfuncid cache filled yet. + * contains zero and the other doesn't. This just means that the one node + * isn't as far along in the parse/plan pipeline and hasn't had the + * opfuncid cache filled yet. */ if (a->opfuncid != b->opfuncid && a->opfuncid != 0 && @@ -509,8 +508,8 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b) COMPARE_SCALAR_FIELD(resulttypmod); /* - * Special-case COERCE_DONTCARE, so that planner can build coercion - * nodes that are equal() to both explicit and implicit coercions. + * Special-case COERCE_DONTCARE, so that planner can build coercion nodes + * that are equal() to both explicit and implicit coercions. */ if (a->coercionformat != b->coercionformat && a->coercionformat != COERCE_DONTCARE && @@ -606,8 +605,8 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b) COMPARE_BITMAPSET_FIELD(required_relids); /* - * We ignore all the remaining fields, since they may not be set yet, - * and should be derivable from the clause anyway. + * We ignore all the remaining fields, since they may not be set yet, and + * should be derivable from the clause anyway. */ return true; @@ -1717,15 +1716,15 @@ _equalList(List *a, List *b) ListCell *item_b; /* - * Try to reject by simple scalar checks before grovelling through all - * the list elements... + * Try to reject by simple scalar checks before grovelling through all the + * list elements... */ COMPARE_SCALAR_FIELD(type); COMPARE_SCALAR_FIELD(length); /* - * We place the switch outside the loop for the sake of efficiency; - * this may not be worth doing... + * We place the switch outside the loop for the sake of efficiency; this + * may not be worth doing... 
*/ switch (a->type) { diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index 80043834b6..c775770f70 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.65 2005/07/28 20:26:19 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.66 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -50,7 +50,6 @@ check_list_invariants(List *list) Assert(list->head->next == list->tail); Assert(list->tail->next == NULL); } - #else #define check_list_invariants(l) #endif /* USE_ASSERT_CHECKING */ @@ -532,9 +531,9 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev) Assert(prev != NULL ? lnext(prev) == cell : list_head(list) == cell); /* - * If we're about to delete the last node from the list, free the - * whole list instead and return NIL, which is the only valid - * representation of a zero-length list. + * If we're about to delete the last node from the list, free the whole + * list instead and return NIL, which is the only valid representation of + * a zero-length list. */ if (list->length == 1) { @@ -543,9 +542,8 @@ list_delete_cell(List *list, ListCell *cell, ListCell *prev) } /* - * Otherwise, adjust the necessary list links, deallocate the - * particular node we have just removed, and return the list we were - * given. + * Otherwise, adjust the necessary list links, deallocate the particular + * node we have just removed, and return the list we were given. */ list->length--; @@ -951,7 +949,7 @@ list_append_unique_oid(List *list, Oid datum) * via equal(). * * This is almost the same functionality as list_union(), but list1 is - * modified in-place rather than being copied. Note also that list2's cells + * modified in-place rather than being copied. Note also that list2's cells * are not inserted in list1, so the analogy to list_concat() isn't perfect. */ List * @@ -1110,8 +1108,8 @@ list_copy(List *oldlist) newlist->length = oldlist->length; /* - * Copy over the data in the first cell; new_list() has already - * allocated the head cell itself + * Copy over the data in the first cell; new_list() has already allocated + * the head cell itself */ newlist->head->data = oldlist->head->data; @@ -1163,8 +1161,8 @@ list_copy_tail(List *oldlist, int nskip) oldlist_cur = oldlist_cur->next; /* - * Copy over the data in the first remaining cell; new_list() has - * already allocated the head cell itself + * Copy over the data in the first remaining cell; new_list() has already + * allocated the head cell itself */ newlist->head->data = oldlist_cur->data; diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c index e1e6c3da83..28202af9ee 100644 --- a/src/backend/nodes/makefuncs.c +++ b/src/backend/nodes/makefuncs.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.47 2005/04/06 16:34:05 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.48 2005/10/15 02:49:18 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -73,11 +73,10 @@ makeVar(Index varno, var->varlevelsup = varlevelsup; /* - * Since few if any routines ever create Var nodes with - * varnoold/varoattno different from varno/varattno, we don't provide - * separate arguments for them, but just initialize them to the given - * varno/varattno. This reduces code clutter and chance of error for - * most callers. 
+ * Since few if any routines ever create Var nodes with varnoold/varoattno + * different from varno/varattno, we don't provide separate arguments for + * them, but just initialize them to the given varno/varattno. This + * reduces code clutter and chance of error for most callers. */ var->varnoold = varno; var->varoattno = varattno; @@ -102,8 +101,8 @@ makeTargetEntry(Expr *expr, tle->resname = resname; /* - * We always set these fields to 0. If the caller wants to change them - * he must do so explicitly. Few callers do that, so omitting these + * We always set these fields to 0. If the caller wants to change them he + * must do so explicitly. Few callers do that, so omitting these * arguments reduces the chance of error. */ tle->ressortgroupref = 0; diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index d6d1236388..19306b3e53 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.260 2005/08/27 22:13:43 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.261 2005/10/15 02:49:18 momjian Exp $ * * NOTES * Every node type that can appear in stored rules' parsetrees *must* @@ -113,9 +113,9 @@ _outToken(StringInfo str, char *s) } /* - * Look for characters or patterns that are treated specially by - * read.c (either in pg_strtok() or in nodeRead()), and therefore need - * a protective backslash. + * Look for characters or patterns that are treated specially by read.c + * (either in pg_strtok() or in nodeRead()), and therefore need a + * protective backslash. */ /* These characters only need to be quoted at the start of the string */ if (*s == '<' || @@ -151,8 +151,8 @@ _outList(StringInfo str, List *node) { /* * For the sake of backward compatibility, we emit a slightly - * different whitespace format for lists of nodes vs. other types - * of lists. XXX: is this necessary? + * different whitespace format for lists of nodes vs. other types of + * lists. XXX: is this necessary? */ if (IsA(node, List)) { @@ -1444,9 +1444,9 @@ _outQuery(StringInfo str, Query *node) /* * Hack to work around missing outfuncs routines for a lot of the * utility-statement node types. (The only one we actually *need* for - * rules support is NotifyStmt.) Someday we ought to support 'em all, - * but for the meantime do this to avoid getting lots of warnings when - * running with debug_print_parse on. + * rules support is NotifyStmt.) Someday we ought to support 'em all, but + * for the meantime do this to avoid getting lots of warnings when running + * with debug_print_parse on. */ if (node->utilityStmt) { @@ -1616,8 +1616,8 @@ _outValue(StringInfo str, Value *value) case T_Float: /* - * We assume the value is a valid numeric literal and so does - * not need quoting. + * We assume the value is a valid numeric literal and so does not + * need quoting. */ appendStringInfoString(str, value->val.str); break; @@ -2099,9 +2099,8 @@ _outNode(StringInfo str, void *obj) default: /* - * This should be an ERROR, but it's too useful to be able - * to dump structures that _outNode only understands part - * of. + * This should be an ERROR, but it's too useful to be able to + * dump structures that _outNode only understands part of. 
*/ elog(WARNING, "could not dump unrecognized node type: %d", (int) nodeTag(obj)); diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c index 9d6511cf50..2f70355b32 100644 --- a/src/backend/nodes/print.c +++ b/src/backend/nodes/print.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.76 2005/05/01 18:56:18 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.77 2005/10/15 02:49:19 momjian Exp $ * * HISTORY * AUTHOR DATE MAJOR EVENT @@ -603,7 +603,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label) if (IsA(p, BitmapAnd)) { ListCell *l; - BitmapAnd *bitmapandplan = (BitmapAnd *) p; + BitmapAnd *bitmapandplan = (BitmapAnd *) p; foreach(l, bitmapandplan->bitmapplans) { @@ -616,7 +616,7 @@ print_plan_recursive(Plan *p, Query *parsetree, int indentLevel, char *label) if (IsA(p, BitmapOr)) { ListCell *l; - BitmapOr *bitmaporplan = (BitmapOr *) p; + BitmapOr *bitmaporplan = (BitmapOr *) p; foreach(l, bitmaporplan->bitmapplans) { diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index df2165863d..09175074d5 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.46 2004/12/31 21:59:55 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.47 2005/10/15 02:49:19 momjian Exp $ * * HISTORY * AUTHOR DATE MAJOR EVENT @@ -41,10 +41,10 @@ stringToNode(char *str) void *retval; /* - * We save and restore the pre-existing state of pg_strtok. This makes - * the world safe for re-entrant invocation of stringToNode, without - * incurring a lot of notational overhead by having to pass the - * next-character pointer around through all the readfuncs.c code. + * We save and restore the pre-existing state of pg_strtok. This makes the + * world safe for re-entrant invocation of stringToNode, without incurring + * a lot of notational overhead by having to pass the next-character + * pointer around through all the readfuncs.c code. */ save_strtok = pg_strtok_ptr; @@ -211,13 +211,13 @@ nodeTokenType(char *token, int length) if (*numptr == '+' || *numptr == '-') numptr++, numlen--; if ((numlen > 0 && isdigit((unsigned char) *numptr)) || - (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1]))) + (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1]))) { /* * Yes. Figure out whether it is integral or float; this requires - * both a syntax check and a range check. strtol() can do both for - * us. We know the token will end at a character that strtol will - * stop at, so we do not need to modify the string. + * both a syntax check and a range check. strtol() can do both for us. + * We know the token will end at a character that strtol will stop at, + * so we do not need to modify the string. 
*/ long val; char *endptr; @@ -386,8 +386,7 @@ nodeRead(char *token, int tok_len) case T_Integer: /* - * we know that the token terminates on a char atol will stop - * at + * we know that the token terminates on a char atol will stop at */ result = (Node *) makeInteger(atol(token)); break; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index ff49ee21f2..46c9983446 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.181 2005/08/01 20:31:08 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/readfuncs.c,v 1.182 2005/10/15 02:49:19 momjian Exp $ * * NOTES * Path and Plan nodes do not have any readfuncs support, because we @@ -389,12 +389,12 @@ _readOpExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -417,12 +417,12 @@ _readDistinctExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -445,12 +445,12 @@ _readScalarArrayOpExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; @@ -686,12 +686,12 @@ _readNullIfExpr(void) READ_OID_FIELD(opfuncid); /* - * The opfuncid is stored in the textual format primarily for - * debugging and documentation reasons. 
We want to always read it as - * zero to force it to be re-looked-up in the pg_operator entry. This - * ensures that stored rules don't have hidden dependencies on - * operators' functions. (We don't currently support an ALTER OPERATOR - * command, but might someday.) + * The opfuncid is stored in the textual format primarily for debugging + * and documentation reasons. We want to always read it as zero to force + * it to be re-looked-up in the pg_operator entry. This ensures that + * stored rules don't have hidden dependencies on operators' functions. + * (We don't currently support an ALTER OPERATOR command, but might + * someday.) */ local_node->opfuncid = InvalidOid; diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index a3b5c7d6d0..bcfc7d0920 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -23,7 +23,7 @@ * Copyright (c) 2003-2005, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.7 2005/09/02 19:02:20 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/nodes/tidbitmap.c,v 1.8 2005/10/15 02:49:19 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -39,7 +39,7 @@ /* * The maximum number of tuples per page is not large (typically 256 with * 8K pages, or 1024 with 32K pages). So there's not much point in making - * the per-page bitmaps variable size. We just legislate that the size + * the per-page bitmaps variable size. We just legislate that the size * is this: */ #define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage @@ -52,10 +52,10 @@ * for that page in the page table. * * We actually store both exact pages and lossy chunks in the same hash - * table, using identical data structures. (This is because dynahash.c's + * table, using identical data structures. (This is because dynahash.c's * memory management doesn't allow space to be transferred easily from one * hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the - * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we + * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we * also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer * remainder operations. 
So, define it like this: */ @@ -69,7 +69,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */ #define BITNUM(x) ((x) % BITS_PER_BITMAPWORD) /* number of active words for an exact page: */ -#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1) +#define WORDS_PER_PAGE ((MAX_TUPLES_PER_PAGE - 1) / BITS_PER_BITMAPWORD + 1) /* number of active words for a lossy chunk: */ #define WORDS_PER_CHUNK ((PAGES_PER_CHUNK - 1) / BITS_PER_BITMAPWORD + 1) @@ -85,7 +85,7 @@ typedef uint32 bitmapword; /* must be an unsigned type */ */ typedef struct PagetableEntry { - BlockNumber blockno; /* page number (hashtable key) */ + BlockNumber blockno; /* page number (hashtable key) */ bool ischunk; /* T = lossy storage, F = exact */ bitmapword words[Max(WORDS_PER_PAGE, WORDS_PER_CHUNK)]; } PagetableEntry; @@ -136,9 +136,9 @@ struct TIDBitmap /* Local function prototypes */ static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage); static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, - const TIDBitmap *b); + const TIDBitmap *b); static const PagetableEntry *tbm_find_pageentry(const TIDBitmap *tbm, - BlockNumber pageno); + BlockNumber pageno); static PagetableEntry *tbm_get_pageentry(TIDBitmap *tbm, BlockNumber pageno); static bool tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno); static void tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno); @@ -160,8 +160,8 @@ tbm_create(long maxbytes) long nbuckets; /* - * Create the TIDBitmap struct, with enough trailing space to serve - * the needs of the TBMIterateResult sub-struct. + * Create the TIDBitmap struct, with enough trailing space to serve the + * needs of the TBMIterateResult sub-struct. */ tbm = (TIDBitmap *) palloc(sizeof(TIDBitmap) + MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber)); @@ -173,17 +173,17 @@ tbm_create(long maxbytes) tbm->status = TBM_EMPTY; /* - * Estimate number of hashtable entries we can have within maxbytes. - * This estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) - * plus a pointer per hash entry, which is crude but good enough for - * our purpose. Also count an extra Pointer per entry for the arrays - * created during iteration readout. + * Estimate number of hashtable entries we can have within maxbytes. This + * estimates the hash overhead at MAXALIGN(sizeof(HASHELEMENT)) plus a + * pointer per hash entry, which is crude but good enough for our purpose. + * Also count an extra Pointer per entry for the arrays created during + * iteration readout. 
*/ nbuckets = maxbytes / (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(PagetableEntry)) + sizeof(Pointer) + sizeof(Pointer)); - nbuckets = Min(nbuckets, INT_MAX-1); /* safety limit */ - nbuckets = Max(nbuckets, 16); /* sanity limit */ + nbuckets = Min(nbuckets, INT_MAX - 1); /* safety limit */ + nbuckets = Max(nbuckets, 16); /* sanity limit */ tbm->maxentries = (int) nbuckets; return tbm; @@ -319,7 +319,7 @@ static void tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage) { PagetableEntry *apage; - int wordnum; + int wordnum; if (bpage->ischunk) { @@ -330,7 +330,7 @@ tbm_union_page(TIDBitmap *a, const PagetableEntry *bpage) if (w != 0) { - BlockNumber pg; + BlockNumber pg; pg = bpage->blockno + (wordnum * BITS_PER_BITMAPWORD); while (w != 0) @@ -428,12 +428,12 @@ static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) { const PagetableEntry *bpage; - int wordnum; + int wordnum; if (apage->ischunk) { /* Scan each bit in chunk, try to clear */ - bool candelete = true; + bool candelete = true; for (wordnum = 0; wordnum < WORDS_PER_PAGE; wordnum++) { @@ -442,8 +442,8 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) if (w != 0) { bitmapword neww = w; - BlockNumber pg; - int bitnum; + BlockNumber pg; + int bitnum; pg = apage->blockno + (wordnum * BITS_PER_BITMAPWORD); bitnum = 0; @@ -472,19 +472,19 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) else if (tbm_page_is_lossy(b, apage->blockno)) { /* - * When the page is lossy in b, we have to mark it lossy in a too. - * We know that no bits need be set in bitmap a, but we do not know - * which ones should be cleared, and we have no API for "at most - * these tuples need be checked". (Perhaps it's worth adding that?) + * When the page is lossy in b, we have to mark it lossy in a too. We + * know that no bits need be set in bitmap a, but we do not know which + * ones should be cleared, and we have no API for "at most these + * tuples need be checked". (Perhaps it's worth adding that?) */ tbm_mark_page_lossy(a, apage->blockno); /* - * Note: tbm_mark_page_lossy will have removed apage from a, and - * may have inserted a new lossy chunk instead. We can continue the - * same seq_search scan at the caller level, because it does not - * matter whether we visit such a new chunk or not: it will have - * only the bit for apage->blockno set, which is correct. + * Note: tbm_mark_page_lossy will have removed apage from a, and may + * have inserted a new lossy chunk instead. We can continue the same + * seq_search scan at the caller level, because it does not matter + * whether we visit such a new chunk or not: it will have only the bit + * for apage->blockno set, which is correct. * * We must return false here since apage was already deleted. */ @@ -492,7 +492,7 @@ tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) } else { - bool candelete = true; + bool candelete = true; bpage = tbm_find_pageentry(b, apage->blockno); if (bpage != NULL) @@ -535,17 +535,20 @@ tbm_begin_iterate(TIDBitmap *tbm) int nchunks; tbm->iterating = true; + /* * Reset iteration pointers. */ tbm->spageptr = 0; tbm->schunkptr = 0; tbm->schunkbit = 0; + /* * Nothing else to do if no entries, nor if we don't have a hashtable. */ if (tbm->nentries == 0 || tbm->status != TBM_HASH) return; + /* * Create and fill the sorted page lists if we didn't already. 
*/ @@ -591,6 +594,7 @@ tbm_iterate(TIDBitmap *tbm) TBMIterateResult *output = &(tbm->output); Assert(tbm->iterating); + /* * If lossy chunk pages remain, make sure we've advanced schunkptr/ * schunkbit to the next set bit. @@ -598,12 +602,12 @@ tbm_iterate(TIDBitmap *tbm) while (tbm->schunkptr < tbm->nchunks) { PagetableEntry *chunk = tbm->schunks[tbm->schunkptr]; - int schunkbit = tbm->schunkbit; + int schunkbit = tbm->schunkbit; while (schunkbit < PAGES_PER_CHUNK) { - int wordnum = WORDNUM(schunkbit); - int bitnum = BITNUM(schunkbit); + int wordnum = WORDNUM(schunkbit); + int bitnum = BITNUM(schunkbit); if ((chunk->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0) break; @@ -618,6 +622,7 @@ tbm_iterate(TIDBitmap *tbm) tbm->schunkptr++; tbm->schunkbit = 0; } + /* * If both chunk and per-page data remain, must output the numerically * earlier page. @@ -717,7 +722,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno) * * If new, the entry is marked as an exact (non-chunk) entry. * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static PagetableEntry * @@ -785,8 +790,8 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) HASH_FIND, NULL); if (page != NULL && page->ischunk) { - int wordnum = WORDNUM(bitno); - int bitnum = BITNUM(bitno); + int wordnum = WORDNUM(bitno); + int bitnum = BITNUM(bitno); if ((page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0) return true; @@ -797,7 +802,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) /* * tbm_mark_page_lossy - mark the page number as lossily stored * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static void @@ -818,9 +823,8 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno) chunk_pageno = pageno - bitno; /* - * Remove any extant non-lossy entry for the page. If the page is - * its own chunk header, however, we skip this and handle the case - * below. + * Remove any extant non-lossy entry for the page. If the page is its own + * chunk header, however, we skip this and handle the case below. */ if (bitno != 0) { @@ -879,10 +883,9 @@ tbm_lossify(TIDBitmap *tbm) /* * XXX Really stupid implementation: this just lossifies pages in - * essentially random order. We should be paying some attention - * to the number of bits set in each page, instead. Also it might - * be a good idea to lossify more than the minimum number of pages - * during each call. + * essentially random order. We should be paying some attention to the + * number of bits set in each page, instead. Also it might be a good idea + * to lossify more than the minimum number of pages during each call. */ Assert(!tbm->iterating); Assert(tbm->status == TBM_HASH); @@ -892,9 +895,10 @@ tbm_lossify(TIDBitmap *tbm) { if (page->ischunk) continue; /* already a chunk header */ + /* - * If the page would become a chunk header, we won't save anything - * by converting it to lossy, so skip it. + * If the page would become a chunk header, we won't save anything by + * converting it to lossy, so skip it. */ if ((page->blockno % PAGES_PER_CHUNK) == 0) continue; @@ -906,9 +910,9 @@ tbm_lossify(TIDBitmap *tbm) return; /* we have done enough */ /* - * Note: tbm_mark_page_lossy may have inserted a lossy chunk into - * the hashtable. 
We can continue the same seq_search scan since - * we do not care whether we visit lossy chunks or not. + * Note: tbm_mark_page_lossy may have inserted a lossy chunk into the + * hashtable. We can continue the same seq_search scan since we do + * not care whether we visit lossy chunks or not. */ } }
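The bitmapset.c hunk near the top of this diff documents why bms_hash_value() walks the words from back to front and mixes a circular shift into the XOR: trailing all-zero words must not change the hash (so bms_equal() sets hash alike), and a plain XOR would collapse the alternating odd/even bit patterns typical of rangetable indexes. Below is a minimal standalone sketch of that idea, not the PostgreSQL source: the names bitmap_hash and rotl1 are illustrative, and the explicit rotate stands in for the shift-and-carry branch that the hunk only partially shows.

```c
#include <stdint.h>

typedef uint32_t bitmapword;
#define BITS_PER_BITMAPWORD 32

/* Rotate left by one bit: the "circular shift" the comment refers to. */
static bitmapword
rotl1(bitmapword w)
{
	return (w << 1) | (w >> (BITS_PER_BITMAPWORD - 1));
}

/*
 * Hash an array of bitmap words from the last word toward the first,
 * XORing each word into the accumulator and circularly shifting between
 * words.  Because trailing zero words leave the accumulator unchanged
 * until the first nonzero word is reached, two sets that differ only in
 * trailing all-zero words hash identically -- the property the comment
 * requires for consistency with bms_equal().
 */
static uint32_t
bitmap_hash(const bitmapword *words, int nwords)
{
	bitmapword	result = 0;
	int			wordnum;

	if (words == NULL || nwords <= 0)
		return 0;				/* all empty sets hash to 0 */

	for (wordnum = nwords; --wordnum > 0;)
	{
		result ^= words[wordnum];
		result = rotl1(result);	/* circular shift between words */
	}
	result ^= words[0];			/* first word: no shift afterwards */
	return (uint32_t) result;
}
```

For example, the word arrays {0x1, 0x0} and {0x1} produce the same value, while the shift keeps sets using only even-numbered bits from collapsing onto a small range of outputs, as the comment in the hunk explains.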
