Diffstat (limited to 'src/backend/utils')
27 files changed, 145 insertions, 140 deletions
diff --git a/src/backend/utils/activity/backend_progress.c b/src/backend/utils/activity/backend_progress.c
index 293254993c..6743e68cef 100644
--- a/src/backend/utils/activity/backend_progress.c
+++ b/src/backend/utils/activity/backend_progress.c
@@ -10,7 +10,7 @@
  */
 #include "postgres.h"
 
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
 #include "utils/backend_progress.h"
 #include "utils/backend_status.h"
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index a368101103..2901f9f5a9 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -16,13 +16,13 @@
 #include "miscadmin.h"
 #include "pg_trace.h"
 #include "pgstat.h"
-#include "port/atomics.h" /* for memory barriers */
+#include "port/atomics.h" /* for memory barriers */
 #include "storage/ipc.h"
-#include "storage/proc.h" /* for MyProc */
+#include "storage/proc.h" /* for MyProc */
 #include "storage/sinvaladt.h"
 #include "utils/ascii.h"
 #include "utils/backend_status.h"
-#include "utils/guc.h" /* for application_name */
+#include "utils/guc.h" /* for application_name */
 #include "utils/memutils.h"
@@ -498,8 +498,8 @@ pgstat_setup_backend_status_context(void)
 {
     if (!backendStatusSnapContext)
         backendStatusSnapContext = AllocSetContextCreate(TopMemoryContext,
-                                "Backend Status Snapshot",
-                                ALLOCSET_SMALL_SIZES);
+                                "Backend Status Snapshot",
+                                ALLOCSET_SMALL_SIZES);
 }
@@ -1033,7 +1033,8 @@ pgstat_get_my_query_id(void)
     if (!MyBEEntry)
         return 0;
 
-    /* There's no need for a lock around pgstat_begin_read_activity /
+    /*
+     * There's no need for a lock around pgstat_begin_read_activity /
      * pgstat_end_read_activity here as it's only called from
      * pg_stat_get_activity which is already protected, or from the same
      * backend which means that there won't be concurrent writes.
diff --git a/src/backend/utils/activity/wait_event.c b/src/backend/utils/activity/wait_event.c
index 89b5b8b7b9..6baf67740c 100644
--- a/src/backend/utils/activity/wait_event.c
+++ b/src/backend/utils/activity/wait_event.c
@@ -22,8 +22,8 @@
  */
 #include "postgres.h"
 
-#include "storage/lmgr.h" /* for GetLockNameFromTagType */
-#include "storage/lwlock.h" /* for GetLWLockIdentifier */
+#include "storage/lmgr.h" /* for GetLockNameFromTagType */
+#include "storage/lwlock.h" /* for GetLWLockIdentifier */
 #include "utils/wait_event.h"
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 7861a0a613..67f8b29434 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -2453,9 +2453,9 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
         return -1;
 
     /*
-     * Check for column-level privileges first. This serves in
-     * part as a check on whether the column even exists, so we
-     * need to do it before checking table-level privilege.
+     * Check for column-level privileges first. This serves in part as a check
+     * on whether the column even exists, so we need to do it before checking
+     * table-level privilege.
     */
    aclresult = pg_attribute_aclcheck_ext(tableoid, attnum, roleid, mode, &is_missing);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index da1a879f1f..3c70bb5943 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -867,7 +867,7 @@ pg_relation_filenode(PG_FUNCTION_ARGS)
     {
         if (relform->relfilenode)
             result = relform->relfilenode;
-        else /* Consult the relation mapper */
+        else /* Consult the relation mapper */
             result = RelationMapOidToFilenode(relid, relform->relisshared);
     }
@@ -946,17 +946,17 @@ pg_relation_filepath(PG_FUNCTION_ARGS)
         rnode.dbNode = MyDatabaseId;
         if (relform->relfilenode)
             rnode.relNode = relform->relfilenode;
-        else /* Consult the relation mapper */
+        else /* Consult the relation mapper */
             rnode.relNode = RelationMapOidToFilenode(relid, relform->relisshared);
     }
     else
     {
-        /* no storage, return NULL */
-        rnode.relNode = InvalidOid;
-        /* some compilers generate warnings without these next two lines */
-        rnode.dbNode = InvalidOid;
-        rnode.spcNode = InvalidOid;
+        /* no storage, return NULL */
+        rnode.relNode = InvalidOid;
+        /* some compilers generate warnings without these next two lines */
+        rnode.dbNode = InvalidOid;
+        rnode.spcNode = InvalidOid;
     }
 
     if (!OidIsValid(rnode.relNode))
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index 322152ebd9..c436d9318b 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -160,16 +160,15 @@ read_binary_file(const char *filename, int64 seek_offset, int64 bytes_to_read,
 #define MIN_READ_SIZE 4096
 
             /*
-             * If not at end of file, and sbuf.len is equal to
-             * MaxAllocSize - 1, then either the file is too large, or
-             * there is nothing left to read. Attempt to read one more
-             * byte to see if the end of file has been reached. If not,
-             * the file is too large; we'd rather give the error message
-             * for that ourselves.
+             * If not at end of file, and sbuf.len is equal to MaxAllocSize -
+             * 1, then either the file is too large, or there is nothing left
+             * to read. Attempt to read one more byte to see if the end of
+             * file has been reached. If not, the file is too large; we'd
+             * rather give the error message for that ourselves.
             */
            if (sbuf.len == MaxAllocSize - 1)
            {
-                char rbuf[1];
+                char rbuf[1];
 
                if (fread(rbuf, 1, 1, file) != 0 || !feof(file))
                    ereport(ERROR,
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 97f0265c12..085fec3ea2 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -636,10 +636,10 @@ pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS)
     * Check if any of these are in the list of interesting PIDs, that being
     * the sessions that the isolation tester is running. We don't use
     * "arrayoverlaps" here, because it would lead to cache lookups and one of
-     * our goals is to run quickly with debug_invalidate_system_caches_always > 0. We expect
-     * blocking_pids to be usually empty and otherwise a very small number in
-     * isolation tester cases, so make that the outer loop of a naive search
-     * for a match.
+     * our goals is to run quickly with debug_invalidate_system_caches_always
+     * > 0. We expect blocking_pids to be usually empty and otherwise a very
+     * small number in isolation tester cases, so make that the outer loop of
+     * a naive search for a match.
     */
    for (i = 0; i < num_blocking_pids; i++)
        for (j = 0; j < num_interesting_pids; j++)
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index e2b87a7ed9..2984768d19 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -34,8 +34,8 @@
  */
 static void
 PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
-                            TupleDesc tupdesc, MemoryContext context,
-                            const char *parent, int level)
+                            TupleDesc tupdesc, MemoryContext context,
+                            const char *parent, int level)
 {
 #define PG_GET_BACKEND_MEMORY_CONTEXTS_COLS 9
@@ -52,8 +52,8 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
        ident = context->ident;
 
    /*
-     * To be consistent with logging output, we label dynahash contexts
-     * with just the hash table name as with MemoryContextStatsPrint().
+     * To be consistent with logging output, we label dynahash contexts with
+     * just the hash table name as with MemoryContextStatsPrint().
     */
    if (ident && strcmp(name, "dynahash") == 0)
    {
@@ -75,7 +75,7 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
    if (ident)
    {
-        int idlen = strlen(ident);
+        int idlen = strlen(ident);
        char clipped_ident[MEMORY_CONTEXT_IDENT_DISPLAY_SIZE];
 
        /*
@@ -108,7 +108,7 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
    for (child = context->firstchild; child != NULL; child = child->nextchild)
    {
        PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
-                                    child, name, level + 1);
+                                    child, name, level + 1);
    }
 }
@@ -150,7 +150,7 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
    MemoryContextSwitchTo(oldcontext);
 
    PutMemoryContextsStatsTupleStore(tupstore, tupdesc,
-                                TopMemoryContext, NULL, 0);
+                                TopMemoryContext, NULL, 0);
 
    /* clean up and return the tuplestore */
    tuplestore_donestoring(tupstore);
diff --git a/src/backend/utils/adt/name.c b/src/backend/utils/adt/name.c
index c93be3350e..602a724d2f 100644
--- a/src/backend/utils/adt/name.c
+++ b/src/backend/utils/adt/name.c
@@ -234,7 +234,7 @@ namestrcpy(Name name, const char *str)
 {
    /* NB: We need to zero-pad the destination. */
    strncpy(NameStr(*name), str, NAMEDATALEN);
-    NameStr(*name)[NAMEDATALEN-1] = '\0';
+    NameStr(*name)[NAMEDATALEN - 1] = '\0';
 }
 
 /*
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index caa09d6373..453af401ca 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -1668,16 +1668,16 @@ get_collation_actual_version(char collprovider, const char *collcollate)
    }
    else
 #endif
-    if (collprovider == COLLPROVIDER_LIBC &&
-        pg_strcasecmp("C", collcollate) != 0 &&
-        pg_strncasecmp("C.", collcollate, 2) != 0 &&
-        pg_strcasecmp("POSIX", collcollate) != 0)
+    if (collprovider == COLLPROVIDER_LIBC &&
+        pg_strcasecmp("C", collcollate) != 0 &&
+        pg_strncasecmp("C.", collcollate, 2) != 0 &&
+        pg_strcasecmp("POSIX", collcollate) != 0)
    {
 #if defined(__GLIBC__)
        /* Use the glibc version because we don't have anything better. */
        collversion = pstrdup(gnu_get_libc_version());
 #elif defined(LC_VERSION_MASK)
-        locale_t loc;
+        locale_t loc;
 
        /* Look up FreeBSD collation version. */
        loc = newlocale(LC_COLLATE, collcollate, NULL);
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index 2c10f2c867..671fe6ddb7 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -330,7 +330,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
        stats->statypid[slot_idx] = typcache->type_id;
        stats->statyplen[slot_idx] = typcache->typlen;
        stats->statypbyval[slot_idx] = typcache->typbyval;
-        stats->statypalign[slot_idx] = typcache->typalign;
+        stats->statypalign[slot_idx] = typcache->typalign;
        slot_idx++;
    }
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 7c77c338ce..96269fc2ad 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -394,8 +394,8 @@ RI_FKey_check(TriggerData *trigdata)
     * Now check that foreign key exists in PK table
     *
     * XXX detectNewRows must be true when a partitioned table is on the
-     * referenced side. The reason is that our snapshot must be fresh
-     * in order for the hack in find_inheritance_children() to work.
+     * referenced side. The reason is that our snapshot must be fresh in
+     * order for the hack in find_inheritance_children() to work.
     */
    ri_PerformCheck(riinfo, &qkey, qplan, fk_rel, pk_rel,
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 23787a6ae7..1a71fdbc33 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -1802,8 +1802,8 @@ hash_record(PG_FUNCTION_ARGS)
    tuple.t_data = record;
 
    /*
-     * We arrange to look up the needed hashing info just once per series
-     * of calls, assuming the record type doesn't change underneath us.
+     * We arrange to look up the needed hashing info just once per series of
+     * calls, assuming the record type doesn't change underneath us.
     */
    my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
    if (my_extra == NULL ||
@@ -1923,8 +1923,8 @@ hash_record_extended(PG_FUNCTION_ARGS)
    tuple.t_data = record;
 
    /*
-     * We arrange to look up the needed hashing info just once per series
-     * of calls, assuming the record type doesn't change underneath us.
+     * We arrange to look up the needed hashing info just once per series of
+     * calls, assuming the record type doesn't change underneath us.
     */
    my_extra = (RecordCompareData *) fcinfo->flinfo->fn_extra;
    if (my_extra == NULL ||
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 881e8ec03d..84ad62caea 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2980,37 +2980,38 @@ pg_get_functiondef(PG_FUNCTION_ARGS)
    }
    else
    {
-    appendStringInfoString(&buf, "AS ");
+        appendStringInfoString(&buf, "AS ");
 
-    tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
-    if (!isnull)
-    {
-        simple_quote_literal(&buf, TextDatumGetCString(tmp));
-        appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
-    }
+        tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_probin, &isnull);
+        if (!isnull)
+        {
+            simple_quote_literal(&buf, TextDatumGetCString(tmp));
+            appendStringInfoString(&buf, ", "); /* assume prosrc isn't null */
+        }
 
-    tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
-    if (isnull)
-        elog(ERROR, "null prosrc");
-    prosrc = TextDatumGetCString(tmp);
+        tmp = SysCacheGetAttr(PROCOID, proctup, Anum_pg_proc_prosrc, &isnull);
+        if (isnull)
+            elog(ERROR, "null prosrc");
+        prosrc = TextDatumGetCString(tmp);
 
-    /*
-     * We always use dollar quoting. Figure out a suitable delimiter.
-     *
-     * Since the user is likely to be editing the function body string, we
-     * shouldn't use a short delimiter that he might easily create a conflict
-     * with. Hence prefer "$function$"/"$procedure$", but extend if needed.
-     */
-    initStringInfo(&dq);
-    appendStringInfoChar(&dq, '$');
-    appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
-    while (strstr(prosrc, dq.data) != NULL)
-        appendStringInfoChar(&dq, 'x');
-    appendStringInfoChar(&dq, '$');
-
-    appendBinaryStringInfo(&buf, dq.data, dq.len);
-    appendStringInfoString(&buf, prosrc);
-    appendBinaryStringInfo(&buf, dq.data, dq.len);
+        /*
+         * We always use dollar quoting. Figure out a suitable delimiter.
+         *
+         * Since the user is likely to be editing the function body string, we
+         * shouldn't use a short delimiter that he might easily create a
+         * conflict with. Hence prefer "$function$"/"$procedure$", but extend
+         * if needed.
+         */
+        initStringInfo(&dq);
+        appendStringInfoChar(&dq, '$');
+        appendStringInfoString(&dq, (isfunction ? "function" : "procedure"));
+        while (strstr(prosrc, dq.data) != NULL)
+            appendStringInfoChar(&dq, 'x');
+        appendStringInfoChar(&dq, '$');
+
+        appendBinaryStringInfo(&buf, dq.data, dq.len);
+        appendStringInfoString(&buf, prosrc);
+        appendBinaryStringInfo(&buf, dq.data, dq.len);
    }
 
    appendStringInfoChar(&buf, '\n');
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 3d4304cce7..37ddda7724 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3446,10 +3446,10 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
         * XXX This has the consequence that if there's a statistics on the
         * expression, we don't split it into individual Vars. This affects
         * our selection of statistics in estimate_multivariate_ndistinct,
-         * because it's probably better to use more accurate estimate for
-         * each expression and treat them as independent, than to combine
-         * estimates for the extracted variables when we don't know how that
-         * relates to the expressions.
+         * because it's probably better to use more accurate estimate for each
+         * expression and treat them as independent, than to combine estimates
+         * for the extracted variables when we don't know how that relates to
+         * the expressions.
         */
        examine_variable(root, groupexpr, 0, &vardata);
        if (HeapTupleIsValid(vardata.statsTuple) || vardata.isunique)
@@ -4039,16 +4039,16 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel,
            /*
             * Process a simple Var expression, by matching it to keys
-             * directly. If there's a matching expression, we'll try
-             * matching it later.
+             * directly. If there's a matching expression, we'll try matching
+             * it later.
             */
            if (IsA(varinfo->var, Var))
            {
                AttrNumber attnum = ((Var *) varinfo->var)->varattno;
 
                /*
-                 * Ignore expressions on system attributes. Can't rely on
-                 * the bms check for negative values.
+                 * Ignore expressions on system attributes. Can't rely on the
+                 * bms check for negative values.
                 */
                if (!AttrNumberIsForUserDefinedAttr(attnum))
                    continue;
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 3a93e92e40..79761f809c 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -3847,8 +3847,8 @@ timestamp_bin(PG_FUNCTION_ARGS)
    tm_delta = tm_diff - tm_diff % stride_usecs;
 
    /*
-     * Make sure the returned timestamp is at the start of the bin,
-     * even if the origin is in the future.
+     * Make sure the returned timestamp is at the start of the bin, even if
+     * the origin is in the future.
     */
    if (origin > timestamp && stride_usecs > 1)
        tm_delta -= stride_usecs;
@@ -4025,8 +4025,8 @@ timestamptz_bin(PG_FUNCTION_ARGS)
    tm_delta = tm_diff - tm_diff % stride_usecs;
 
    /*
-     * Make sure the returned timestamp is at the start of the bin,
-     * even if the origin is in the future.
+     * Make sure the returned timestamp is at the start of the bin, even if
+     * the origin is in the future.
     */
    if (origin > timestamp && stride_usecs > 1)
        tm_delta -= stride_usecs;
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 0c6e5f24ba..d2a11b1b5d 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -307,7 +307,7 @@ byteain(PG_FUNCTION_ARGS)
        size_t len = strlen(inputText);
        uint64 dstlen = pg_hex_dec_len(len - 2);
 
-        bc = dstlen + VARHDRSZ; /* maximum possible length */
+        bc = dstlen + VARHDRSZ; /* maximum possible length */
        result = palloc(bc);
 
        bc = pg_hex_decode(inputText + 2, len - 2, VARDATA(result), dstlen);
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index f54dc12b71..dcfd9e8389 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -181,7 +181,7 @@ static int numSharedInvalidMessagesArray;
 static int maxSharedInvalidMessagesArray;
 
 /* GUC storage */
-int debug_invalidate_system_caches_always = 0;
+int debug_invalidate_system_caches_always = 0;
 
 /*
  * Dynamically-registered callback functions.  Current implementation
@@ -692,26 +692,27 @@ AcceptInvalidationMessages(void)
    /*
     * Test code to force cache flushes anytime a flush could happen.
     *
-     * This helps detect intermittent faults caused by code that reads a
-     * cache entry and then performs an action that could invalidate the entry,
-     * but rarely actually does so. This can spot issues that would otherwise
+     * This helps detect intermittent faults caused by code that reads a cache
+     * entry and then performs an action that could invalidate the entry, but
+     * rarely actually does so. This can spot issues that would otherwise
     * only arise with badly timed concurrent DDL, for example.
     *
-     * The default debug_invalidate_system_caches_always = 0 does no forced cache flushes.
+     * The default debug_invalidate_system_caches_always = 0 does no forced
+     * cache flushes.
     *
-     * If used with CLOBBER_FREED_MEMORY, debug_invalidate_system_caches_always = 1
-     * (CLOBBER_CACHE_ALWAYS) provides a fairly thorough test that the system
-     * contains no cache-flush hazards. However, it also makes the system
-     * unbelievably slow --- the regression tests take about 100 times longer
-     * than normal.
+     * If used with CLOBBER_FREED_MEMORY,
+     * debug_invalidate_system_caches_always = 1 (CLOBBER_CACHE_ALWAYS)
+     * provides a fairly thorough test that the system contains no cache-flush
+     * hazards. However, it also makes the system unbelievably slow --- the
+     * regression tests take about 100 times longer than normal.
     *
-     * If you're a glutton for punishment, try debug_invalidate_system_caches_always = 3
-     * (CLOBBER_CACHE_RECURSIVELY). This slows things by at least a factor
-     * of 10000, so I wouldn't suggest trying to run the entire regression
-     * tests that way. It's useful to try a few simple tests, to make sure
-     * that cache reload isn't subject to internal cache-flush hazards, but
-     * after you've done a few thousand recursive reloads it's unlikely
-     * you'll learn more.
+     * If you're a glutton for punishment, try
+     * debug_invalidate_system_caches_always = 3 (CLOBBER_CACHE_RECURSIVELY).
+     * This slows things by at least a factor of 10000, so I wouldn't suggest
+     * trying to run the entire regression tests that way. It's useful to try
+     * a few simple tests, to make sure that cache reload isn't subject to
+     * internal cache-flush hazards, but after you've done a few thousand
+     * recursive reloads it's unlikely you'll learn more.
     */
 #ifdef CLOBBER_CACHE_ENABLED
    {
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 1a0950489d..07b0145132 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -897,8 +897,9 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
     * rejected a generic plan, it's possible to reach here with is_valid
     * false due to an invalidation while making the generic plan. In theory
     * the invalidation must be a false positive, perhaps a consequence of an
-     * sinval reset event or the debug_invalidate_system_caches_always code. But for
-     * safety, let's treat it as real and redo the RevalidateCachedQuery call.
+     * sinval reset event or the debug_invalidate_system_caches_always code.
+     * But for safety, let's treat it as real and redo the
+     * RevalidateCachedQuery call.
     */
    if (!plansource->is_valid)
        qlist = RevalidateCachedQuery(plansource, queryEnv);
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index bd88f6105b..fd05615e76 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1016,9 +1016,9 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
     *
     * When cache clobbering is enabled or when forced to by
     * RECOVER_RELATION_BUILD_MEMORY=1, arrange to allocate the junk in a
-     * temporary context that we'll free before returning. Make it a child
-     * of caller's context so that it will get cleaned up appropriately if
-     * we error out partway through.
+     * temporary context that we'll free before returning. Make it a child of
+     * caller's context so that it will get cleaned up appropriately if we
+     * error out partway through.
     */
 #ifdef MAYBE_RECOVER_RELATION_BUILD_MEMORY
    MemoryContext tmpcxt = NULL;
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 4915ef5934..35c8cf7b24 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -696,7 +696,7 @@ lookup_type_cache(Oid type_id, int flags)
                 !record_fields_have_hashing(typentry))
            hash_proc = InvalidOid;
        else if (hash_proc == F_HASH_RANGE &&
-                 !range_element_has_hashing(typentry))
+                 !range_element_has_hashing(typentry))
            hash_proc = InvalidOid;
 
        /*
@@ -742,10 +742,10 @@ lookup_type_cache(Oid type_id, int flags)
                 !array_element_has_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;
        else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
-                 !record_fields_have_extended_hashing(typentry))
+                 !record_fields_have_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;
        else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
-                 !range_element_has_extended_hashing(typentry))
+                 !range_element_has_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;
 
        /*
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 65019989cf..a3e1c59a82 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2717,10 +2717,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
            case 'Q':
                if (padding != 0)
                    appendStringInfo(buf, "%*lld", padding,
-                                     (long long) pgstat_get_my_query_id());
+                                     (long long) pgstat_get_my_query_id());
                else
                    appendStringInfo(buf, "%lld",
-                                     (long long) pgstat_get_my_query_id());
+                                     (long long) pgstat_get_my_query_id());
                break;
            default:
                /* format error - ignore it */
diff --git a/src/backend/utils/mb/Unicode/convutils.pm b/src/backend/utils/mb/Unicode/convutils.pm
index adfe12b2c2..5ad38514be 100644
--- a/src/backend/utils/mb/Unicode/convutils.pm
+++ b/src/backend/utils/mb/Unicode/convutils.pm
@@ -381,7 +381,7 @@ sub print_radix_table
        header => "Dummy map, for invalid values",
        min_idx => 0,
        max_idx => $widest_range,
-        label => "dummy map"
+        label => "dummy map"
      };
 
    ###
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 0a180341c2..eb7f7181e4 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2636,7 +2636,7 @@ static struct config_int ConfigureNamesInt[] =
            NULL
        },
        &vacuum_defer_cleanup_age,
-        0, 0, 1000000, /* see ComputeXidHorizons */
+        0, 0, 1000000, /* see ComputeXidHorizons */
        NULL, NULL, NULL
    },
    {
@@ -3257,6 +3257,7 @@ static struct config_int ConfigureNamesInt[] =
            NULL
        },
        &autovacuum_freeze_max_age,
+
        /*
         * see pg_resetwal and vacuum_failsafe_age if you change the
         * upper-limit value.
@@ -3513,9 +3514,9 @@ static struct config_int ConfigureNamesInt[] =
        0,
 #endif
        0, 5,
-#else /* not CLOBBER_CACHE_ENABLED */
-        0, 0, 0,
-#endif /* not CLOBBER_CACHE_ENABLED */
+#else /* not CLOBBER_CACHE_ENABLED */
+        0, 0, 0,
+#endif /* not CLOBBER_CACHE_ENABLED */
        NULL, NULL, NULL
    },
diff --git a/src/backend/utils/misc/queryjumble.c b/src/backend/utils/misc/queryjumble.c
index 1bb9fe20ea..f004a9ce8c 100644
--- a/src/backend/utils/misc/queryjumble.c
+++ b/src/backend/utils/misc/queryjumble.c
@@ -55,8 +55,8 @@ static void RecordConstLocation(JumbleState *jstate, int location);
 const char *
 CleanQuerytext(const char *query, int *location, int *len)
 {
-    int query_location = *location;
-    int query_len = *len;
+    int query_location = *location;
+    int query_len = *len;
 
    /* First apply starting offset, unless it's -1 (unknown). */
    if (query_location >= 0)
@@ -95,11 +95,12 @@ JumbleState *
 JumbleQuery(Query *query, const char *querytext)
 {
    JumbleState *jstate = NULL;
+
    if (query->utilityStmt)
    {
        query->queryId = compute_utility_query_id(querytext,
-                            query->stmt_location,
-                            query->stmt_len);
+                            query->stmt_location,
+                            query->stmt_len);
    }
    else
    {
@@ -137,12 +138,12 @@ JumbleQuery(Query *query, const char *querytext)
 static uint64
 compute_utility_query_id(const char *query_text, int query_location, int query_len)
 {
-    uint64 queryId;
+    uint64 queryId;
    const char *sql;
 
    /*
-     * Confine our attention to the relevant part of the string, if the
-     * query is a portion of a multi-statement source string.
+     * Confine our attention to the relevant part of the string, if the query
+     * is a portion of a multi-statement source string.
     */
    sql = CleanQuerytext(query_text, &query_location, &query_len);
@@ -150,9 +151,8 @@ compute_utility_query_id(const char *query_text, int query_location, int query_l
                                        query_len, 0));
 
    /*
-     * If we are unlucky enough to get a hash of zero(invalid), use
-     * queryID as 2 instead, queryID 1 is already in use for normal
-     * statements.
+     * If we are unlucky enough to get a hash of zero(invalid), use queryID as
+     * 2 instead, queryID 1 is already in use for normal statements.
     */
    if (queryId == UINT64CONST(0))
        queryId = UINT64CONST(2);
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 089ba2e106..cafc087254 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -1275,6 +1275,7 @@ LogicalTapeSetBlocks(LogicalTapeSet *lts)
    for (int i = 0; i < lts->nTapes; i++)
    {
        LogicalTape *lt = &lts->tapes[i];
+
        Assert(!lt->writing || lt->buffer == NULL);
    }
 #endif
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 95704265b6..2968c7f7b7 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -1808,8 +1808,8 @@ TransactionIdLimitedForOldSnapshots(TransactionId recentXmin,
        if (ts == threshold_timestamp)
        {
            /*
-             * Current timestamp is in same bucket as the last limit that
-             * was applied. Reuse.
+             * Current timestamp is in same bucket as the last limit that was
+             * applied. Reuse.
             */
            xlimit = threshold_xid;
        }
@@ -1965,13 +1965,13 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin)
         * number of minutes of difference between ts and the current
         * head_timestamp.
         *
-         * The distance from the current head to the current tail is one
-         * less than the number of entries in the mapping, because the
-         * entry at the head_offset is for 0 minutes after head_timestamp.
+         * The distance from the current head to the current tail is one less
+         * than the number of entries in the mapping, because the entry at the
+         * head_offset is for 0 minutes after head_timestamp.
         *
-         * The difference between these two values is the number of minutes
-         * by which we need to advance the mapping, either adding new entries
-         * or rotating old ones out.
+         * The difference between these two values is the number of minutes by
+         * which we need to advance the mapping, either adding new entries or
+         * rotating old ones out.
         */
        distance_to_new_tail = (ts - oldSnapshotControl->head_timestamp) / USECS_PER_MINUTE;
