| author | Tom Lane <tgl@sss.pgh.pa.us> | 2019-05-22 12:55:34 -0400 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2019-05-22 12:55:34 -0400 |
| commit | be76af171cdb3e7465c4ef234af403f97ad79b7b | |
| tree | 1fa62d2b7a6680a4237a1548f7002fa0b234b143 /src/bin | |
| parent | 66a4bad83aaa6613a45a00a488c04427f9969fb4 | |
| download | postgresql-be76af171cdb3e7465c4ef234af403f97ad79b7b.tar.gz | |
Initial pgindent run for v12.
This is still using the 2.0 version of pg_bsd_indent.
I thought it would be good to commit this separately,
so as to document the differences between 2.0 and 2.1 behavior.
Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us
Diffstat (limited to 'src/bin')
32 files changed, 325 insertions, 321 deletions
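Every hunk in this diff is whitespace-only. The recurring patterns are: continuation lines of function-call arguments re-aligned under the opening parenthesis (most of the pg_log_error and fatal call sites in the hunks below), multi-declarator lines such as `const char *want, *have;` split onto aligned lines, and block comments reflowed to fill the target width. A minimal sketch of what that layout looks like after pgindent; the function, variables, and message here are hypothetical stand-ins, not code from this commit:

```c
#include <stdio.h>

/*
 * Illustration of post-pgindent layout.  Block comments like this one are
 * reflowed so each line fills out to the target width instead of breaking
 * wherever the author happened to wrap it.
 */
static void
report_mismatch(const char *want, const char *have)
{
    /* Continuation arguments are aligned under the opening parenthesis. */
    fprintf(stderr, "expected access method \"%s\" but found \"%s\"\n",
            want, have);
}

int
main(void)
{
    const char *want,           /* pgindent splits "const char *want, *have;" */
               *have;           /* onto aligned declarator lines like these */

    want = "heap";
    have = "heap2";
    report_mismatch(want, have);
    return 0;
}
```

PostgreSQL sources indent with tabs (tab width 4); spaces are used in this sketch only so the alignment survives outside the tree.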
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index 5abc64f5f7..9a9069e551 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -185,7 +185,7 @@ static const char *default_timezone = NULL; "# allows any local user to connect as any PostgreSQL user, including\n" \ "# the database superuser. If you do not trust all your local users,\n" \ "# use another authentication method.\n" -static bool authwarning = false; +static bool authwarning = false; /* * Centralized knowledge of switches to pass to backend @@ -2431,11 +2431,11 @@ check_need_password(const char *authmethodlocal, const char *authmethodhost) !(pwprompt || pwfilename)) { pg_log_error("must specify a password for the superuser to enable %s authentication", - (strcmp(authmethodlocal, "md5") == 0 || - strcmp(authmethodlocal, "password") == 0 || - strcmp(authmethodlocal, "scram-sha-256") == 0) - ? authmethodlocal - : authmethodhost); + (strcmp(authmethodlocal, "md5") == 0 || + strcmp(authmethodlocal, "password") == 0 || + strcmp(authmethodlocal, "scram-sha-256") == 0) + ? authmethodlocal + : authmethodhost); exit(1); } } @@ -3067,8 +3067,8 @@ main(int argc, char *argv[]) char pg_ctl_path[MAXPGPATH]; /* - * Ensure that buffering behavior of stdout matches what it is - * in interactive usage (at least on most platforms). This prevents + * Ensure that buffering behavior of stdout matches what it is in + * interactive usage (at least on most platforms). This prevents * unexpected output ordering when, eg, output is redirected to a file. * POSIX says we must do this before any other usage of these files. */ diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c index 8f89be64cd..bb4257ff18 100644 --- a/src/bin/pg_archivecleanup/pg_archivecleanup.c +++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c @@ -123,7 +123,8 @@ CleanupPriorWALFiles(void) if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) && strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) { - char WALFilePath[MAXPGPATH * 2]; /* the file path including archive */ + char WALFilePath[MAXPGPATH * 2]; /* the file path + * including archive */ /* * Use the original file name again now, including any diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c index 5301e88375..86859b7813 100644 --- a/src/bin/pg_basebackup/pg_receivewal.c +++ b/src/bin/pg_basebackup/pg_receivewal.c @@ -633,7 +633,7 @@ main(int argc, char **argv) { /* translator: second %s is an option name */ pg_log_error("%s needs a slot to be specified using --slot", - do_drop_slot ? "--drop-slot" : "--create-slot"); + do_drop_slot ? 
"--drop-slot" : "--create-slot"); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); exit(1); @@ -728,7 +728,7 @@ main(int argc, char **argv) if (do_drop_slot) { if (verbose) - pg_log_info("dropping replication slot \"%s\"", replication_slot); + pg_log_info("dropping replication slot \"%s\"", replication_slot); if (!DropReplicationSlot(conn, replication_slot)) exit(1); diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index 547eb8de86..2e45c14642 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -133,9 +133,9 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested) if (verbose) pg_log_info("confirming write up to %X/%X, flush to %X/%X (slot %s)", - (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn, - (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn, - replication_slot); + (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn, + (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn, + replication_slot); replybuf[len] = 'r'; len += 1; @@ -1021,11 +1021,11 @@ prepareToTerminate(PGconn *conn, XLogRecPtr endpos, bool keepalive, XLogRecPtr l { if (keepalive) pg_log_info("endpos %X/%X reached by keepalive", - (uint32) (endpos >> 32), (uint32) endpos); + (uint32) (endpos >> 32), (uint32) endpos); else pg_log_info("endpos %X/%X reached by record at %X/%X", - (uint32) (endpos >> 32), (uint32) (endpos), - (uint32) (lsn >> 32), (uint32) lsn); + (uint32) (endpos >> 32), (uint32) (endpos), + (uint32) (lsn >> 32), (uint32) lsn); } } diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index e9854527e2..bf99a7dbc8 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -378,8 +378,8 @@ CheckServerVersionForStreaming(PGconn *conn) const char *serverver = PQparameterStatus(conn, "server_version"); pg_log_error("incompatible server version %s; client does not support streaming from server versions older than %s", - serverver ? serverver : "'unknown'", - "9.3"); + serverver ? serverver : "'unknown'", + "9.3"); return false; } else if (serverMajor > maxServerMajor) @@ -387,8 +387,8 @@ CheckServerVersionForStreaming(PGconn *conn) const char *serverver = PQparameterStatus(conn, "server_version"); pg_log_error("incompatible server version %s; client does not support streaming from server versions newer than %s", - serverver ? serverver : "'unknown'", - PG_VERSION); + serverver ? 
serverver : "'unknown'", + PG_VERSION); return false; } return true; @@ -620,8 +620,8 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream) if (stream->startpos > stoppos) { pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X", - stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos, - newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos); + stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos, + newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos); goto error; } diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index b402e49896..522a245088 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -1021,9 +1021,9 @@ findParentsByOid(TableInfo *self, if (parent == NULL) { pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found", - inhinfo[i].inhparent, - self->dobj.name, - oid); + inhinfo[i].inhparent, + self->dobj.name, + oid); exit_nicely(1); } self->parents[j++] = parent; diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index a0d7644a8a..952caef52c 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -235,7 +235,7 @@ InitCompressorZlib(CompressorState *cs, int level) if (deflateInit(zp, level) != Z_OK) fatal("could not initialize compression library: %s", - zp->msg); + zp->msg); /* Just be paranoid - maybe End is called after Start, with no Write */ zp->next_out = (void *) cs->zlibOut; @@ -334,7 +334,7 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF) if (inflateInit(zp) != Z_OK) fatal("could not initialize compression library: %s", - zp->msg); + zp->msg); /* no minimal chunk size for zlib */ while ((cnt = readF(AH, &buf, &buflen))) @@ -586,7 +586,7 @@ cfread(void *ptr, int size, cfp *fp) const char *errmsg = gzerror(fp->compressedfp, &errnum); fatal("could not read from input file: %s", - errnum == Z_ERRNO ? strerror(errno) : errmsg); + errnum == Z_ERRNO ? 
strerror(errno) : errmsg); } } else diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c index 3dfdae3a57..7152fd6457 100644 --- a/src/bin/pg_dump/parallel.c +++ b/src/bin/pg_dump/parallel.c @@ -1158,7 +1158,7 @@ parseWorkerCommand(ArchiveHandle *AH, TocEntry **te, T_Action *act, } else fatal("unrecognized command received from master: \"%s\"", - msg); + msg); } /* @@ -1201,7 +1201,7 @@ parseWorkerResponse(ArchiveHandle *AH, TocEntry *te, } else fatal("invalid message received from worker: \"%s\"", - msg); + msg); return status; } @@ -1439,7 +1439,7 @@ ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait) } else fatal("invalid message received from worker: \"%s\"", - msg); + msg); /* Free the string returned from getMessageFromWorker */ free(msg); @@ -1744,7 +1744,7 @@ pgpipe(int handles[2]) if ((s = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET) { pg_log_error("pgpipe: could not create socket: error code %d", - WSAGetLastError()); + WSAGetLastError()); return -1; } @@ -1755,21 +1755,21 @@ pgpipe(int handles[2]) if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR) { pg_log_error("pgpipe: could not bind: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(s); return -1; } if (listen(s, 1) == SOCKET_ERROR) { pg_log_error("pgpipe: could not listen: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(s); return -1; } if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR) { pg_log_error("pgpipe: getsockname() failed: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(s); return -1; } @@ -1780,7 +1780,7 @@ pgpipe(int handles[2]) if ((tmp_sock = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET) { pg_log_error("pgpipe: could not create second socket: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(s); return -1; } @@ -1789,7 +1789,7 @@ pgpipe(int handles[2]) if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR) { pg_log_error("pgpipe: could not connect socket: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(handles[1]); handles[1] = -1; closesocket(s); @@ -1798,7 +1798,7 @@ pgpipe(int handles[2]) if ((tmp_sock = accept(s, (SOCKADDR *) &serv_addr, &len)) == PGINVALID_SOCKET) { pg_log_error("pgpipe: could not accept connection: error code %d", - WSAGetLastError()); + WSAGetLastError()); closesocket(handles[1]); handles[1] = -1; closesocket(s); diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index d764d36936..564772ea7e 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -324,7 +324,7 @@ ProcessArchiveRestoreOptions(Archive *AHX) break; default: fatal("unexpected section code %d", - (int) te->section); + (int) te->section); break; } } @@ -608,7 +608,7 @@ RestoreArchive(Archive *AHX) { /* complain and emit unmodified command */ pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"", - dropStmtOrig); + dropStmtOrig); appendPQExpBufferStr(ftStmt, dropStmt); } } @@ -889,7 +889,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel) _selectOutputSchema(AH, te->namespace); pg_log_info("processing data for table \"%s.%s\"", - te->namespace, te->tag); + te->namespace, te->tag); /* * In parallel restore, if we created the table earlier in @@ -1288,8 +1288,8 @@ EndRestoreBlobs(ArchiveHandle *AH) pg_log_info(ngettext("restored %d large object", "restored %d large objects", - AH->blobCount), - 
AH->blobCount); + AH->blobCount), + AH->blobCount); } @@ -1320,12 +1320,12 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop) loOid = lo_create(AH->connection, oid); if (loOid == 0 || loOid != oid) fatal("could not create large object %u: %s", - oid, PQerrorMessage(AH->connection)); + oid, PQerrorMessage(AH->connection)); } AH->loFd = lo_open(AH->connection, oid, INV_WRITE); if (AH->loFd == -1) fatal("could not open large object %u: %s", - oid, PQerrorMessage(AH->connection)); + oid, PQerrorMessage(AH->connection)); } else { @@ -1429,7 +1429,7 @@ SortTocFromFile(Archive *AHX) te = getTocEntryByDumpId(AH, id); if (!te) fatal("could not find entry for ID %d", - id); + id); /* Mark it wanted */ ropt->idWanted[id - 1] = true; @@ -1662,10 +1662,10 @@ dump_lo_buf(ArchiveHandle *AH) pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)", "wrote %lu bytes of large object data (result = %lu)", AH->lo_buf_used), - (unsigned long) AH->lo_buf_used, (unsigned long) res); + (unsigned long) AH->lo_buf_used, (unsigned long) res); if (res != AH->lo_buf_used) fatal("could not write to large object (result: %lu, expected: %lu)", - (unsigned long) res, (unsigned long) AH->lo_buf_used); + (unsigned long) res, (unsigned long) AH->lo_buf_used); } else { @@ -1772,12 +1772,12 @@ warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE) { pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s", - AH->currentTE->dumpId, - AH->currentTE->catalogId.tableoid, - AH->currentTE->catalogId.oid, - AH->currentTE->desc ? AH->currentTE->desc : "(no desc)", - AH->currentTE->tag ? AH->currentTE->tag : "(no tag)", - AH->currentTE->owner ? AH->currentTE->owner : "(no owner)"); + AH->currentTE->dumpId, + AH->currentTE->catalogId.tableoid, + AH->currentTE->catalogId.oid, + AH->currentTE->desc ? AH->currentTE->desc : "(no desc)", + AH->currentTE->tag ? AH->currentTE->tag : "(no tag)", + AH->currentTE->owner ? 
AH->currentTE->owner : "(no owner)"); } AH->lastErrorStage = AH->stage; AH->lastErrorTE = AH->currentTE; @@ -2111,7 +2111,7 @@ _discoverArchiveFormat(ArchiveHandle *AH) if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH) fatal("directory name too long: \"%s\"", - AH->fSpec); + AH->fSpec); if (stat(buf, &st) == 0 && S_ISREG(st.st_mode)) { AH->format = archDirectory; @@ -2121,7 +2121,7 @@ _discoverArchiveFormat(ArchiveHandle *AH) #ifdef HAVE_LIBZ if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH) fatal("directory name too long: \"%s\"", - AH->fSpec); + AH->fSpec); if (stat(buf, &st) == 0 && S_ISREG(st.st_mode)) { AH->format = archDirectory; @@ -2129,7 +2129,7 @@ _discoverArchiveFormat(ArchiveHandle *AH) } #endif fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)", - AH->fSpec); + AH->fSpec); fh = NULL; /* keep compiler quiet */ } else @@ -2152,7 +2152,7 @@ _discoverArchiveFormat(ArchiveHandle *AH) fatal("could not read input file: %m"); else fatal("input file is too short (read %lu, expected 5)", - (unsigned long) cnt); + (unsigned long) cnt); } /* Save it, just in case we need it later */ @@ -2321,7 +2321,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, AH->currUser = NULL; /* unknown */ AH->currSchema = NULL; /* ditto */ AH->currTablespace = NULL; /* ditto */ - AH->currTableAm = NULL; /* ditto */ + AH->currTableAm = NULL; /* ditto */ AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry)); @@ -2465,11 +2465,11 @@ mark_dump_job_done(ArchiveHandle *AH, void *callback_data) { pg_log_info("finished item %d %s %s", - te->dumpId, te->desc, te->tag); + te->dumpId, te->desc, te->tag); if (status != 0) fatal("worker process failed: exit code %d", - status); + status); } @@ -2589,7 +2589,7 @@ ReadToc(ArchiveHandle *AH) /* Sanity check */ if (te->dumpId <= 0) fatal("entry ID %d out of range -- perhaps a corrupt TOC", - te->dumpId); + te->dumpId); te->hadDumper = ReadInt(AH); @@ -2702,7 +2702,7 @@ ReadToc(ArchiveHandle *AH) AH->ReadExtraTocPtr(AH, te); pg_log_debug("read TOC entry %d (ID %d) for %s %s", - i, te->dumpId, te->desc, te->tag); + i, te->dumpId, te->desc, te->tag); /* link completed entry into TOC circular list */ te->prev = AH->toc->prev; @@ -2738,12 +2738,12 @@ processEncodingEntry(ArchiveHandle *AH, TocEntry *te) encoding = pg_char_to_encoding(ptr1); if (encoding < 0) fatal("unrecognized encoding \"%s\"", - ptr1); + ptr1); AH->public.encoding = encoding; } else fatal("invalid ENCODING item: %s", - te->defn); + te->defn); free(defn); } @@ -2761,7 +2761,7 @@ processStdStringsEntry(ArchiveHandle *AH, TocEntry *te) AH->public.std_strings = false; else fatal("invalid STDSTRINGS item: %s", - te->defn); + te->defn); } static void @@ -3193,7 +3193,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user) if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) /* NOT warn_or_exit_horribly... use -O instead to skip this. 
*/ fatal("could not set session user to \"%s\": %s", - user, PQerrorMessage(AH->connection)); + user, PQerrorMessage(AH->connection)); PQclear(res); } @@ -3415,7 +3415,8 @@ static void _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam) { PQExpBuffer cmd; - const char *want, *have; + const char *want, + *have; have = AH->currTableAm; want = tableam; @@ -3530,7 +3531,7 @@ _getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH) } pg_log_warning("don't know how to set owner for object type \"%s\"", - type); + type); } /* @@ -3688,7 +3689,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData) else { pg_log_warning("don't know how to set owner for object type \"%s\"", - te->desc); + te->desc); } } @@ -3805,12 +3806,12 @@ ReadHead(ArchiveHandle *AH) if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX) fatal("unsupported version (%d.%d) in file header", - vmaj, vmin); + vmaj, vmin); AH->intSize = AH->ReadBytePtr(AH); if (AH->intSize > 32) fatal("sanity check on integer size (%lu) failed", - (unsigned long) AH->intSize); + (unsigned long) AH->intSize); if (AH->intSize > sizeof(int)) pg_log_warning("archive was made on a machine with larger integers, some operations might fail"); @@ -3824,7 +3825,7 @@ ReadHead(ArchiveHandle *AH) if (AH->format != fmt) fatal("expected format (%d) differs from format found in file (%d)", - AH->format, fmt); + AH->format, fmt); } if (AH->version >= K_VERS_1_2) @@ -3995,8 +3996,8 @@ restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list) { /* OK, restore the item and update its dependencies */ pg_log_info("processing item %d %s %s", - next_work_item->dumpId, - next_work_item->desc, next_work_item->tag); + next_work_item->dumpId, + next_work_item->desc, next_work_item->tag); (void) restore_toc_entry(AH, next_work_item, false); @@ -4085,8 +4086,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate, if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0) { pg_log_info("skipping item %d %s %s", - next_work_item->dumpId, - next_work_item->desc, next_work_item->tag); + next_work_item->dumpId, + next_work_item->desc, next_work_item->tag); /* Update its dependencies as though we'd completed it */ reduce_dependencies(AH, next_work_item, &ready_list); /* Loop around to see if anything else can be dispatched */ @@ -4094,8 +4095,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate, } pg_log_info("launching item %d %s %s", - next_work_item->dumpId, - next_work_item->desc, next_work_item->tag); + next_work_item->dumpId, + next_work_item->desc, next_work_item->tag); /* Dispatch to some worker */ DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE, @@ -4186,7 +4187,7 @@ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list) for (te = pending_list->pending_next; te != pending_list; te = te->pending_next) { pg_log_info("processing missed item %d %s %s", - te->dumpId, te->desc, te->tag); + te->dumpId, te->desc, te->tag); (void) restore_toc_entry(AH, te, false); } } @@ -4472,7 +4473,7 @@ mark_restore_job_done(ArchiveHandle *AH, ParallelReadyList *ready_list = (ParallelReadyList *) callback_data; pg_log_info("finished item %d %s %s", - te->dumpId, te->desc, te->tag); + te->dumpId, te->desc, te->tag); if (status == WORKER_CREATE_DONE) mark_create_done(AH, te); @@ -4485,7 +4486,7 @@ mark_restore_job_done(ArchiveHandle *AH, AH->public.n_errors++; else if (status != 0) fatal("worker process failed: exit code %d", - status); + status); reduce_dependencies(AH, te, 
ready_list); } @@ -4657,7 +4658,7 @@ repoint_table_dependencies(ArchiveHandle *AH) te->dependencies[i] = tabledataid; te->dataLength = Max(te->dataLength, tabledatate->dataLength); pg_log_debug("transferring dependency %d -> %d to %d", - te->dumpId, olddep, tabledataid); + te->dumpId, olddep, tabledataid); } } } @@ -4791,7 +4792,7 @@ static void inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te) { pg_log_info("table \"%s\" could not be created, will not restore its data", - te->tag); + te->tag); if (AH->tableDataId[te->dumpId] != 0) { diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c index ae5306b9da..497b81b684 100644 --- a/src/bin/pg_dump/pg_backup_custom.c +++ b/src/bin/pg_dump/pg_backup_custom.c @@ -445,7 +445,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) default: /* Always have a default */ fatal("unrecognized data block type (%d) while searching archive", - blkType); + blkType); break; } _readBlockHeader(AH, &blkType, &id); @@ -482,7 +482,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) /* Are we sane? */ if (id != te->dumpId) fatal("found unexpected block ID (%d) when reading data -- expected %d", - id, te->dumpId); + id, te->dumpId); switch (blkType) { @@ -496,7 +496,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) default: /* Always have a default */ fatal("unrecognized data block type %d while restoring archive", - blkType); + blkType); break; } } @@ -910,11 +910,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id) int byt; /* - * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() - * inside ReadInt rather than returning EOF. It doesn't seem worth - * jumping through hoops to deal with that case better, because no such - * files are likely to exist in the wild: only some 7.1 development - * versions of pg_dump ever generated such files. + * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside + * ReadInt rather than returning EOF. It doesn't seem worth jumping + * through hoops to deal with that case better, because no such files are + * likely to exist in the wild: only some 7.1 development versions of + * pg_dump ever generated such files. */ if (AH->version < K_VERS_1_3) *type = BLK_DATA; diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c index 9fd3b8a79f..8af5c7bebd 100644 --- a/src/bin/pg_dump/pg_backup_db.c +++ b/src/bin/pg_dump/pg_backup_db.c @@ -53,7 +53,7 @@ _check_database_version(ArchiveHandle *AH) remoteversion > AH->public.maxRemoteVersion)) { pg_log_error("server version: %s; %s version: %s", - remoteversion_str, progname, PG_VERSION); + remoteversion_str, progname, PG_VERSION); fatal("aborting because of server version mismatch"); } @@ -138,7 +138,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) newuser = requser; pg_log_info("connecting to database \"%s\" as user \"%s\"", - newdb, newuser); + newdb, newuser); password = AH->savedPassword; @@ -182,7 +182,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser) { if (!PQconnectionNeedsPassword(newConn)) fatal("could not reconnect to database: %s", - PQerrorMessage(newConn)); + PQerrorMessage(newConn)); PQfinish(newConn); if (password) @@ -304,8 +304,8 @@ ConnectDatabase(Archive *AHX, /* check to see that the backend connection was successfully made */ if (PQstatus(AH->connection) == CONNECTION_BAD) fatal("connection to database \"%s\" failed: %s", - PQdb(AH->connection) ? 
PQdb(AH->connection) : "", - PQerrorMessage(AH->connection)); + PQdb(AH->connection) ? PQdb(AH->connection) : "", + PQerrorMessage(AH->connection)); /* Start strict; later phases may override this. */ PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH, @@ -383,7 +383,7 @@ static void die_on_query_failure(ArchiveHandle *AH, const char *query) { pg_log_error("query failed: %s", - PQerrorMessage(AH->connection)); + PQerrorMessage(AH->connection)); fatal("query was: %s", query); } @@ -427,8 +427,8 @@ ExecuteSqlQueryForSingleRow(Archive *fout, const char *query) if (ntups != 1) fatal(ngettext("query returned %d row instead of one: %s", "query returned %d rows instead of one: %s", - ntups), - ntups, query); + ntups), + ntups, query); return res; } @@ -571,7 +571,7 @@ ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen) if (AH->pgCopyIn && PQputCopyData(AH->connection, buf, bufLen) <= 0) fatal("error returned by PQputCopyData: %s", - PQerrorMessage(AH->connection)); + PQerrorMessage(AH->connection)); } else if (AH->outputKind == OUTPUT_OTHERDATA) { @@ -620,7 +620,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag) if (PQputCopyEnd(AH->connection, NULL) <= 0) fatal("error returned by PQputCopyEnd: %s", - PQerrorMessage(AH->connection)); + PQerrorMessage(AH->connection)); /* Check command status and return to normal libpq state */ res = PQgetResult(AH->connection); @@ -632,7 +632,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag) /* Do this to ensure we've pumped libpq back to idle state */ if (PQgetResult(AH->connection) != NULL) pg_log_warning("unexpected extra results during COPY of table \"%s\"", - tocEntryTag); + tocEntryTag); AH->pgCopyIn = false; } diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c index 70eca82c91..cfa2f6ec74 100644 --- a/src/bin/pg_dump/pg_backup_directory.c +++ b/src/bin/pg_dump/pg_backup_directory.c @@ -348,7 +348,7 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen) fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + get_cfp_error(ctx->dataFH)); return; @@ -452,7 +452,7 @@ _LoadBlobs(ArchiveHandle *AH) /* Can't overflow because line and fname are the same length. 
*/ if (sscanf(line, "%u %s\n", &oid, fname) != 2) fatal("invalid line in large object TOC file \"%s\": \"%s\"", - fname, line); + fname, line); StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema); snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, fname); @@ -461,7 +461,7 @@ _LoadBlobs(ArchiveHandle *AH) } if (!cfeof(ctx->blobsTocFH)) fatal("error reading large object TOC file \"%s\"", - fname); + fname); if (cfclose(ctx->blobsTocFH) != 0) fatal("could not close large object TOC file \"%s\": %m", @@ -486,7 +486,7 @@ _WriteByte(ArchiveHandle *AH, const int i) if (cfwrite(&c, 1, ctx->dataFH) != 1) fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + get_cfp_error(ctx->dataFH)); return 1; } @@ -516,7 +516,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len) if (cfwrite(buf, len, ctx->dataFH) != len) fatal("could not write to output file: %s", - get_cfp_error(ctx->dataFH)); + get_cfp_error(ctx->dataFH)); return; } diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index b52593c3c0..569df9b4b5 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -553,10 +553,10 @@ _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh) const char *errmsg = gzerror(th->zFH, &errnum); fatal("could not read from input file: %s", - errnum == Z_ERRNO ? strerror(errno) : errmsg); + errnum == Z_ERRNO ? strerror(errno) : errmsg); #else fatal("could not read from input file: %s", - strerror(errno)); + strerror(errno)); #endif } } @@ -691,7 +691,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 || strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0) fatal("unexpected COPY statement syntax: \"%s\"", - te->copyStmt); + te->copyStmt); /* Emit all but the FROM part ... 
*/ ahwrite(te->copyStmt, 1, pos1, AH); @@ -1113,7 +1113,7 @@ _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th) snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) len); snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) th->fileLen); fatal("actual file length (%s) does not match expected (%s)", - buf1, buf2); + buf1, buf2); } pad = ((len + 511) & ~511) - len; @@ -1150,7 +1150,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename) snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ctx->tarFHpos); snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ctx->tarNextMember); pg_log_debug("moving from position %s to next member at file position %s", - buf1, buf2); + buf1, buf2); while (ctx->tarFHpos < ctx->tarNextMember) _tarReadRaw(AH, &c, 1, NULL, ctx->tarFH); @@ -1188,8 +1188,8 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename) id = atoi(th->targetFile); if ((TocIDRequired(AH, id) & REQ_DATA) != 0) fatal("restoring data out of order is not supported in this archive format: " - "\"%s\" is required, but comes before \"%s\" in the archive file.", - th->targetFile, filename); + "\"%s\" is required, but comes before \"%s\" in the archive file.", + th->targetFile, filename); /* Header doesn't match, so read to next header */ len = ((th->fileLen + 511) & ~511); /* Padded length */ @@ -1234,8 +1234,8 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th) if (len != 512) fatal(ngettext("incomplete tar header found (%lu byte)", "incomplete tar header found (%lu bytes)", - len), - (unsigned long) len); + len), + (unsigned long) len); /* Calc checksum */ chk = tarChecksum(h); @@ -1274,7 +1274,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th) snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) hPos); snprintf(lenbuf, sizeof(lenbuf), UINT64_FORMAT, (uint64) len); pg_log_debug("TOC Entry %s at %s (length %s, checksum %d)", - tag, posbuf, lenbuf, sum); + tag, posbuf, lenbuf, sum); } if (chk != sum) @@ -1284,7 +1284,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th) snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) ftello(ctx->tarFH)); fatal("corrupt tar header found in %s (expected %d, computed %d) file position %s", - tag, sum, chk, posbuf); + tag, sum, chk, posbuf); } th->targetFile = pg_strdup(tag); diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index e8ce719a0a..38a01758a1 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -135,7 +135,7 @@ static const CatalogId nilCatalogId = {0, 0}; /* override for standard extra_float_digits setting */ static bool have_extra_float_digits = false; -static int extra_float_digits; +static int extra_float_digits; /* * The default number of rows per INSERT when @@ -601,7 +601,7 @@ main(int argc, char **argv) errno == ERANGE) { pg_log_error("rows-per-insert must be in range %d..%d", - 1, INT_MAX); + 1, INT_MAX); exit_nicely(1); } dopt.dump_inserts = (int) rowsPerInsert; @@ -1112,13 +1112,14 @@ setup_connection(Archive *AH, const char *dumpencoding, ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES"); /* - * Use an explicitly specified extra_float_digits if it has been - * provided. Otherwise, set extra_float_digits so that we can dump float - * data exactly (given correctly implemented float I/O code, anyway). + * Use an explicitly specified extra_float_digits if it has been provided. + * Otherwise, set extra_float_digits so that we can dump float data + * exactly (given correctly implemented float I/O code, anyway). 
*/ if (have_extra_float_digits) { PQExpBuffer q = createPQExpBuffer(); + appendPQExpBuffer(q, "SET extra_float_digits TO %d", extra_float_digits); ExecuteSqlStatement(AH, q->data); @@ -1921,7 +1922,7 @@ dumpTableData_copy(Archive *fout, void *dcontext) /* Do this to ensure we've pumped libpq back to idle state */ if (PQgetResult(conn) != NULL) pg_log_warning("unexpected extra results during COPY of table \"%s\"", - classname); + classname); destroyPQExpBuffer(q); return 1; @@ -3468,7 +3469,7 @@ dumpBlobs(Archive *fout, void *arg) loFd = lo_open(conn, blobOid, INV_READ); if (loFd == -1) fatal("could not open large object %u: %s", - blobOid, PQerrorMessage(conn)); + blobOid, PQerrorMessage(conn)); StartBlob(fout, blobOid); @@ -3478,7 +3479,7 @@ dumpBlobs(Archive *fout, void *arg) cnt = lo_read(conn, loFd, buf, LOBBUFSIZE); if (cnt < 0) fatal("error reading large object %u: %s", - blobOid, PQerrorMessage(conn)); + blobOid, PQerrorMessage(conn)); WriteData(fout, buf, cnt); } while (cnt > 0); @@ -3711,7 +3712,7 @@ dumpPolicy(Archive *fout, PolicyInfo *polinfo) else { pg_log_error("unexpected policy command type: %c", - polinfo->polcmd); + polinfo->polcmd); exit_nicely(1); } @@ -3838,7 +3839,7 @@ getPublications(Archive *fout) if (strlen(pubinfo[i].rolname) == 0) pg_log_warning("owner of publication \"%s\" appears to be invalid", - pubinfo[i].dobj.name); + pubinfo[i].dobj.name); /* Decide whether we want to dump it */ selectDumpableObject(&(pubinfo[i].dobj), fout); @@ -4172,7 +4173,7 @@ getSubscriptions(Archive *fout) if (strlen(subinfo[i].rolname) == 0) pg_log_warning("owner of subscription \"%s\" appears to be invalid", - subinfo[i].dobj.name); + subinfo[i].dobj.name); /* Decide whether we want to dump it */ selectDumpableObject(&(subinfo[i].dobj), fout); @@ -4488,7 +4489,7 @@ binary_upgrade_extension_member(PQExpBuffer upgrade_buffer, } if (extobj == NULL) fatal("could not find parent extension for %s %s", - objtype, objname); + objtype, objname); appendPQExpBufferStr(upgrade_buffer, "\n-- For binary upgrade, handle extension membership the hard way\n"); @@ -4620,7 +4621,7 @@ getNamespaces(Archive *fout, int *numNamespaces) if (strlen(nsinfo[i].rolname) == 0) pg_log_warning("owner of schema \"%s\" appears to be invalid", - nsinfo[i].dobj.name); + nsinfo[i].dobj.name); } PQclear(res); @@ -4968,7 +4969,7 @@ getTypes(Archive *fout, int *numTypes) if (strlen(tyinfo[i].rolname) == 0) pg_log_warning("owner of data type \"%s\" appears to be invalid", - tyinfo[i].dobj.name); + tyinfo[i].dobj.name); } *numTypes = ntups; @@ -5053,7 +5054,7 @@ getOperators(Archive *fout, int *numOprs) if (strlen(oprinfo[i].rolname) == 0) pg_log_warning("owner of operator \"%s\" appears to be invalid", - oprinfo[i].dobj.name); + oprinfo[i].dobj.name); } PQclear(res); @@ -5355,7 +5356,7 @@ getOpclasses(Archive *fout, int *numOpclasses) if (strlen(opcinfo[i].rolname) == 0) pg_log_warning("owner of operator class \"%s\" appears to be invalid", - opcinfo[i].dobj.name); + opcinfo[i].dobj.name); } PQclear(res); @@ -5439,7 +5440,7 @@ getOpfamilies(Archive *fout, int *numOpfamilies) if (strlen(opfinfo[i].rolname) == 0) pg_log_warning("owner of operator family \"%s\" appears to be invalid", - opfinfo[i].dobj.name); + opfinfo[i].dobj.name); } PQclear(res); @@ -5608,7 +5609,7 @@ getAggregates(Archive *fout, int *numAggs) agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname)); if (strlen(agginfo[i].aggfn.rolname) == 0) pg_log_warning("owner of aggregate function \"%s\" appears to be invalid", - 
agginfo[i].aggfn.dobj.name); + agginfo[i].aggfn.dobj.name); agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */ agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */ agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl)); @@ -5868,7 +5869,7 @@ getFuncs(Archive *fout, int *numFuncs) if (strlen(finfo[i].rolname) == 0) pg_log_warning("owner of function \"%s\" appears to be invalid", - finfo[i].dobj.name); + finfo[i].dobj.name); } PQclear(res); @@ -6664,7 +6665,7 @@ getTables(Archive *fout, int *numTables) /* Emit notice if join for owner failed */ if (strlen(tblinfo[i].rolname) == 0) pg_log_warning("owner of table \"%s\" appears to be invalid", - tblinfo[i].dobj.name); + tblinfo[i].dobj.name); } if (dopt->lockWaitTimeout) @@ -6706,7 +6707,7 @@ getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables) owning_tab = findTableByOid(seqinfo->owning_tab); if (owning_tab == NULL) fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found", - seqinfo->owning_tab, seqinfo->dobj.catId.oid); + seqinfo->owning_tab, seqinfo->dobj.catId.oid); /* * Only dump identity sequences if we're going to dump the table that @@ -7470,7 +7471,7 @@ getRules(Archive *fout, int *numRules) ruleinfo[i].ruletable = findTableByOid(ruletableoid); if (ruleinfo[i].ruletable == NULL) fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found", - ruletableoid, ruleinfo[i].dobj.catId.oid); + ruletableoid, ruleinfo[i].dobj.catId.oid); ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace; ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump; ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type)); @@ -7686,9 +7687,9 @@ getTriggers(Archive *fout, TableInfo tblinfo[], int numTables) { if (PQgetisnull(res, j, i_tgconstrrelname)) fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)", - tginfo[j].dobj.name, - tbinfo->dobj.name, - tginfo[j].tgconstrrelid); + tginfo[j].dobj.name, + tbinfo->dobj.name, + tginfo[j].tgconstrrelid); tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname)); } else @@ -8377,7 +8378,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) { if (j + 1 != atoi(PQgetvalue(res, j, i_attnum))) fatal("invalid column numbering in table \"%s\"", - tbinfo->dobj.name); + tbinfo->dobj.name); tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, j, i_attname)); tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, j, i_atttypname)); tbinfo->atttypmod[j] = atoi(PQgetvalue(res, j, i_atttypmod)); @@ -8436,7 +8437,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) if (adnum <= 0 || adnum > ntups) fatal("invalid adnum value %d for table \"%s\"", - adnum, tbinfo->dobj.name); + adnum, tbinfo->dobj.name); /* * dropped columns shouldn't have defaults, but just in case, @@ -8552,7 +8553,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d", "expected %d check constraints on table \"%s\" but found %d", tbinfo->ncheck), - tbinfo->ncheck, tbinfo->dobj.name, numConstrs); + tbinfo->ncheck, tbinfo->dobj.name, numConstrs); pg_log_error("(The system catalogs might be corrupted.)"); exit_nicely(1); } @@ -10130,7 +10131,7 @@ dumpType(Archive *fout, TypeInfo *tyinfo) dumpUndefinedType(fout, tyinfo); else pg_log_warning("typtype of data type \"%s\" appears to be invalid", - tyinfo->dobj.name); + tyinfo->dobj.name); } /* @@ -11977,7 
+11978,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo) appendPQExpBufferStr(q, " STABLE"); else if (provolatile[0] != PROVOLATILE_VOLATILE) fatal("unrecognized provolatile value for function \"%s\"", - finfo->dobj.name); + finfo->dobj.name); } if (proisstrict[0] == 't') @@ -12027,7 +12028,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo) appendPQExpBufferStr(q, " PARALLEL RESTRICTED"); else if (proparallel[0] != PROPARALLEL_UNSAFE) fatal("unrecognized proparallel value for function \"%s\"", - finfo->dobj.name); + finfo->dobj.name); } for (i = 0; i < nconfigitems; i++) @@ -12160,7 +12161,7 @@ dumpCast(Archive *fout, CastInfo *cast) funcInfo = findFuncByOid(cast->castfunc); if (funcInfo == NULL) fatal("could not find function definition for function with OID %u", - cast->castfunc); + cast->castfunc); } defqry = createPQExpBuffer(); @@ -12269,14 +12270,14 @@ dumpTransform(Archive *fout, TransformInfo *transform) fromsqlFuncInfo = findFuncByOid(transform->trffromsql); if (fromsqlFuncInfo == NULL) fatal("could not find function definition for function with OID %u", - transform->trffromsql); + transform->trffromsql); } if (OidIsValid(transform->trftosql)) { tosqlFuncInfo = findFuncByOid(transform->trftosql); if (tosqlFuncInfo == NULL) fatal("could not find function definition for function with OID %u", - transform->trftosql); + transform->trftosql); } defqry = createPQExpBuffer(); @@ -12649,7 +12650,7 @@ getFormattedOperatorName(Archive *fout, const char *oproid) if (oprInfo == NULL) { pg_log_warning("could not find operator with OID %s", - oproid); + oproid); return NULL; } @@ -12717,7 +12718,7 @@ dumpAccessMethod(Archive *fout, AccessMethodInfo *aminfo) break; default: pg_log_warning("invalid type \"%c\" of access method \"%s\"", - aminfo->amtype, qamname); + aminfo->amtype, qamname); destroyPQExpBuffer(q); destroyPQExpBuffer(delq); free(qamname); @@ -13471,7 +13472,7 @@ dumpCollation(Archive *fout, CollInfo *collinfo) appendPQExpBufferStr(q, "default"); else fatal("unrecognized collation provider: %s\n", - collprovider); + collprovider); if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0) appendPQExpBufferStr(q, ", deterministic = false"); @@ -13943,7 +13944,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) if (!convertok) { pg_log_warning("aggregate function %s could not be dumped correctly for this database version; ignored", - aggsig); + aggsig); if (aggfullsig) free(aggfullsig); @@ -13998,7 +13999,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) break; default: fatal("unrecognized aggfinalmodify value for aggregate \"%s\"", - agginfo->aggfn.dobj.name); + agginfo->aggfn.dobj.name); break; } } @@ -14054,7 +14055,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) break; default: fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"", - agginfo->aggfn.dobj.name); + agginfo->aggfn.dobj.name); break; } } @@ -14079,7 +14080,7 @@ dumpAgg(Archive *fout, AggInfo *agginfo) appendPQExpBufferStr(details, ",\n PARALLEL = restricted"); else if (proparallel[0] != PROPARALLEL_UNSAFE) fatal("unrecognized proparallel value for function \"%s\"", - agginfo->aggfn.dobj.name); + agginfo->aggfn.dobj.name); } appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n", @@ -14776,7 +14777,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo *daclinfo) default: /* shouldn't get here */ fatal("unrecognized object type in default privileges: %d", - (int) daclinfo->defaclobjtype); + (int) daclinfo->defaclobjtype); type = ""; /* keep compiler quiet */ } @@ -14794,7 +14795,7 @@ dumpDefaultACL(Archive *fout, DefaultACLInfo 
*daclinfo) fout->remoteVersion, q)) fatal("could not parse default ACL list (%s)", - daclinfo->defaclacl); + daclinfo->defaclacl); if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL) ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId, @@ -14874,7 +14875,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId, initacls, initracls, owner, "", fout->remoteVersion, sql)) fatal("could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)", - initacls, initracls, name, type); + initacls, initracls, name, type); appendPQExpBuffer(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n"); } @@ -14882,7 +14883,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId, acls, racls, owner, "", fout->remoteVersion, sql)) fatal("could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)", - acls, racls, name, type); + acls, racls, name, type); if (sql->len > 0) { @@ -15381,17 +15382,17 @@ createViewAsClause(Archive *fout, TableInfo *tbinfo) { if (PQntuples(res) < 1) fatal("query to obtain definition of view \"%s\" returned no data", - tbinfo->dobj.name); + tbinfo->dobj.name); else fatal("query to obtain definition of view \"%s\" returned more than one definition", - tbinfo->dobj.name); + tbinfo->dobj.name); } len = PQgetlength(res, 0, 0); if (len == 0) fatal("definition of view \"%s\" appears to be empty (length zero)", - tbinfo->dobj.name); + tbinfo->dobj.name); /* Strip off the trailing semicolon so that other things may follow. */ Assert(PQgetvalue(res, 0, 0)[len - 1] == ';'); @@ -15473,7 +15474,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo) if (tbinfo->hasoids) pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")", - qrelname); + qrelname); if (dopt->binary_upgrade) binary_upgrade_set_type_oids_by_rel_oid(fout, q, @@ -15600,7 +15601,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo) */ if (tbinfo->numParents != 1) fatal("invalid number of parents %d for table \"%s\"", - tbinfo->numParents, tbinfo->dobj.name); + tbinfo->numParents, tbinfo->dobj.name); appendPQExpBuffer(q, " PARTITION OF %s", fmtQualifiedDumpable(parentRel)); @@ -16157,7 +16158,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo) if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION) { - char *tableam = NULL; + char *tableam = NULL; if (tbinfo->relkind == RELKIND_RELATION || tbinfo->relkind == RELKIND_MATVIEW) @@ -16287,7 +16288,7 @@ getAttrName(int attrnum, TableInfo *tblInfo) return "tableoid"; } fatal("invalid column number %d for table \"%s\"", - attrnum, tblInfo->dobj.name); + attrnum, tblInfo->dobj.name); return NULL; /* keep compiler quiet */ } @@ -16549,7 +16550,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo) if (indxinfo == NULL) fatal("missing index for constraint \"%s\"", - coninfo->dobj.name); + coninfo->dobj.name); if (dopt->binary_upgrade) binary_upgrade_set_pg_class_oids(fout, q, @@ -16769,7 +16770,7 @@ dumpConstraint(Archive *fout, ConstraintInfo *coninfo) else { fatal("unrecognized constraint type: %c", - coninfo->contype); + coninfo->contype); } /* Dump Constraint Comments --- only works for table constraints */ @@ -16902,8 +16903,8 @@ dumpSequence(Archive *fout, TableInfo *tbinfo) { pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)", "query to get data of sequence \"%s\" returned %d rows (expected 1)", - PQntuples(res)), - tbinfo->dobj.name, PQntuples(res)); + PQntuples(res)), + tbinfo->dobj.name, PQntuples(res)); exit_nicely(1); } @@ -17056,7 
+17057,7 @@ dumpSequence(Archive *fout, TableInfo *tbinfo) if (owning_tab == NULL) fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found", - tbinfo->owning_tab, tbinfo->dobj.catId.oid); + tbinfo->owning_tab, tbinfo->dobj.catId.oid); if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION) { @@ -17122,8 +17123,8 @@ dumpSequenceData(Archive *fout, TableDataInfo *tdinfo) { pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)", "query to get data of sequence \"%s\" returned %d rows (expected 1)", - PQntuples(res)), - tbinfo->dobj.name, PQntuples(res)); + PQntuples(res)), + tbinfo->dobj.name, PQntuples(res)); exit_nicely(1); } @@ -17291,9 +17292,9 @@ dumpTrigger(Archive *fout, TriggerInfo *tginfo) { /* hm, not found before end of bytea value... */ pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"", - tginfo->tgargs, - tginfo->dobj.name, - tbinfo->dobj.name); + tginfo->tgargs, + tginfo->dobj.name, + tbinfo->dobj.name); exit_nicely(1); } @@ -17520,7 +17521,7 @@ dumpRule(Archive *fout, RuleInfo *rinfo) if (PQntuples(res) != 1) { pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned", - rinfo->dobj.name, tbinfo->dobj.name); + rinfo->dobj.name, tbinfo->dobj.name); exit_nicely(1); } @@ -17949,7 +17950,7 @@ getDependencies(Archive *fout) { #ifdef NOT_USED pg_log_warning("no referencing object %u %u", - objId.tableoid, objId.oid); + objId.tableoid, objId.oid); #endif continue; } @@ -17960,7 +17961,7 @@ getDependencies(Archive *fout) { #ifdef NOT_USED pg_log_warning("no referenced object %u %u", - refobjId.tableoid, refobjId.oid); + refobjId.tableoid, refobjId.oid); #endif continue; } diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c index 102731ea0c..e8fe5b596a 100644 --- a/src/bin/pg_dump/pg_dumpall.c +++ b/src/bin/pg_dump/pg_dumpall.c @@ -54,7 +54,7 @@ static char *constructConnStr(const char **keywords, const char **values); static PGresult *executeQuery(PGconn *conn, const char *query); static void executeCommand(PGconn *conn, const char *query); static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns, - SimpleStringList *names); + SimpleStringList *names); static char pg_dump_bin[MAXPGPATH]; static const char *progname; @@ -1406,8 +1406,8 @@ expand_dbname_patterns(PGconn *conn, /* * The loop below runs multiple SELECTs, which might sometimes result in - * duplicate entries in the name list, but we don't care, since all - * we're going to do is test membership of the list. + * duplicate entries in the name list, but we don't care, since all we're + * going to do is test membership of the list. 
*/ for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next) diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index 8a3fad3d16..f9b1ae6809 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -410,7 +410,7 @@ main(int argc, char **argv) default: pg_log_error("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"", - opts->formatName); + opts->formatName); exit_nicely(1); } } diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c index 813eadcb01..3c7ef90013 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -508,10 +508,10 @@ check_file_excluded(const char *path, bool is_source) { if (is_source) pg_log_debug("entry \"%s\" excluded from source file list", - path); + path); else pg_log_debug("entry \"%s\" excluded from target file list", - path); + path); return true; } } @@ -528,10 +528,10 @@ check_file_excluded(const char *path, bool is_source) { if (is_source) pg_log_debug("entry \"%s\" excluded from source file list", - path); + path); else pg_log_debug("entry \"%s\" excluded from target file list", - path); + path); return true; } } @@ -659,7 +659,7 @@ print_filemap(void) entry->pagemap.bitmapsize > 0) { pg_log_debug("%s (%s)", entry->path, - action_to_str(entry->action)); + action_to_str(entry->action)); if (entry->pagemap.bitmapsize > 0) datapagemap_print(&entry->pagemap); diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c index b6fa7e5b09..d6cbe23926 100644 --- a/src/bin/pg_rewind/libpq_fetch.c +++ b/src/bin/pg_rewind/libpq_fetch.c @@ -320,7 +320,7 @@ receiveFileChunks(const char *sql) if (PQgetisnull(res, 0, 2)) { pg_log_debug("received null value for chunk for file \"%s\", file has been deleted", - filename); + filename); remove_target_file(filename, true); pg_free(filename); PQclear(res); @@ -333,7 +333,7 @@ receiveFileChunks(const char *sql) */ snprintf(chunkoff_str, sizeof(chunkoff_str), INT64_FORMAT, chunkoff); pg_log_debug("received chunk for file \"%s\", offset %s, size %d", - filename, chunkoff_str, chunksize); + filename, chunkoff_str, chunksize); open_target_file(filename, false); diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c index 65e523f5d4..b31071bc09 100644 --- a/src/bin/pg_rewind/parsexlog.c +++ b/src/bin/pg_rewind/parsexlog.c @@ -315,7 +315,7 @@ SimpleXLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, pg_log_error("could not read file \"%s\": %m", xlogfpath); else pg_log_error("could not read file \"%s\": read %d of %zu", - xlogfpath, r, (Size) XLOG_BLCKSZ); + xlogfpath, r, (Size) XLOG_BLCKSZ); return -1; } diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index d47b5f9648..6cd3917628 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -52,7 +52,7 @@ char *datadir_target = NULL; char *datadir_source = NULL; char *connstr_source = NULL; -static bool debug = false; +static bool debug = false; bool showprogress = false; bool dry_run = false; bool do_sync = true; @@ -260,8 +260,8 @@ main(int argc, char **argv) { findCommonAncestorTimeline(&divergerec, &lastcommontliIndex); pg_log_info("servers diverged at WAL location %X/%X on timeline %u", - (uint32) (divergerec >> 32), (uint32) divergerec, - targetHistory[lastcommontliIndex].tli); + (uint32) (divergerec >> 32), (uint32) divergerec, + targetHistory[lastcommontliIndex].tli); /* * Check for the possibility that the target is in fact a direct @@ -304,8 +304,8 @@ 
main(int argc, char **argv)
 lastcommontliIndex,
 &chkptrec, &chkpttli, &chkptredo);
 pg_log_info("rewinding from last common checkpoint at %X/%X on timeline %u",
- (uint32) (chkptrec >> 32), (uint32) chkptrec,
- chkpttli);
+ (uint32) (chkptrec >> 32), (uint32) chkptrec,
+ chkpttli);
 /*
 * Build the filemap, by comparing the source and target data directories.
@@ -344,8 +344,8 @@ main(int argc, char **argv)
 if (showprogress)
 {
 pg_log_info("need to copy %lu MB (total source directory size is %lu MB)",
- (unsigned long) (filemap->fetch_size / (1024 * 1024)),
- (unsigned long) (filemap->total_size / (1024 * 1024)));
+ (unsigned long) (filemap->fetch_size / (1024 * 1024)),
+ (unsigned long) (filemap->total_size / (1024 * 1024)));
 fetch_size = filemap->fetch_size;
 fetch_done = 0;
@@ -495,8 +495,8 @@ progress_report(bool force)
 fetch_size / 1024);
 fprintf(stderr, _("%*s/%s kB (%d%%) copied"),
- (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
- percent);
+ (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
+ percent);
 if (isatty(fileno(stderr)))
 fprintf(stderr, "\r");
 else
@@ -581,8 +581,8 @@ getTimelineHistory(ControlFileData *controlFile, int *nentries)
 entry = &history[i];
 pg_log_debug("%d: %X/%X - %X/%X", entry->tli,
- (uint32) (entry->begin >> 32), (uint32) (entry->begin),
- (uint32) (entry->end >> 32), (uint32) (entry->end));
+ (uint32) (entry->begin >> 32), (uint32) (entry->begin),
+ (uint32) (entry->end >> 32), (uint32) (entry->end));
 }
 }
diff --git a/src/bin/pg_upgrade/controldata.c b/src/bin/pg_upgrade/controldata.c
index bbeba673d4..6788f882a8 100644
--- a/src/bin/pg_upgrade/controldata.c
+++ b/src/bin/pg_upgrade/controldata.c
@@ -137,14 +137,15 @@ get_control_data(ClusterInfo *cluster, bool live_check)
 if (p == NULL || strlen(p) <= 1)
 pg_fatal("%d: database cluster state problem\n", __LINE__);
- p++; /* remove ':' char */
+ p++; /* remove ':' char */
 /*
- * We checked earlier for a postmaster lock file, and if we found
- * one, we tried to start/stop the server to replay the WAL. However,
- * pg_ctl -m immediate doesn't leave a lock file, but does require
- * WAL replay, so we check here that the server was shut down cleanly,
- * from the controldata perspective.
+ * We checked earlier for a postmaster lock file, and if we
+ * found one, we tried to start/stop the server to replay the
+ * WAL. However, pg_ctl -m immediate doesn't leave a lock
+ * file, but does require WAL replay, so we check here that
+ * the server was shut down cleanly, from the controldata
+ * perspective.
 */
 /* remove leading spaces */
 while (*p == ' ')
diff --git a/src/bin/pg_upgrade/function.c b/src/bin/pg_upgrade/function.c
index 2a7df78f80..0c66d1c056 100644
--- a/src/bin/pg_upgrade/function.c
+++ b/src/bin/pg_upgrade/function.c
@@ -41,7 +41,7 @@ library_name_compare(const void *p1, const void *p2)
 return cmp;
 else
 return ((const LibraryInfo *) p1)->dbnum -
- ((const LibraryInfo *) p2)->dbnum;
+ ((const LibraryInfo *) p2)->dbnum;
 }
@@ -213,16 +213,16 @@ check_loadable_libraries(void)
 {
 /*
 * In Postgres 9.0, Python 3 support was added, and to do that, a
- * plpython2u language was created with library name plpython2.so as a
- * symbolic link to plpython.so. In Postgres 9.1, only the
- * plpython2.so library was created, and both plpythonu and plpython2u
- * pointing to it. For this reason, any reference to library name
- * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
- * the new cluster.
+ * plpython2u language was created with library name plpython2.so
+ * as a symbolic link to plpython.so. In Postgres 9.1, only the
+ * plpython2.so library was created, and both plpythonu and
+ * plpython2u pointing to it. For this reason, any reference to
+ * library name "plpython" in an old PG <= 9.1 cluster must look
+ * for "plpython2" in the new cluster.
 *
- * For this case, we could check pg_pltemplate, but that only works
- * for languages, and does not help with function shared objects, so
- * we just do a general fix.
+ * For this case, we could check pg_pltemplate, but that only
+ * works for languages, and does not help with function shared
+ * objects, so we just do a general fix.
 */
 if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
 strcmp(lib, "$libdir/plpython") == 0)
@@ -257,7 +257,7 @@ check_loadable_libraries(void)
 if (was_load_failure)
 fprintf(script, _("Database: %s\n"),
- old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
+ old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
 }
 PQfinish(conn);
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index dab9525c68..c40014d483 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -1031,9 +1031,9 @@ main(int argc, char **argv)
 else if (!XLByteInSeg(private.startptr, segno, WalSegSz))
 {
 pg_log_error("start WAL location %X/%X is not inside file \"%s\"",
- (uint32) (private.startptr >> 32),
- (uint32) private.startptr,
- fname);
+ (uint32) (private.startptr >> 32),
+ (uint32) private.startptr,
+ fname);
 goto bad_argument;
 }
@@ -1074,9 +1074,9 @@ main(int argc, char **argv)
 private.endptr != (segno + 1) * WalSegSz)
 {
 pg_log_error("end WAL location %X/%X is not inside file \"%s\"",
- (uint32) (private.endptr >> 32),
- (uint32) private.endptr,
- argv[argc - 1]);
+ (uint32) (private.endptr >> 32),
+ (uint32) private.endptr,
+ argv[argc - 1]);
 goto bad_argument;
 }
 }
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index a81383eb57..e99ab1e07f 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -217,7 +217,7 @@ bool progress_timestamp = false; /* progress report with Unix time */
 int nclients = 1; /* number of clients */
 int nthreads = 1; /* number of threads */
 bool is_connect; /* establish connection for each transaction */
-bool report_per_command; /* report per-command latencies */
+bool report_per_command; /* report per-command latencies */
 int main_pid; /* main process id used in log filename */
 char *pghost = "";
@@ -422,11 +422,11 @@ typedef struct
 /*
 * Separate randomness for each thread. Each thread option uses its own
- * random state to make all of them independent of each other and therefore
- * deterministic at the thread level.
+ * random state to make all of them independent of each other and
+ * therefore deterministic at the thread level.
 */
 RandomState ts_choose_rs; /* random state for selecting a script */
- RandomState ts_throttle_rs; /* random state for transaction throttling */
+ RandomState ts_throttle_rs; /* random state for transaction throttling */
 RandomState ts_sample_rs; /* random state for log sampling */
 int64 throttle_trigger; /* previous/next throttling (us) */
@@ -777,7 +777,7 @@ invalid_syntax:
 bool
 strtodouble(const char *str, bool errorOK, double *dv)
 {
- char *end;
+ char *end;
 errno = 0;
 *dv = strtod(str, &end);
@@ -1322,7 +1322,7 @@ makeVariableValue(Variable *var)
 else if (is_an_int(var->svalue))
 {
 /* if it looks like an int, it must be an int without overflow */
- int64 iv;
+ int64 iv;
 if (!strtoint64(var->svalue, false, &iv))
 return false;
@@ -2725,7 +2725,7 @@ readCommandResponse(CState *st, char *varprefix)
 while (res != NULL)
 {
- bool is_last;
+ bool is_last;
 /* peek at the next result to know whether the current is last */
 next_res = PQgetResult(st->con);
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index a2c0ec0b7f..df3824f689 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -291,7 +291,7 @@ exec_command(const char *cmd,
 !is_branching_command(cmd))
 {
 pg_log_warning("\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block",
- cmd);
+ cmd);
 }
 if (strcmp(cmd, "a") == 0)
@@ -551,8 +551,8 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd)
 if (!pw)
 {
 pg_log_error("could not get home directory for user ID %ld: %s",
- (long) user_id,
- errno ? strerror(errno) : _("user does not exist"));
+ (long) user_id,
+ errno ? strerror(errno) : _("user does not exist"));
 exit(EXIT_FAILURE);
 }
 dir = pw->pw_dir;
@@ -1015,10 +1015,10 @@ exec_command_ef_ev(PsqlScanState scan_state, bool active_branch,
 sverbuf, sizeof(sverbuf));
 if (is_func)
 pg_log_error("The server (version %s) does not support editing function source.",
- sverbuf);
+ sverbuf);
 else
 pg_log_error("The server (version %s) does not support editing view definitions.",
- sverbuf);
+ sverbuf);
 status = PSQL_CMD_ERROR;
 }
 else if (!query_buf)
@@ -1933,7 +1933,7 @@ exec_command_prompt(PsqlScanState scan_state, bool active_branch,
 if (!result)
 {
 pg_log_error("\\%s: could not read value for variable",
- cmd);
+ cmd);
 success = false;
 }
 }
@@ -2145,7 +2145,7 @@ exec_command_setenv(PsqlScanState scan_state, bool active_branch,
 else if (strchr(envvar, '=') != NULL)
 {
 pg_log_error("\\%s: environment variable name must not contain \"=\"",
- cmd);
+ cmd);
 success = false;
 }
 else if (!envval)
@@ -2206,10 +2206,10 @@ exec_command_sf_sv(PsqlScanState scan_state, bool active_branch,
 sverbuf, sizeof(sverbuf));
 if (is_func)
 pg_log_error("The server (version %s) does not support showing function source.",
- sverbuf);
+ sverbuf);
 else
 pg_log_error("The server (version %s) does not support showing view definitions.",
- sverbuf);
+ sverbuf);
 status = PSQL_CMD_ERROR;
 }
 else if (!obj_desc)
@@ -3441,7 +3441,7 @@ do_edit(const char *filename_arg, PQExpBuffer query_buf,
 if (ret == 0 || ret > MAXPGPATH)
 {
 pg_log_error("could not locate temporary directory: %s",
- !ret ? strerror(errno) : "");
+ !ret ? strerror(errno) : "");
 return false;
 }
@@ -3761,8 +3761,8 @@ do_pset(const char *param, const char *value, printQueryOpt *popt, bool quiet)
 else
 {
 pg_log_error("\\pset: ambiguous abbreviation \"%s\" matches both \"%s\" and \"%s\"",
- value,
- formats[match_pos].name, formats[i].name);
+ value,
+ formats[match_pos].name, formats[i].name);
 return false;
 }
 }
@@ -4694,7 +4694,7 @@ get_create_object_cmd(EditableObjectType obj_type, Oid oid,
 break;
 default:
 pg_log_error("\"%s.%s\" is not a view",
- nspname, relname);
+ nspname, relname);
 result = false;
 break;
 }
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 9579e10630..44a782478d 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -199,7 +199,7 @@ psql_get_variable(const char *varname, PsqlScanQuoteType quote,
 if (!appendShellStringNoError(&buf, value))
 {
 pg_log_error("shell command argument contains a newline or carriage return: \"%s\"",
- value);
+ value);
 free(buf.data);
 return NULL;
 }
@@ -509,7 +509,7 @@ AcceptResult(const PGresult *result)
 default:
 OK = false;
 pg_log_error("unexpected PQresultStatus: %d",
- PQresultStatus(result));
+ PQresultStatus(result));
 break;
 }
@@ -1278,7 +1278,7 @@ PrintQueryResults(PGresult *results)
 default:
 success = false;
 pg_log_error("unexpected PQresultStatus: %d",
- PQresultStatus(results));
+ PQresultStatus(results));
 break;
 }
@@ -1378,8 +1378,8 @@ SendQuery(const char *query)
 char sverbuf[32];
 pg_log_warning("The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 on_error_rollback_warning = true;
 }
 else
@@ -1484,7 +1484,7 @@ SendQuery(const char *query)
 /* PQTRANS_UNKNOWN is expected given a broken connection. */
 if (transaction_status != PQTRANS_UNKNOWN || ConnectionUp())
 pg_log_error("unexpected transaction status (%d)",
- transaction_status);
+ transaction_status);
 break;
 }
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index b02177a5c2..f9e53d6295 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -390,7 +390,7 @@ do_copy(const char *args)
 char *reason = wait_result_to_str(pclose_rc);
 pg_log_error("%s: %s", options->file,
- reason ? reason : "");
+ reason ? reason : "");
 if (reason)
 free(reason);
 }
diff --git a/src/bin/psql/crosstabview.c b/src/bin/psql/crosstabview.c
index 390f750c41..184ebe6d21 100644
--- a/src/bin/psql/crosstabview.c
+++ b/src/bin/psql/crosstabview.c
@@ -227,7 +227,7 @@ PrintResultsInCrosstab(const PGresult *res)
 if (piv_columns.count > CROSSTABVIEW_MAX_COLUMNS)
 {
 pg_log_error("\\crosstabview: maximum number of columns (%d) exceeded",
- CROSSTABVIEW_MAX_COLUMNS);
+ CROSSTABVIEW_MAX_COLUMNS);
 goto error_return;
 }
@@ -396,10 +396,10 @@ printCrosstab(const PGresult *results,
 if (cont.cells[idx] != NULL)
 {
 pg_log_error("\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"",
- rp->name ? rp->name :
- (popt.nullPrint ? popt.nullPrint : "(null)"),
- cp->name ? cp->name :
- (popt.nullPrint ? popt.nullPrint : "(null)"));
+ rp->name ? rp->name :
+ (popt.nullPrint ? popt.nullPrint : "(null)"),
+ cp->name ? cp->name :
+ (popt.nullPrint ? popt.nullPrint : "(null)"));
 goto error;
 }
@@ -644,7 +644,7 @@ indexOfColumn(char *arg, const PGresult *res)
 if (idx < 0 || idx >= PQnfields(res))
 {
 pg_log_error("\\crosstabview: column number %d is out of range 1..%d",
- idx + 1, PQnfields(res));
+ idx + 1, PQnfields(res));
 return -1;
 }
 }
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 773107227d..3dc5447f1a 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -160,8 +160,8 @@ describeAccessMethods(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support access methods.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -229,8 +229,8 @@ describeTablespaces(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_info("The server (version %s) does not support tablespaces.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -340,9 +340,9 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
 char sverbuf[32];
 pg_log_error("\\df does not take a \"%c\" option with server version %s",
- 'p',
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ 'p',
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -351,9 +351,9 @@ describeFunctions(const char *functypes, const char *pattern, bool verbose, bool
 char sverbuf[32];
 pg_log_error("\\df does not take a \"%c\" option with server version %s",
- 'w',
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ 'w',
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -1100,8 +1100,8 @@ listDefaultACLs(const char *pattern)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support altering default privileges.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -1401,7 +1401,7 @@ describeTableDetails(const char *pattern, bool verbose, bool showSystem)
 {
 if (pattern)
 pg_log_error("Did not find any relation named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any relations.");
 }
@@ -3548,8 +3548,8 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support per-database role settings.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -3584,10 +3584,10 @@ listDbRoleSettings(const char *pattern, const char *pattern2)
 {
 if (pattern && pattern2)
 pg_log_error("Did not find any settings for role \"%s\" and database \"%s\".",
- pattern, pattern2);
+ pattern, pattern2);
 else if (pattern)
 pg_log_error("Did not find any settings for role \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any settings.");
 }
@@ -3760,7 +3760,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys
 {
 if (pattern)
 pg_log_error("Did not find any relation named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any relations.");
 }
@@ -3804,7 +3804,7 @@ listPartitionedTables(const char *reltypes, const char *pattern, bool verbose)
 PQExpBufferData title;
 PGresult *res;
 printQueryOpt myopt = pset.popt;
- bool translate_columns[] = {false, false, false, false, false, false, false, false, false};
+ bool translate_columns[] = {false, false, false, false, false, false, false, false, false};
 const char *tabletitle;
 bool mixed_output = false;
@@ -4432,8 +4432,8 @@ listCollations(const char *pattern, bool verbose, bool showSystem)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support collations.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -4587,8 +4587,8 @@ listTSParsers(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -4667,7 +4667,7 @@ listTSParsersVerbose(const char *pattern)
 {
 if (pattern)
 pg_log_error("Did not find any text search parser named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any text search parsers.");
 }
@@ -4834,8 +4834,8 @@ listTSDictionaries(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -4905,8 +4905,8 @@ listTSTemplates(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -4976,8 +4976,8 @@ listTSConfigs(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support full text search.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5057,7 +5057,7 @@ listTSConfigsVerbose(const char *pattern)
 {
 if (pattern)
 pg_log_error("Did not find any text search configuration named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any text search configurations.");
 }
@@ -5182,8 +5182,8 @@ listForeignDataWrappers(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support foreign-data wrappers.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5265,8 +5265,8 @@ listForeignServers(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support foreign servers.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5347,8 +5347,8 @@ listUserMappings(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support user mappings.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5408,8 +5408,8 @@ listForeignTables(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support foreign tables.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5486,8 +5486,8 @@ listExtensions(const char *pattern)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support extensions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5543,8 +5543,8 @@ listExtensionContents(const char *pattern)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support extensions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5571,7 +5571,7 @@ listExtensionContents(const char *pattern)
 {
 if (pattern)
 pg_log_error("Did not find any extension named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any extensions.");
 }
@@ -5657,8 +5657,8 @@ listPublications(const char *pattern)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support publications.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5728,8 +5728,8 @@ describePublications(const char *pattern)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support publications.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
@@ -5766,7 +5766,7 @@ describePublications(const char *pattern)
 {
 if (pattern)
 pg_log_error("Did not find any publication named \"%s\".",
- pattern);
+ pattern);
 else
 pg_log_error("Did not find any publications.");
 }
@@ -5884,8 +5884,8 @@ describeSubscriptions(const char *pattern, bool verbose)
 char sverbuf[32];
 pg_log_error("The server (version %s) does not support subscriptions.",
- formatPGVersionNumber(pset.sversion, false,
- sverbuf, sizeof(sverbuf)));
+ formatPGVersionNumber(pset.sversion, false,
+ sverbuf, sizeof(sverbuf)));
 return true;
 }
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 7c5b45f7cc..dc6dcbba01 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -104,7 +104,7 @@ log_locus_callback(const char **filename, uint64 *lineno)
 if (pset.inputfile)
 {
 *filename = pset.inputfile;
- *lineno = pset.lineno;
+ *lineno = pset.lineno;
 }
 else
 {
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index d77aa2936d..04ce3e722f 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -485,7 +485,7 @@ static const SchemaQuery Query_for_list_of_relations = {
 static const SchemaQuery Query_for_list_of_partitioned_relations = {
 .catname = "pg_catalog.pg_class c",
 .selcondition = "c.relkind IN (" CppAsString2(RELKIND_PARTITIONED_TABLE)
- ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
+ ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
 .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
 .namespace = "c.relnamespace",
 .result = "pg_catalog.quote_ident(c.relname)",
@@ -4361,7 +4361,7 @@ exec_query(const char *query)
 {
 #ifdef NOT_USED
 pg_log_error("tab completion query failed: %s\nQuery was:\n%s",
- PQerrorMessage(pset.db), query);
+ PQerrorMessage(pset.db), query);
 #endif
 PQclear(result);
 result = NULL;
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index e456b9db2f..1d2a31cd65 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -139,7 +139,7 @@ ParseVariableBool(const char *value, const char *name, bool *result)
 /* string is not recognized; don't clobber *result */
 if (name)
 pg_log_error("unrecognized value \"%s\" for \"%s\": Boolean expected",
- value, name);
+ value, name);
 valid = false;
 }
 return valid;
@@ -176,7 +176,7 @@ ParseVariableNum(const char *value, const char *name, int *result)
 /* string is not recognized; don't clobber *result */
 if (name)
 pg_log_error("invalid value \"%s\" for \"%s\": integer expected",
- value, name);
+ value, name);
 return false;
 }
 }
@@ -394,5 +394,5 @@ PsqlVarEnumError(const char *name, const char *value, const char *suggestions)
 {
 pg_log_error("unrecognized value \"%s\" for \"%s\"\n"
 "Available values are: %s.",
- value, name, suggestions);
+ value, name, suggestions);
 }
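Nearly every hunk above is a whitespace-only change: the pgindent run rewraps comment blocks and re-indents the continuation lines of wrapped function calls. A minimal illustrative sketch of that layout, with a hypothetical helper (report_progress) and message that are not part of this commit, assuming only the general argument-alignment style visible in the hunks above:

```c
#include <stdio.h>

/*
 * Hypothetical example, not from the commit: shows the post-pgindent
 * layout of a wrapped call, where continuation lines of the argument
 * list are indented to line up under the first argument.
 */
static void
report_progress(const char *path, unsigned long done_mb, unsigned long total_mb)
{
	fprintf(stderr, "copied %lu of %lu MB from \"%s\"\n",
	        done_mb, total_mb, path);	/* continuation line aligned */
}

int
main(void)
{
	report_progress("base/12345", 42, 128);
	return 0;
}
```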
