Diffstat (limited to 'src/backend/access/nbtree')
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c   52
-rw-r--r--  src/backend/access/nbtree/nbtpage.c     20
-rw-r--r--  src/backend/access/nbtree/nbtree.c      54
-rw-r--r--  src/backend/access/nbtree/nbtsort.c     17
-rw-r--r--  src/backend/access/nbtree/nbtutils.c    18
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c     52
6 files changed, 111 insertions, 102 deletions
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 86c8698f69..de9bd95f88 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.176 2010/01/02 16:57:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.177 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,7 +88,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer);
* and btinsert. By here, itup is filled in, including the TID.
*
* If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this
- * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
+ * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or
* UNIQUE_CHECK_EXISTING) it will throw error for a duplicate.
* For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and
* don't actually insert.
@@ -149,9 +149,9 @@ top:
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
*
- * For a partial uniqueness check, we don't wait for the other xact.
- * Just let the tuple in and return false for possibly non-unique,
- * or true for definitely unique.
+ * For a partial uniqueness check, we don't wait for the other xact. Just
+ * let the tuple in and return false for possibly non-unique, or true for
+ * definitely unique.
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
@@ -281,7 +281,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* If we are doing a recheck, we expect to find the tuple we
- * are rechecking. It's not a duplicate, but we have to keep
+ * are rechecking. It's not a duplicate, but we have to keep
* scanning.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING &&
@@ -302,10 +302,10 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
/*
* It is a duplicate. If we are only doing a partial
- * check, then don't bother checking if the tuple is
- * being updated in another transaction. Just return
- * the fact that it is a potential conflict and leave
- * the full check till later.
+ * check, then don't bother checking if the tuple is being
+ * updated in another transaction. Just return the fact
+ * that it is a potential conflict and leave the full
+ * check till later.
*/
if (checkUnique == UNIQUE_CHECK_PARTIAL)
{
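The comment rewrapped above distinguishes the checkUnique modes: a partial check never waits on the other transaction, it just reports "possibly non-unique" and leaves the full check for later. A minimal standalone sketch of that decision, with toy names standing in for the real IndexUniqueCheck handling:

#include <stdbool.h>
#include <stdio.h>

/* Toy re-declaration of the four checkUnique modes named in the comment. */
typedef enum
{
    TOY_UNIQUE_CHECK_NO,        /* duplicates are allowed */
    TOY_UNIQUE_CHECK_YES,       /* error on a definite duplicate */
    TOY_UNIQUE_CHECK_PARTIAL,   /* report possible duplicates, never wait */
    TOY_UNIQUE_CHECK_EXISTING   /* recheck a tuple that is already inserted */
} ToyUniqueCheck;

/*
 * Return true for "definitely unique", false for "possibly non-unique".
 * The point the comment makes: in PARTIAL mode we never block on the other
 * transaction, we just hand the potential conflict back to the caller.
 */
static bool
toy_check_unique(ToyUniqueCheck mode, bool conflict, bool other_xact_in_progress)
{
    if (mode == TOY_UNIQUE_CHECK_NO || !conflict)
        return true;
    if (mode == TOY_UNIQUE_CHECK_PARTIAL)
        return false;           /* caller rechecks later */
    if (other_xact_in_progress)
        return false;           /* the real code would wait and start over */
    return false;               /* the real code would raise a duplicate error */
}

int
main(void)
{
    printf("partial check, conflicting xact still running: %d\n",
           toy_check_unique(TOY_UNIQUE_CHECK_PARTIAL, true, true));
    return 0;
}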
@@ -362,20 +362,20 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
}
/*
- * This is a definite conflict. Break the tuple down
- * into datums and report the error. But first, make
- * sure we release the buffer locks we're holding ---
+ * This is a definite conflict. Break the tuple down into
+ * datums and report the error. But first, make sure we
+ * release the buffer locks we're holding ---
* BuildIndexValueDescription could make catalog accesses,
- * which in the worst case might touch this same index
- * and cause deadlocks.
+ * which in the worst case might touch this same index and
+ * cause deadlocks.
*/
if (nbuf != InvalidBuffer)
_bt_relbuf(rel, nbuf);
_bt_relbuf(rel, buf);
{
- Datum values[INDEX_MAX_KEYS];
- bool isnull[INDEX_MAX_KEYS];
+ Datum values[INDEX_MAX_KEYS];
+ bool isnull[INDEX_MAX_KEYS];
index_deform_tuple(itup, RelationGetDescr(rel),
values, isnull);
@@ -385,7 +385,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
RelationGetRelationName(rel)),
errdetail("Key %s already exists.",
BuildIndexValueDescription(rel,
- values, isnull))));
+ values, isnull))));
}
}
else if (all_dead)
@@ -438,16 +438,16 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
}
/*
- * If we are doing a recheck then we should have found the tuple we
- * are checking. Otherwise there's something very wrong --- probably,
- * the index is on a non-immutable expression.
+ * If we are doing a recheck then we should have found the tuple we are
+ * checking. Otherwise there's something very wrong --- probably, the
+ * index is on a non-immutable expression.
*/
if (checkUnique == UNIQUE_CHECK_EXISTING && !found)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("failed to re-find tuple within index \"%s\"",
RelationGetRelationName(rel)),
- errhint("This may be because of a non-immutable index expression.")));
+ errhint("This may be because of a non-immutable index expression.")));
if (nbuf != InvalidBuffer)
_bt_relbuf(rel, nbuf);
@@ -518,10 +518,10 @@ _bt_findinsertloc(Relation rel,
if (itemsz > BTMaxItemSize(page))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) BTMaxItemSize(page),
- RelationGetRelationName(rel)),
+ errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+ (unsigned long) itemsz,
+ (unsigned long) BTMaxItemSize(page),
+ RelationGetRelationName(rel)),
errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
"Consider a function index of an MD5 hash of the value, "
"or use full text indexing.")));
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 5df975e4ec..c0502e5583 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.119 2010/02/13 00:59:58 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.120 2010/02/26 02:00:34 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -459,8 +459,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
@@ -480,8 +480,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN or PageSetTLI here because
- * we're about initialise the page, so no need.
+ * We don't do PageSetLSN or PageSetTLI here because we're about
+ * initialise the page, so no need.
*/
}
@@ -552,11 +552,11 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
{
page = BufferGetPage(buf);
if (_bt_page_recyclable(page))
- {
+ {
/*
- * If we are generating WAL for Hot Standby then create
- * a WAL record that will allow us to conflict with
- * queries running on standby.
+ * If we are generating WAL for Hot Standby then create a
+ * WAL record that will allow us to conflict with queries
+ * running on standby.
*/
if (XLogStandbyInfoActive())
{
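The hunk above rewraps the comment explaining why _bt_getbuf() emits a reuse-page WAL record before handing out a recyclable page when Hot Standby WAL is being generated: recovery needs a conflict point for standby queries that might still see the old page contents. A toy, standalone sketch of that control flow (invented names, not the PostgreSQL buffer or WAL API):

#include <stdbool.h>
#include <stdio.h>

static bool standby_info_active = true;    /* toy stand-in for the real check */

static void
toy_log_reuse_page(unsigned blkno, unsigned latest_removed_xid)
{
    printf("WAL: reuse of block %u may conflict with queries up to xid %u\n",
           blkno, latest_removed_xid);
}

/* Claim a recyclable page; log the reuse first if standbys may care. */
static unsigned
toy_get_free_page(unsigned blkno, bool recyclable, unsigned latest_removed_xid)
{
    if (recyclable)
    {
        /*
         * If WAL for Hot Standby is being generated, log the reuse first so
         * recovery has a conflict point for queries that can still see the
         * old contents of this page.
         */
        if (standby_info_active)
            toy_log_reuse_page(blkno, latest_removed_xid);
        return blkno;
    }
    return 0;                   /* caller would extend the relation instead */
}

int
main(void)
{
    (void) toy_get_free_page(7, true, 1234);
    return 0;
}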
@@ -762,6 +762,7 @@ _bt_delitems(Relation rel, Buffer buf,
if (isVacuum)
{
xl_btree_vacuum xlrec_vacuum;
+
xlrec_vacuum.node = rel->rd_node;
xlrec_vacuum.block = BufferGetBlockNumber(buf);
@@ -772,6 +773,7 @@ _bt_delitems(Relation rel, Buffer buf,
else
{
xl_btree_delete xlrec_delete;
+
xlrec_delete.node = rel->rd_node;
xlrec_delete.block = BufferGetBlockNumber(buf);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index b0acaf257f..01899cfc16 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.175 2010/02/08 04:33:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.176 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,8 +57,8 @@ typedef struct
IndexBulkDeleteCallback callback;
void *callback_state;
BTCycleId cycleid;
- BlockNumber lastBlockVacuumed; /* last blkno reached by Vacuum scan */
- BlockNumber lastUsedPage; /* blkno of last non-recyclable page */
+ BlockNumber lastBlockVacuumed; /* last blkno reached by Vacuum scan */
+ BlockNumber lastUsedPage; /* blkno of last non-recyclable page */
BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
} BTVacState;
@@ -630,7 +630,7 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
vstate.callback = callback;
vstate.callback_state = callback_state;
vstate.cycleid = cycleid;
- vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
+ vstate.lastBlockVacuumed = BTREE_METAPAGE; /* Initialise at first block */
vstate.lastUsedPage = BTREE_METAPAGE;
vstate.totFreePages = 0;
@@ -702,8 +702,8 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
/*
* We can't use _bt_getbuf() here because it always applies
* _bt_checkpage(), which will barf on an all-zero page. We want to
- * recycle all-zero pages, not fail. Also, we want to use a nondefault
- * buffer access strategy.
+ * recycle all-zero pages, not fail. Also, we want to use a
+ * nondefault buffer access strategy.
*/
buf = ReadBufferExtended(rel, MAIN_FORKNUM, num_pages - 1, RBM_NORMAL,
info->strategy);
@@ -856,23 +856,25 @@ restart:
htup = &(itup->t_tid);
/*
- * During Hot Standby we currently assume that XLOG_BTREE_VACUUM
- * records do not produce conflicts. That is only true as long
- * as the callback function depends only upon whether the index
- * tuple refers to heap tuples removed in the initial heap scan.
- * When vacuum starts it derives a value of OldestXmin. Backends
- * taking later snapshots could have a RecentGlobalXmin with a
- * later xid than the vacuum's OldestXmin, so it is possible that
- * row versions deleted after OldestXmin could be marked as killed
- * by other backends. The callback function *could* look at the
- * index tuple state in isolation and decide to delete the index
- * tuple, though currently it does not. If it ever did, we would
- * need to reconsider whether XLOG_BTREE_VACUUM records should
- * cause conflicts. If they did cause conflicts they would be
- * fairly harsh conflicts, since we haven't yet worked out a way
- * to pass a useful value for latestRemovedXid on the
- * XLOG_BTREE_VACUUM records. This applies to *any* type of index
- * that marks index tuples as killed.
+ * During Hot Standby we currently assume that
+ * XLOG_BTREE_VACUUM records do not produce conflicts. That is
+ * only true as long as the callback function depends only
+ * upon whether the index tuple refers to heap tuples removed
+ * in the initial heap scan. When vacuum starts it derives a
+ * value of OldestXmin. Backends taking later snapshots could
+ * have a RecentGlobalXmin with a later xid than the vacuum's
+ * OldestXmin, so it is possible that row versions deleted
+ * after OldestXmin could be marked as killed by other
+ * backends. The callback function *could* look at the index
+ * tuple state in isolation and decide to delete the index
+ * tuple, though currently it does not. If it ever did, we
+ * would need to reconsider whether XLOG_BTREE_VACUUM records
+ * should cause conflicts. If they did cause conflicts they
+ * would be fairly harsh conflicts, since we haven't yet
+ * worked out a way to pass a useful value for
+ * latestRemovedXid on the XLOG_BTREE_VACUUM records. This
+ * applies to *any* type of index that marks index tuples as
+ * killed.
*/
if (callback(htup, callback_state))
deletable[ndeletable++] = offnum;
@@ -885,13 +887,13 @@ restart:
*/
if (ndeletable > 0)
{
- BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
+ BlockNumber lastBlockVacuumed = BufferGetBlockNumber(buf);
_bt_delitems(rel, buf, deletable, ndeletable, true, vstate->lastBlockVacuumed);
/*
- * Keep track of the block number of the lastBlockVacuumed, so
- * we can scan those blocks as well during WAL replay. This then
+ * Keep track of the block number of the lastBlockVacuumed, so we
+ * can scan those blocks as well during WAL replay. This then
* provides concurrency protection and allows btrees to be used
* while in recovery.
*/
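The last hunk reflows the comment about remembering lastBlockVacuumed so that WAL replay can touch every block between two vacuum records. Putting the pieces of btvacuumpage() together in a simplified, standalone form (toy types; the real code scans buffer pages and emits XLOG_BTREE_VACUUM records):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ITEMS 8

typedef bool (*toy_callback) (int heap_tid, void *state);

/* Toy "delete these offsets and log the vacuumed-block range" step. */
static void
toy_delitems(unsigned block, const int *deletable, int ndeletable,
             unsigned last_block_vacuumed)
{
    /* The range may be empty when the blocks are consecutive. */
    printf("block %u: delete %d items; replay also touches blocks %u..%u\n",
           block, ndeletable, last_block_vacuumed + 1, block - 1);
}

/* One page of a toy vacuum scan. */
static unsigned
toy_vacuum_page(unsigned block, const int *heap_tids, int nitems,
                toy_callback callback, void *state,
                unsigned last_block_vacuumed)
{
    int         deletable[MAX_ITEMS];
    int         ndeletable = 0;
    int         off;

    for (off = 0; off < nitems; off++)
        if (callback(heap_tids[off], state))    /* dead in the heap? */
            deletable[ndeletable++] = off;

    if (ndeletable > 0)
    {
        toy_delitems(block, deletable, ndeletable, last_block_vacuumed);
        last_block_vacuumed = block;    /* remember for the next record */
    }
    return last_block_vacuumed;
}

static bool
every_other(int tid, void *state)
{
    (void) state;
    return (tid % 2) == 0;
}

int
main(void)
{
    int         tids[] = {10, 11, 12, 13};
    unsigned    last = 0;

    last = toy_vacuum_page(5, tids, 4, every_other, NULL, last);
    last = toy_vacuum_page(9, tids, 4, every_other, NULL, last);
    printf("lastBlockVacuumed is now %u\n", last);
    return 0;
}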
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 772215c181..84540b7353 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -59,7 +59,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.123 2010/01/20 19:43:40 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.124 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -216,12 +216,13 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
wstate.btws_use_wal = XLogIsNeeded() && !wstate.index->rd_istemp;
/*
- * Write an XLOG UNLOGGED record if WAL-logging was skipped because
- * WAL archiving is not enabled.
+ * Write an XLOG UNLOGGED record if WAL-logging was skipped because WAL
+ * archiving is not enabled.
*/
if (!wstate.btws_use_wal && !wstate.index->rd_istemp)
{
- char reason[NAMEDATALEN + 20];
+ char reason[NAMEDATALEN + 20];
+
snprintf(reason, sizeof(reason), "b-tree build on \"%s\"",
RelationGetRelationName(wstate.index));
XLogReportUnloggedStatement(reason);
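The hunk above merely inserts a blank line after the declaration of reason[], but the surrounding pattern is worth a note: formatting into a fixed-size local buffer with snprintf() and sizeof() cannot overflow and always NUL-terminates, truncating instead. A standalone illustration (NAME_MAX_LEN stands in for NAMEDATALEN):

#include <stdio.h>

#define NAME_MAX_LEN 64             /* stand-in for NAMEDATALEN */

int
main(void)
{
    char        reason[NAME_MAX_LEN + 20];
    const char *index_name = "my_index";

    /* snprintf never writes past sizeof(reason) and always NUL-terminates. */
    snprintf(reason, sizeof(reason), "b-tree build on \"%s\"", index_name);
    printf("%s\n", reason);
    return 0;
}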
@@ -492,10 +493,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
if (itupsz > BTMaxItemSize(npage))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itupsz,
- (unsigned long) BTMaxItemSize(npage),
- RelationGetRelationName(wstate->index)),
+ errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+ (unsigned long) itupsz,
+ (unsigned long) BTMaxItemSize(npage),
+ RelationGetRelationName(wstate->index)),
errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
"Consider a function index of an MD5 hash of the value, "
"or use full text indexing.")));
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index a7a3d7a12d..6b399d34a6 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.97 2010/01/03 05:39:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.98 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -515,7 +515,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
StrategyNumber strat;
/*
- * First, deal with cases where one or both args are NULL. This should
+ * First, deal with cases where one or both args are NULL. This should
* only happen when the scankeys represent IS NULL/NOT NULL conditions.
*/
if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL)
@@ -566,7 +566,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
break;
default:
elog(ERROR, "unrecognized StrategyNumber: %d", (int) strat);
- *result = false; /* keep compiler quiet */
+ *result = false; /* keep compiler quiet */
break;
}
return true;
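The re-indented /* keep compiler quiet */ above is the usual idiom for a switch over strategy numbers in which every real case assigns the output: the default branch reports the unexpected value and still stores something, so the compiler cannot complain about a possibly-uninitialized result. A minimal standalone version of the idiom (toy strategy values chosen to echo the btree strategy numbers):

#include <stdbool.h>
#include <stdio.h>

typedef enum { STRAT_LESS = 1, STRAT_EQUAL = 3, STRAT_GREATER = 5 } ToyStrategy;

static bool
toy_compare(ToyStrategy strat, int a, int b, bool *result)
{
    switch (strat)
    {
        case STRAT_LESS:
            *result = (a < b);
            break;
        case STRAT_EQUAL:
            *result = (a == b);
            break;
        case STRAT_GREATER:
            *result = (a > b);
            break;
        default:
            fprintf(stderr, "unrecognized strategy: %d\n", (int) strat);
            *result = false;    /* keep compiler quiet */
            return false;
    }
    return true;
}

int
main(void)
{
    bool        r;

    if (toy_compare(STRAT_LESS, 1, 2, &r))
        printf("result: %d\n", r);
    return 0;
}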
@@ -612,8 +612,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
* indexscan initiated by syscache lookup will use cross-data-type
* operators.)
*
- * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we
- * have to un-flip it to get the correct opfamily member.
+ * If the sk_strategy was flipped by _bt_fix_scankey_strategy, we have to
+ * un-flip it to get the correct opfamily member.
*/
strat = op->sk_strategy;
if (op->sk_flags & SK_BT_DESC)
@@ -653,7 +653,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
*
* Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a
* NULL comparison value. Since all btree operators are assumed strict,
- * a NULL means that the qual cannot be satisfied. We return TRUE if the
+ * a NULL means that the qual cannot be satisfied. We return TRUE if the
* comparison value isn't NULL, or FALSE if the scan should be abandoned.
*
* This function is applied to the *input* scankey structure; therefore
@@ -682,7 +682,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
* --- we can treat IS NULL as an equality operator for purposes of search
* strategy.
*
- * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
+ * Likewise, "x IS NOT NULL" is supported. We treat that as either "less
* than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS
* FIRST index.
*/
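The comment above notes that "x IS NULL" can be handled as an equality search and "x IS NOT NULL" as "less than NULL" in a NULLS LAST index or "greater than NULL" in a NULLS FIRST index. A small standalone comparator that orders NULLs last or first shows why those translations hold:

#include <stdbool.h>
#include <stdio.h>

/* Compare two optionally-NULL ints, placing NULLs first or last. */
static int
toy_cmp(bool a_null, int a, bool b_null, int b, bool nulls_first)
{
    if (a_null && b_null)
        return 0;
    if (a_null)
        return nulls_first ? -1 : 1;
    if (b_null)
        return nulls_first ? 1 : -1;
    return (a > b) - (a < b);
}

int
main(void)
{
    /*
     * With NULLS LAST, every non-null value compares "less than NULL", so an
     * IS NOT NULL scan is just "key < NULL"; with NULLS FIRST it becomes
     * "key > NULL".
     */
    printf("NULLS LAST:  7 vs NULL -> %d\n", toy_cmp(false, 7, true, 0, false));
    printf("NULLS FIRST: 7 vs NULL -> %d\n", toy_cmp(false, 7, true, 0, true));
    return 0;
}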
@@ -910,13 +910,13 @@ _bt_checkkeys(IndexScanDesc scan,
if (key->sk_flags & SK_SEARCHNULL)
{
if (isNull)
- continue; /* tuple satisfies this qual */
+ continue; /* tuple satisfies this qual */
}
else
{
Assert(key->sk_flags & SK_SEARCHNOTNULL);
if (!isNull)
- continue; /* tuple satisfies this qual */
+ continue; /* tuple satisfies this qual */
}
/*
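The final nbtutils.c hunk re-indents the /* tuple satisfies this qual */ comments inside _bt_checkkeys()'s key loop: NULL-related keys are decided purely from the attribute's null flag, satisfied keys are skipped with continue, and the first failed key ends the check. A compressed standalone sketch of that loop shape (invented key kinds mirroring SK_SEARCHNULL and SK_SEARCHNOTNULL):

#include <stdbool.h>
#include <stdio.h>

typedef enum { KEY_SEARCHNULL, KEY_SEARCHNOTNULL, KEY_EQUAL } ToyKeyKind;

typedef struct
{
    ToyKeyKind  kind;
    int         value;          /* only used for KEY_EQUAL */
} ToyScanKey;

/* Does one attribute (value + null flag) satisfy all keys? */
static bool
toy_checkkeys(bool is_null, int value, const ToyScanKey *keys, int nkeys)
{
    int         i;

    for (i = 0; i < nkeys; i++)
    {
        const ToyScanKey *key = &keys[i];

        if (key->kind == KEY_SEARCHNULL)
        {
            if (is_null)
                continue;       /* tuple satisfies this qual */
            return false;
        }
        if (key->kind == KEY_SEARCHNOTNULL)
        {
            if (!is_null)
                continue;       /* tuple satisfies this qual */
            return false;
        }
        /* ordinary key: a NULL attribute can never satisfy it */
        if (is_null || value != key->value)
            return false;
    }
    return true;
}

int
main(void)
{
    ToyScanKey  keys[] = {{KEY_SEARCHNOTNULL, 0}, {KEY_EQUAL, 42}};

    printf("42 matches: %d\n", toy_checkkeys(false, 42, keys, 2));
    printf("NULL matches: %d\n", toy_checkkeys(true, 0, keys, 2));
    return 0;
}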
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index f5320fb103..07416d599b 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.61 2010/02/13 00:59:58 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.62 2010/02/26 02:00:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -473,10 +473,10 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
- * If queries might be active then we need to ensure every block is unpinned
- * between the lastBlockVacuumed and the current block, if there are any.
- * This ensures that every block in the index is touched during VACUUM as
- * required to ensure scans work correctly.
+ * If queries might be active then we need to ensure every block is
+ * unpinned between the lastBlockVacuumed and the current block, if there
+ * are any. This ensures that every block in the index is touched during
+ * VACUUM as required to ensure scans work correctly.
*/
if (standbyState == STANDBY_SNAPSHOT_READY &&
(xlrec->lastBlockVacuumed + 1) != xlrec->block)
@@ -486,10 +486,10 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
for (; blkno < xlrec->block; blkno++)
{
/*
- * XXX we don't actually need to read the block, we
- * just need to confirm it is unpinned. If we had a special call
- * into the buffer manager we could optimise this so that
- * if the block is not in shared_buffers we confirm it as unpinned.
+ * XXX we don't actually need to read the block, we just need to
+ * confirm it is unpinned. If we had a special call into the
+ * buffer manager we could optimise this so that if the block is
+ * not in shared_buffers we confirm it as unpinned.
*
* Another simple optimization would be to check if there's any
* backends running; if not, we could just skip this.
@@ -505,9 +505,9 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
/*
* If the block was restored from a full page image, nothing more to do.
- * The RestoreBkpBlocks() call already pinned and took cleanup lock on
- * it. XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop
- * above, to make the disk access more sequential.
+ * The RestoreBkpBlocks() call already pinned and took cleanup lock on it.
+ * XXX: Perhaps we should call RestoreBkpBlocks() *after* the loop above,
+ * to make the disk access more sequential.
*/
if (record->xl_info & XLR_BKP_BLOCK_1)
return;
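The hunks above reflow the comments around btree_xlog_vacuum()'s catch-up loop: during Hot Standby, every block between lastBlockVacuumed + 1 and the block named in the record is pinned with a cleanup lock and released, so no standby scan can still hold a pin on a page the primary's VACUUM has already passed. The loop itself is a plain range walk; a trivial standalone sketch:

#include <stdio.h>

/* Stand-in for "read the buffer with a cleanup lock, then release it". */
static void
toy_touch_block(unsigned blkno)
{
    printf("pin + cleanup-lock + release block %u\n", blkno);
}

int
main(void)
{
    unsigned    last_block_vacuumed = 3;    /* from the previous vacuum record */
    unsigned    block = 7;                  /* block this record applies to */
    unsigned    blkno;

    /* Touch every intervening block, exactly as the replay loop does. */
    if (last_block_vacuumed + 1 != block)
        for (blkno = last_block_vacuumed + 1; blkno < block; blkno++)
            toy_touch_block(blkno);

    return 0;
}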
@@ -567,8 +567,8 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
xlrec = (xl_btree_delete *) XLogRecGetData(record);
/*
- * We don't need to take a cleanup lock to apply these changes.
- * See nbtree/README for details.
+ * We don't need to take a cleanup lock to apply these changes. See
+ * nbtree/README for details.
*/
buffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
if (!BufferIsValid(buffer))
@@ -819,13 +819,15 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
switch (info)
{
case XLOG_BTREE_DELETE:
+
/*
- * Btree delete records can conflict with standby queries. You might
- * think that vacuum records would conflict as well, but we've handled
- * that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
- * cleaned by the vacuum of the heap and so we can resolve any conflicts
- * just once when that arrives. After that any we know that no conflicts
- * exist from individual btree vacuum records on that index.
+ * Btree delete records can conflict with standby queries. You
+ * might think that vacuum records would conflict as well, but
+ * we've handled that already. XLOG_HEAP2_CLEANUP_INFO records
+ * provide the highest xid cleaned by the vacuum of the heap
+ * and so we can resolve any conflicts just once when that
+ * arrives. After that any we know that no conflicts exist
+ * from individual btree vacuum records on that index.
*/
{
xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
@@ -842,9 +844,11 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
break;
case XLOG_BTREE_REUSE_PAGE:
+
/*
- * Btree reuse page records exist to provide a conflict point when we
- * reuse pages in the index via the FSM. That's all it does though.
+ * Btree reuse page records exist to provide a conflict point
+ * when we reuse pages in the index via the FSM. That's all it
+ * does though.
*/
{
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);
@@ -859,8 +863,8 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Vacuum needs to pin and take cleanup lock on every leaf page,
- * a regular exclusive lock is enough for all other purposes.
+ * Vacuum needs to pin and take cleanup lock on every leaf page, a regular
+ * exclusive lock is enough for all other purposes.
*/
RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));
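The closing hunk rewraps the comment above the RestoreBkpBlocks() call: only XLOG_BTREE_VACUUM replay needs a cleanup lock on restored pages, every other record type gets by with an ordinary exclusive lock, which is what the boolean third argument selects. A toy, standalone sketch of that selection (the enum values here are stand-ins, not the real info codes):

#include <stdbool.h>
#include <stdio.h>

typedef enum { TOY_BTREE_INSERT, TOY_BTREE_DELETE, TOY_BTREE_VACUUM } ToyInfo;

/* Toy stand-in for restoring full-page images with the right lock strength. */
static void
toy_restore_bkp_blocks(ToyInfo info)
{
    bool        cleanup_lock = (info == TOY_BTREE_VACUUM);

    printf("restore backup blocks with %s lock\n",
           cleanup_lock ? "cleanup" : "plain exclusive");
}

int
main(void)
{
    toy_restore_bkp_blocks(TOY_BTREE_DELETE);
    toy_restore_bkp_blocks(TOY_BTREE_VACUUM);
    return 0;
}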