author     Tom Lane <tgl@sss.pgh.pa.us>    2006-03-31 23:32:07 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>    2006-03-31 23:32:07 +0000
commit     a8b8f4db23cff16af50a2b960cb8d20d39b761cf
tree       224f8cb0da7e2e17ccfd7b8a030db1664acd46c1 /src/backend/commands/vacuumlazy.c
parent     89395bfa6f2fafccec10be377fcf759030910654
Clean up WAL/buffer interactions as per my recent proposal. Get rid of the misleadingly-named WriteBuffer routine, and instead require routines that change buffer pages to call MarkBufferDirty (which does exactly what it says). We also require that they do so before calling XLogInsert; this takes care of the synchronization requirement documented in SyncOneBuffer.

Note that because bufmgr takes the buffer content lock (in shared mode) while writing out any buffer, it doesn't matter whether MarkBufferDirty is executed before the buffer content change is complete, so long as the content change is completed before releasing exclusive lock on the buffer. So it's OK to set the dirtybit before we fill in the LSN. This eliminates the former kluge of needing to set the dirtybit in LockBuffer.

Aside from making the code more transparent, we can also add some new debugging assertions, in particular that the caller of MarkBufferDirty must hold the buffer content lock, not merely a pin.
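In code terms, the rule reduces to a fixed call sequence in any routine that changes a buffer page. The fragment below is an illustrative sketch, not code from this patch: buf, page, and onerel are assumed to be a pinned buffer, its page, and the relation being changed, and info/rdata stand in for whatever WAL record data the caller has assembled.

    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);

    START_CRIT_SECTION();

    /* ... apply the page change while holding the exclusive content lock ... */

    /* dirty the buffer before XLogInsert, per SyncOneBuffer's requirement */
    MarkBufferDirty(buf);

    if (!onerel->rd_istemp)     /* temp relations are not WAL-logged */
    {
        XLogRecPtr  recptr;

        recptr = XLogInsert(RM_HEAP_ID, info, rdata);
        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    /* the content change is complete, so it is now safe to drop the lock */
    UnlockReleaseBuffer(buf);

UnlockReleaseBuffer(buf) here does the work of the LockBuffer(buf, BUFFER_LOCK_UNLOCK) plus ReleaseBuffer(buf) pairs that the hunks below remove.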
Diffstat (limited to 'src/backend/commands/vacuumlazy.c')
-rw-r--r--  src/backend/commands/vacuumlazy.c  |  31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 2e1a4da38b..b270538bd8 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.68 2006/03/05 15:58:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.69 2006/03/31 23:32:06 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -317,8 +317,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
lazy_record_free_space(vacrelstats, blkno,
PageGetFreeSpace(page));
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -327,8 +327,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
empty_pages++;
lazy_record_free_space(vacrelstats, blkno,
PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -439,12 +438,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
if (hastup)
vacrelstats->nonempty_pages = blkno + 1;
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
if (pgchanged)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
}
/* save stats for use later */
@@ -524,8 +520,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
page = BufferGetPage(buf);
lazy_record_free_space(vacrelstats, tblk,
PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
npages++;
}
@@ -541,7 +536,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
* lazy_vacuum_page() -- free dead tuples on a page
* and repair its fragmentation.
*
- * Caller is expected to handle reading, locking, and writing the buffer.
+ * Caller must hold pin and lock on the buffer.
*
* tupindex is the index in vacrelstats->dead_tuples of the first dead
* tuple for this page. We assume the rest follow sequentially.
@@ -557,6 +552,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
ItemId itemid;
START_CRIT_SECTION();
+
for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
{
BlockNumber tblk;
@@ -572,6 +568,8 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
uncnt = PageRepairFragmentation(page, unused);
+ MarkBufferDirty(buffer);
+
/* XLOG stuff */
if (!onerel->rd_istemp)
{
@@ -871,8 +869,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
if (PageIsNew(page) || PageIsEmpty(page))
{
/* PageIsNew probably shouldn't happen... */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -928,9 +925,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
}
} /* scan along page */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
/* Done scanning if we found a tuple here */
if (hastup)