author     Tom Lane <tgl@sss.pgh.pa.us>  2006-03-31 23:32:07 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>  2006-03-31 23:32:07 +0000
commit     a8b8f4db23cff16af50a2b960cb8d20d39b761cf (patch)
tree       224f8cb0da7e2e17ccfd7b8a030db1664acd46c1 /src/backend/commands
parent     89395bfa6f2fafccec10be377fcf759030910654 (diff)
Clean up WAL/buffer interactions as per my recent proposal. Get rid of the
misleadingly-named WriteBuffer routine, and instead require routines that
change buffer pages to call MarkBufferDirty (which does exactly what it says).
We also require that they do so before calling XLogInsert; this takes care of
the synchronization requirement documented in SyncOneBuffer.

Note that because bufmgr takes the buffer content lock (in shared mode) while
writing out any buffer, it doesn't matter whether MarkBufferDirty is executed
before the buffer content change is complete, so long as the content change
is completed before releasing exclusive lock on the buffer. So it's OK to set
the dirtybit before we fill in the LSN. This eliminates the former kluge of
needing to set the dirtybit in LockBuffer.

Aside from making the code more transparent, we can also add some new
debugging assertions, in particular that the caller of MarkBufferDirty must
hold the buffer content lock, not merely a pin.
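
For reference, the update sequence that the patch converts each call site to
looks roughly like the sketch below. It is not taken verbatim from the patch:
the resource-manager id (RM_MY_ID), the record info byte (XLOG_MY_OP), and the
xlrec payload are hypothetical placeholders, and buf, page, rel, and xlrec are
assumed to be set up by the caller.

    /*
     * Sketch of the buffer-update protocol established by this commit.
     * Assumes "buf" is pinned and exclusively locked and "page" is its page.
     */
    START_CRIT_SECTION();

    /* ... modify the page while still holding the exclusive content lock ... */

    /* Dirty the buffer before XLogInsert, per SyncOneBuffer's requirement */
    MarkBufferDirty(buf);

    /* XLOG stuff (skipped for temp relations, as in the patched call sites) */
    if (!rel->rd_istemp)
    {
        XLogRecPtr  recptr;
        XLogRecData rdata[1];

        rdata[0].data = (char *) &xlrec;    /* hypothetical record body */
        rdata[0].len = sizeof(xlrec);
        rdata[0].buffer = InvalidBuffer;
        rdata[0].next = NULL;

        recptr = XLogInsert(RM_MY_ID, XLOG_MY_OP, rdata);

        PageSetLSN(page, recptr);
        PageSetTLI(page, ThisTimeLineID);
    }

    END_CRIT_SECTION();

    /* Drop the content lock and the pin in one call */
    UnlockReleaseBuffer(buf);

The point of the ordering is visible in the sketch: the dirtybit is set before
the WAL record and LSN exist, which is safe because the exclusive content lock
is not released until after the LSN has been stamped on the page.
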
Diffstat (limited to 'src/backend/commands')
-rw-r--r--   src/backend/commands/sequence.c     30
-rw-r--r--   src/backend/commands/vacuum.c       72
-rw-r--r--   src/backend/commands/vacuumlazy.c   31
3 files changed, 66 insertions, 67 deletions
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 0e448271e1..10ebe56b6a 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.131 2006/03/29 21:17:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.132 2006/03/31 23:32:06 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -249,6 +249,8 @@ DefineSequence(CreateSeqStmt *seq)
tuple->t_data->t_infomask |= HEAP_XMIN_COMMITTED;
}
+ MarkBufferDirty(buf);
+
/* XLOG stuff */
if (!rel->rd_istemp)
{
@@ -281,8 +283,8 @@ DefineSequence(CreateSeqStmt *seq)
END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
+
heap_close(rel, NoLock);
}
@@ -331,6 +333,8 @@ AlterSequence(AlterSeqStmt *stmt)
START_CRIT_SECTION();
+ MarkBufferDirty(buf);
+
/* XLOG stuff */
if (!seqrel->rd_istemp)
{
@@ -358,9 +362,7 @@ AlterSequence(AlterSeqStmt *stmt)
END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
relation_close(seqrel, NoLock);
}
@@ -550,6 +552,8 @@ nextval_internal(Oid relid)
START_CRIT_SECTION();
+ MarkBufferDirty(buf);
+
/* XLOG stuff */
if (logit && !seqrel->rd_istemp)
{
@@ -587,9 +591,7 @@ nextval_internal(Oid relid)
END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
relation_close(seqrel, NoLock);
@@ -720,6 +722,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
START_CRIT_SECTION();
+ MarkBufferDirty(buf);
+
/* XLOG stuff */
if (!seqrel->rd_istemp)
{
@@ -758,9 +762,7 @@ do_setval(Oid relid, int64 next, bool iscalled)
END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
relation_close(seqrel, NoLock);
}
@@ -1159,8 +1161,8 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buffer);
+ MarkBufferDirty(buffer);
+ UnlockReleaseBuffer(buffer);
}
void
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 16be5171bf..c6ebdd5770 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.325 2006/03/05 15:58:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.326 2006/03/31 23:32:06 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -729,6 +729,8 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
if (!hasindex)
pgcform->relhaspkey = false;
+ MarkBufferDirty(buffer);
+
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
@@ -739,8 +741,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
*/
CacheInvalidateHeapTuple(rd, &rtup);
- /* Write the buffer */
- WriteBuffer(buffer);
+ ReleaseBuffer(buffer);
heap_close(rd, RowExclusiveLock);
}
@@ -795,11 +796,12 @@ vac_update_dbstats(Oid dbid,
dbform->datvacuumxid = vacuumXID;
dbform->datfrozenxid = frozenXID;
+ MarkBufferDirty(scan->rs_cbuf);
+
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
- /* invalidate the tuple in the cache and write the buffer */
+ /* invalidate the tuple in the cache so we'll see the change in cache */
CacheInvalidateHeapTuple(relation, tuple);
- WriteNoReleaseBuffer(scan->rs_cbuf);
heap_endscan(scan);
@@ -1298,6 +1300,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
+ MarkBufferDirty(buf);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
empty_pages++;
@@ -1305,8 +1308,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
vacpagecopy = copy_vac_page(vacpage);
vpage_insert(vacuum_pages, vacpagecopy);
vpage_insert(fraged_pages, vacpagecopy);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -1321,8 +1323,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
vacpagecopy = copy_vac_page(vacpage);
vpage_insert(vacuum_pages, vacpagecopy);
vpage_insert(fraged_pages, vacpagecopy);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -1527,11 +1528,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
else
empty_end_pages = 0;
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
if (pgchanged)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
}
pfree(vacpage);
@@ -1682,7 +1681,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
OffsetNumber offnum,
maxoff;
bool isempty,
- dowrite,
chain_tuple_moved;
vacuum_delay_point();
@@ -1714,8 +1712,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
isempty = PageIsEmpty(page);
- dowrite = false;
-
/* Is the page in the vacuum_pages list? */
if (blkno == last_vacuum_block)
{
@@ -1726,7 +1722,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
vacuum_page(onerel, buf, last_vacuum_page);
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- dowrite = true;
}
else
Assert(isempty);
@@ -1884,7 +1879,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (dst_buffer != InvalidBuffer)
{
- WriteBuffer(dst_buffer);
+ ReleaseBuffer(dst_buffer);
dst_buffer = InvalidBuffer;
}
@@ -2148,8 +2143,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
keep_tuples++;
- WriteBuffer(dst_buffer);
- WriteBuffer(Cbuf);
+ ReleaseBuffer(dst_buffer);
+ ReleaseBuffer(Cbuf);
} /* end of move-the-tuple-chain loop */
dst_buffer = InvalidBuffer;
@@ -2166,7 +2161,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
if (dst_buffer != InvalidBuffer)
{
- WriteBuffer(dst_buffer);
+ ReleaseBuffer(dst_buffer);
dst_buffer = InvalidBuffer;
}
for (i = 0; i < num_fraged_pages; i++)
@@ -2273,12 +2268,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
sizeof(OffsetNumber), vac_cmp_offno);
}
vpage_insert(&Nvacpagelist, copy_vac_page(vacpage));
- WriteBuffer(buf);
}
- else if (dowrite)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
+
+ ReleaseBuffer(buf);
if (offnum <= maxoff)
break; /* had to quit early, see above note */
@@ -2290,7 +2282,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (dst_buffer != InvalidBuffer)
{
Assert(num_moved > 0);
- WriteBuffer(dst_buffer);
+ ReleaseBuffer(dst_buffer);
}
if (num_moved > 0)
@@ -2332,8 +2324,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
page = BufferGetPage(buf);
if (!PageIsEmpty(page))
vacuum_page(onerel, buf, *curpage);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
}
}
@@ -2449,6 +2440,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
uncnt = PageRepairFragmentation(page, unused);
+ MarkBufferDirty(buf);
+
/* XLOG stuff */
if (!onerel->rd_istemp)
{
@@ -2469,8 +2462,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
END_CRIT_SECTION();
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
}
/* now - free new list of reaped pages */
@@ -2601,6 +2593,10 @@ move_chain_tuple(Relation rel,
newtup.t_data->t_ctid = *ctid;
*ctid = newtup.t_self;
+ MarkBufferDirty(dst_buf);
+ if (dst_buf != old_buf)
+ MarkBufferDirty(old_buf);
+
/* XLOG stuff */
if (!rel->rd_istemp)
{
@@ -2708,6 +2704,9 @@ move_plain_tuple(Relation rel,
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
+ MarkBufferDirty(dst_buf);
+ MarkBufferDirty(old_buf);
+
/* XLOG stuff */
if (!rel->rd_istemp)
{
@@ -2832,8 +2831,8 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
else
htup->t_infomask |= HEAP_XMIN_INVALID;
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
Assert((*curpage)->offsets_used == num_tuples);
checked_moved += num_tuples;
}
@@ -2867,8 +2866,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
buf = ReadBuffer(onerel, (*vacpage)->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
vacuum_page(onerel, buf, *vacpage);
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
}
}
@@ -2889,6 +2887,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* vacuum_page() -- free dead tuples on a page
* and repair its fragmentation.
+ *
+ * Caller must hold pin and lock on buffer.
*/
static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
@@ -2912,6 +2912,8 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
+ MarkBufferDirty(buffer);
+
/* XLOG stuff */
if (!onerel->rd_istemp)
{
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 2e1a4da38b..b270538bd8 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.68 2006/03/05 15:58:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.69 2006/03/31 23:32:06 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -317,8 +317,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
lazy_record_free_space(vacrelstats, blkno,
PageGetFreeSpace(page));
}
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -327,8 +327,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
empty_pages++;
lazy_record_free_space(vacrelstats, blkno,
PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -439,12 +438,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
if (hastup)
vacrelstats->nonempty_pages = blkno + 1;
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
if (pgchanged)
- WriteBuffer(buf);
- else
- ReleaseBuffer(buf);
+ MarkBufferDirty(buf);
+ UnlockReleaseBuffer(buf);
}
/* save stats for use later */
@@ -524,8 +520,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
page = BufferGetPage(buf);
lazy_record_free_space(vacrelstats, tblk,
PageGetFreeSpace(page));
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- WriteBuffer(buf);
+ UnlockReleaseBuffer(buf);
npages++;
}
@@ -541,7 +536,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
* lazy_vacuum_page() -- free dead tuples on a page
* and repair its fragmentation.
*
- * Caller is expected to handle reading, locking, and writing the buffer.
+ * Caller must hold pin and lock on the buffer.
*
* tupindex is the index in vacrelstats->dead_tuples of the first dead
* tuple for this page. We assume the rest follow sequentially.
@@ -557,6 +552,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
ItemId itemid;
START_CRIT_SECTION();
+
for (; tupindex < vacrelstats->num_dead_tuples; tupindex++)
{
BlockNumber tblk;
@@ -572,6 +568,8 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
uncnt = PageRepairFragmentation(page, unused);
+ MarkBufferDirty(buffer);
+
/* XLOG stuff */
if (!onerel->rd_istemp)
{
@@ -871,8 +869,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
if (PageIsNew(page) || PageIsEmpty(page))
{
/* PageIsNew probably shouldn't happen... */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
continue;
}
@@ -928,9 +925,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
}
} /* scan along page */
- LockBuffer(buf, BUFFER_LOCK_UNLOCK);
-
- ReleaseBuffer(buf);
+ UnlockReleaseBuffer(buf);
/* Done scanning if we found a tuple here */
if (hastup)