Diffstat (limited to 'src/backend/access/heap')
-rw-r--r--   src/backend/access/heap/heapam.c          60
-rw-r--r--   src/backend/access/heap/visibilitymap.c   28
2 files changed, 45 insertions, 43 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 2e45c041a6..148d88ba27 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.277 2009/06/11 14:48:53 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.278 2009/08/24 02:18:31 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -78,7 +78,8 @@ static HeapScanDesc heap_beginscan_internal(Relation relation,
                 bool allow_strat, bool allow_sync,
                 bool is_bitmapscan);
 static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
-        ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+        ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move,
+        bool all_visible_cleared, bool new_all_visible_cleared);
 static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs,
                 HeapTuple oldtup, HeapTuple newtup);
 
@@ -2760,21 +2761,29 @@ l2:
     /* record address of new tuple in t_ctid of old one */
     oldtup.t_data->t_ctid = heaptup->t_self;
 
+    /* clear PD_ALL_VISIBLE flags */
+    if (PageIsAllVisible(BufferGetPage(buffer)))
+    {
+        all_visible_cleared = true;
+        PageClearAllVisible(BufferGetPage(buffer));
+    }
+    if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
+    {
+        all_visible_cleared_new = true;
+        PageClearAllVisible(BufferGetPage(newbuf));
+    }
+
     if (newbuf != buffer)
         MarkBufferDirty(newbuf);
     MarkBufferDirty(buffer);
 
-    /*
-     * Note: we mustn't clear PD_ALL_VISIBLE flags before writing the WAL
-     * record, because log_heap_update looks at those flags to set the
-     * corresponding flags in the WAL record.
-     */
-
     /* XLOG stuff */
     if (!relation->rd_istemp)
     {
         XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
-                                            newbuf, heaptup, false);
+                                            newbuf, heaptup, false,
+                                            all_visible_cleared,
+                                            all_visible_cleared_new);
 
         if (newbuf != buffer)
         {
@@ -2785,18 +2794,6 @@ l2:
         PageSetTLI(BufferGetPage(buffer), ThisTimeLineID);
     }
 
-    /* Clear PD_ALL_VISIBLE flags */
-    if (PageIsAllVisible(BufferGetPage(buffer)))
-    {
-        all_visible_cleared = true;
-        PageClearAllVisible(BufferGetPage(buffer));
-    }
-    if (newbuf != buffer && PageIsAllVisible(BufferGetPage(newbuf)))
-    {
-        all_visible_cleared_new = true;
-        PageClearAllVisible(BufferGetPage(newbuf));
-    }
-
     END_CRIT_SECTION();
 
     if (newbuf != buffer)
@@ -3910,7 +3907,8 @@ log_heap_freeze(Relation reln, Buffer buffer,
  */
 static XLogRecPtr
 log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
-                Buffer newbuf, HeapTuple newtup, bool move)
+                Buffer newbuf, HeapTuple newtup, bool move,
+                bool all_visible_cleared, bool new_all_visible_cleared)
 {
     /*
      * Note: xlhdr is declared to have adequate size and correct alignment for
@@ -3946,9 +3944,9 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
 
     xlrec.target.node = reln->rd_node;
     xlrec.target.tid = from;
-    xlrec.all_visible_cleared = PageIsAllVisible(BufferGetPage(oldbuf));
+    xlrec.all_visible_cleared = all_visible_cleared;
     xlrec.newtid = newtup->t_self;
-    xlrec.new_all_visible_cleared = PageIsAllVisible(BufferGetPage(newbuf));
+    xlrec.new_all_visible_cleared = new_all_visible_cleared;
 
     rdata[0].data = (char *) &xlrec;
     rdata[0].len = SizeOfHeapUpdate;
@@ -4015,9 +4013,11 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
  */
 XLogRecPtr
 log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
-              Buffer newbuf, HeapTuple newtup)
+              Buffer newbuf, HeapTuple newtup,
+              bool all_visible_cleared, bool new_all_visible_cleared)
 {
-    return log_heap_update(reln, oldbuf, from, newbuf, newtup, true);
+    return log_heap_update(reln, oldbuf, from, newbuf, newtup, true,
+                           all_visible_cleared, new_all_visible_cleared);
 }
 
 /*
@@ -4222,7 +4222,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
     blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
 
     /*
-     * The visibility map always needs to be updated, even if the heap page is
+     * The visibility map may need to be fixed even if the heap page is
      * already up-to-date.
      */
     if (xlrec->all_visible_cleared)
@@ -4300,7 +4300,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
     blkno = ItemPointerGetBlockNumber(&(xlrec->target.tid));
 
     /*
-     * The visibility map always needs to be updated, even if the heap page is
+     * The visibility map may need to be fixed even if the heap page is
      * already up-to-date.
      */
     if (xlrec->all_visible_cleared)
@@ -4412,7 +4412,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
     Size       freespace;
 
     /*
-     * The visibility map always needs to be updated, even if the heap page is
+     * The visibility map may need to be fixed even if the heap page is
      * already up-to-date.
      */
     if (xlrec->all_visible_cleared)
@@ -4507,7 +4507,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool move, bool hot_update)
 newt:;
 
     /*
-     * The visibility map always needs to be updated, even if the heap page is
+     * The visibility map may need to be fixed even if the heap page is
      * already up-to-date.
      */
     if (xlrec->new_all_visible_cleared)
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 3fc26bea8b..50462f27f0 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.5 2009/06/18 10:08:08 heikki Exp $
+ *    $PostgreSQL: pgsql/src/backend/access/heap/visibilitymap.c,v 1.6 2009/08/24 02:18:31 tgl Exp $
  *
  * INTERFACE ROUTINES
  *        visibilitymap_clear - clear a bit in the visibility map
@@ -19,10 +19,10 @@
  * NOTES
  *
  * The visibility map is a bitmap with one bit per heap page. A set bit means
- * that all tuples on the page are visible to all transactions, and doesn't
- * therefore need to be vacuumed. The map is conservative in the sense that we
- * make sure that whenever a bit is set, we know the condition is true, but if
- * a bit is not set, it might or might not be.
+ * that all tuples on the page are known visible to all transactions, and
+ * therefore the page doesn't need to be vacuumed. The map is conservative in
+ * the sense that we make sure that whenever a bit is set, we know the
+ * condition is true, but if a bit is not set, it might or might not be true.
  *
  * There's no explicit WAL logging in the functions in this file. The callers
  * must make sure that whenever a bit is cleared, the bit is cleared on WAL
@@ -34,10 +34,11 @@
  * make VACUUM skip pages that need vacuuming, until the next anti-wraparound
  * vacuum. The visibility map is not used for anti-wraparound vacuums, because
  * an anti-wraparound vacuum needs to freeze tuples and observe the latest xid
- * present in the table, also on pages that don't have any dead tuples.
+ * present in the table, even on pages that don't have any dead tuples.
  *
  * Although the visibility map is just a hint at the moment, the PD_ALL_VISIBLE
- * flag on heap pages *must* be correct.
+ * flag on heap pages *must* be correct, because it is used to skip visibility
+ * checking.
  *
  * LOCKING
  *
@@ -55,17 +56,17 @@
  * When a bit is set, the LSN of the visibility map page is updated to make
  * sure that the visibility map update doesn't get written to disk before the
  * WAL record of the changes that made it possible to set the bit is flushed.
- * But when a bit is cleared, we don't have to do that because it's always OK
- * to clear a bit in the map from correctness point of view.
+ * But when a bit is cleared, we don't have to do that because it's always
+ * safe to clear a bit in the map from correctness point of view.
  *
  * TODO
  *
- * It would be nice to use the visibility map to skip visibility checkes in
+ * It would be nice to use the visibility map to skip visibility checks in
  * index scans.
  *
  * Currently, the visibility map is not 100% correct all the time.
  * During updates, the bit in the visibility map is cleared after releasing
- * the lock on the heap page. During the window after releasing the lock
+ * the lock on the heap page. During the window between releasing the lock
  * and clearing the bit in the visibility map, the bit in the visibility map
  * is set, but the new insertion or deletion is not yet visible to other
  * backends.
@@ -73,7 +74,7 @@
  * That might actually be OK for the index scans, though. The newly inserted
  * tuple wouldn't have an index pointer yet, so all tuples reachable from an
  * index would still be visible to all other backends, and deletions wouldn't
- * be visible to other backends yet.
+ * be visible to other backends yet. (But HOT breaks that argument, no?)
  *
  * There's another hole in the way the PD_ALL_VISIBLE flag is set. When
  * vacuum observes that all tuples are visible to all, it sets the flag on
@@ -81,7 +82,8 @@
  * crash, and only the visibility map page was flushed to disk, we'll have
  * a bit set in the visibility map, but the corresponding flag on the heap
  * page is not set. If the heap page is then updated, the updater won't
- * know to clear the bit in the visibility map. (Isn't that prevented by
+ * know to clear the bit in the visibility map. (Isn't that prevented by
+ * the LSN interlock?)
  *
  *-------------------------------------------------------------------------
  */
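
Note on the heapam.c hunks: previously, log_heap_update() inferred the WAL flags by calling PageIsAllVisible() on the buffers, so heap_update() could not clear PD_ALL_VISIBLE until after the record was written; that left the page modification incompletely ordered with respect to the WAL record describing it. The patch clears the flags first, still inside the critical section, and threads the outcome through as explicit booleans. The following standalone C program is a toy model of that ordering, not PostgreSQL code; every name in it is illustrative.

#include <stdbool.h>
#include <stdio.h>

struct page
{
    bool all_visible;          /* stand-in for PD_ALL_VISIBLE */
};

struct wal_record
{
    bool all_visible_cleared;  /* stand-in for xl_heap_update's flag */
};

/* Stand-in for the patched log_heap_update(): the caller passes the
 * flag in, instead of the logger re-reading the page. */
static struct wal_record
log_update(bool all_visible_cleared)
{
    struct wal_record rec;

    rec.all_visible_cleared = all_visible_cleared;
    return rec;
}

int
main(void)
{
    struct page pg = { true };      /* page starts out all-visible */
    bool        cleared = false;

    /* 1. Modify the page first: clear the flag and remember we did. */
    if (pg.all_visible)
    {
        cleared = true;
        pg.all_visible = false;
    }

    /* 2. Only then build the WAL record, from the remembered value.
     * Re-reading pg.all_visible at this point would always see false,
     * which is why the patch passes the boolean down explicitly. */
    struct wal_record rec = log_update(cleared);

    printf("all_visible_cleared recorded: %d\n", rec.all_visible_cleared);
    return 0;
}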
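The NOTES hunk in visibilitymap.c describes the map as one bit per heap page, set only when every tuple on the page is known visible to all transactions. A hedged sketch of how such a one-bit-per-page map can be addressed follows; the page and header sizes are illustrative assumptions, not the actual PostgreSQL constants.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       8192u   /* assumed block size */
#define PAGE_HEADER       24u   /* assumed page header overhead */
#define BITS_PER_BYTE      8u
#define MAP_BYTES       (PAGE_SIZE - PAGE_HEADER)
#define BLOCKS_PER_MAP_PAGE (MAP_BYTES * BITS_PER_BYTE)

int
main(void)
{
    uint32_t heap_blk = 123456;  /* arbitrary heap block number */

    /* Locate the map page, then the byte and bit within that page,
     * that hold this heap block's all-visible bit. */
    uint32_t map_page = heap_blk / BLOCKS_PER_MAP_PAGE;
    uint32_t map_byte = (heap_blk % BLOCKS_PER_MAP_PAGE) / BITS_PER_BYTE;
    uint32_t map_bit  = heap_blk % BITS_PER_BYTE;

    printf("heap block %u -> map page %u, byte %u, bit %u\n",
           heap_blk, map_page, map_byte, map_bit);
    return 0;
}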
