| author | Alexander Korotkov <akorotkov@postgresql.org> | 2018-12-21 02:37:37 +0300 |
|---|---|---|
| committer | Alexander Korotkov <akorotkov@postgresql.org> | 2018-12-21 02:37:37 +0300 |
| commit | c952eae52a33069e2e92d34f217b43d0eca3d7de (patch) | |
| tree | 4d9d8d95f7c1602630af3f83d8efd3ade5d98d33 /src/backend/access/gist/gist.c | |
| parent | 7c15cef86d37924505b3bb49b5e1ad1740b1d8f7 (diff) | |
| download | postgresql-c952eae52a33069e2e92d34f217b43d0eca3d7de.tar.gz | |
Check for conflicting queries during replay of gistvacuumpage()
013ebc0a7b implements so-called GiST microvacuum: gistgettuple() marks index
tuples as dead when kill_prior_tuple is set, and later, when a new tuple
insertion claims page space, those dead index tuples are physically deleted
from the page. When this deletion is replayed on a standby, it might conflict
with read-only queries, but 013ebc0a7b doesn't handle this. That may cause
some tuples to disappear from read-only snapshots on the standby.
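
For context, the marking side of the microvacuum can be pictured with the
sketch below. This is an illustration only, not the committed code; the helper
name mark_items_dead() and its arguments are made up for the example.

```c
#include "postgres.h"

#include "access/gist.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"

/*
 * Illustration only: mark the index tuples a scan reported as dead.  The
 * page is not WAL-logged here; like a hint-bit update, the buffer is just
 * marked dirty, and the actual deletion happens later in gistvacuumpage()
 * when an insertion needs the space.
 */
static void
mark_items_dead(Buffer buffer, OffsetNumber *killed_offsets, int nkilled)
{
	Page		page = BufferGetPage(buffer);
	int			i;

	for (i = 0; i < nkilled; i++)
		ItemIdMarkDead(PageGetItemId(page, killed_offsets[i]));

	/* Remember that this leaf page now holds reclaimable garbage. */
	GistMarkPageHasGarbage(page);

	/* Hint-style change: mark the buffer dirty without emitting WAL. */
	MarkBufferDirtyHint(buffer, true);
}
```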
This commit implements resolution of conflicts between replay of GiST
microvacuum and standby queries. On master we introduce a new WAL record
type, XLOG_GIST_DELETE, which carries the necessary information. On stable
releases we have to be tricky to keep WAL compatibility: the information
required for conflict processing is simply appended to the data of the
XLOG_GIST_PAGE_UPDATE record, so a PostgreSQL version that doesn't know
about conflict processing will just ignore it.
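
On the standby side, the conflict handling boils down to something like the
sketch below, performed during redo before the dead tuples are actually
removed from the page. The names node and latest_removed_xid are assumptions
for this example; the real record layout lives in the GiST WAL code.

```c
#include "postgres.h"

#include "access/xlog.h"
#include "storage/relfilenode.h"
#include "storage/standby.h"

/*
 * Sketch only: on a hot standby, cancel read-only queries whose snapshots
 * could still see the tuples that are about to be removed during redo.
 * node and latest_removed_xid are assumed to be derived from the WAL record.
 */
static void
resolve_microvacuum_conflicts(RelFileNode node,
							  TransactionId latest_removed_xid)
{
	if (InHotStandby)
		ResolveRecoveryConflictWithSnapshot(latest_removed_xid, node);
}
```

The extra payload exists precisely so that a standby can work out which
snapshots might still see the deleted tuples and cancel only those queries.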
Reported-by: Andres Freund
Diagnosed-by: Andres Freund
Discussion: https://postgr.es/m/20181212224524.scafnlyjindmrbe6%40alap3.anarazel.de
Author: Alexander Korotkov
Backpatch-through: 9.6
Diffstat (limited to 'src/backend/access/gist/gist.c')
-rw-r--r-- | src/backend/access/gist/gist.c | 23 |
1 file changed, 14 insertions, 9 deletions
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 8a42effdf7..a2cb84800e 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -38,7 +38,8 @@ static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 				 bool unlockbuf, bool unlockleftchild);
 static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
 				GISTSTATE *giststate, List *splitinfo, bool releasebuf);
-static void gistvacuumpage(Relation rel, Page page, Buffer buffer);
+static void gistvacuumpage(Relation rel, Page page, Buffer buffer,
+			   Relation heapRel);
 
 
 #define ROTATEDIST(d) do { \
@@ -172,7 +173,7 @@ gistinsert(Relation r, Datum *values, bool *isnull,
 						 values, isnull, true /* size is currently bogus */ );
 	itup->t_tid = *ht_ctid;
 
-	gistdoinsert(r, itup, 0, giststate);
+	gistdoinsert(r, itup, 0, giststate, heapRel);
 
 	/* cleanup */
 	MemoryContextSwitchTo(oldCxt);
@@ -218,7 +219,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
 				BlockNumber *newblkno,
 				Buffer leftchildbuf,
 				List **splitinfo,
-				bool markfollowright)
+				bool markfollowright,
+				Relation heapRel)
 {
 	BlockNumber blkno = BufferGetBlockNumber(buffer);
 	Page		page = BufferGetPage(buffer);
@@ -259,7 +261,7 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
 	 */
 	if (is_split && GistPageIsLeaf(page) && GistPageHasGarbage(page))
 	{
-		gistvacuumpage(rel, page, buffer);
+		gistvacuumpage(rel, page, buffer, heapRel);
 		is_split = gistnospace(page, itup, ntup, oldoffnum, freespace);
 	}
 
@@ -604,7 +606,8 @@ gistplacetopage(Relation rel, Size freespace, GISTSTATE *giststate,
  * so it does not bother releasing palloc'd allocations.
  */
 void
-gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
+gistdoinsert(Relation r, IndexTuple itup, Size freespace,
+			 GISTSTATE *giststate, Relation heapRel)
 {
 	ItemId		iid;
 	IndexTuple	idxtuple;
@@ -616,6 +619,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
 	memset(&state, 0, sizeof(GISTInsertState));
 	state.freespace = freespace;
 	state.r = r;
+	state.heapRel = heapRel;
 
 	/* Start from the root */
 	firststack.blkno = GIST_ROOT_BLKNO;
@@ -1232,7 +1236,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
 							   oldoffnum, NULL,
 							   leftchild,
 							   &splitinfo,
-							   true);
+							   true,
+							   state->heapRel);
 
 	/*
 	 * Before recursing up in case the page was split, release locks on the
@@ -1543,7 +1548,7 @@ freeGISTstate(GISTSTATE *giststate)
  * Function assumes that buffer is exclusively locked.
  */
 static void
-gistvacuumpage(Relation rel, Page page, Buffer buffer)
+gistvacuumpage(Relation rel, Page page, Buffer buffer, Relation heapRel)
 {
 	OffsetNumber deletable[MaxIndexTuplesPerPage];
 	int			ndeletable = 0;
@@ -1589,9 +1594,9 @@ gistvacuumpage(Relation rel, Page page, Buffer buffer)
 	{
 		XLogRecPtr	recptr;
 
-		recptr = gistXLogUpdate(buffer,
+		recptr = gistXLogDelete(buffer,
 								deletable, ndeletable,
-								NULL, 0, InvalidBuffer);
+								heapRel->rd_node);
 
 		PageSetLSN(page, recptr);
 	}
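
The diffstat above is limited to gist.c, so the definition of the new
gistXLogDelete() (and its redo side) is not shown here. As a rough picture of
how such a record can be assembled with the generic WAL-insert API, consider
the sketch below; the record struct, the info code, and all names are
placeholders for illustration, not the actual gistxlog.c definitions.

```c
#include "postgres.h"

#include "access/rmgr.h"
#include "access/xloginsert.h"
#include "storage/bufmgr.h"
#include "storage/off.h"
#include "storage/relfilenode.h"

/*
 * Placeholder record layout for this sketch; the real one lives in the
 * GiST WAL headers.
 */
typedef struct xl_gist_delete_sketch
{
	RelFileNode hnode;			/* heap relation the index entries point into */
	uint16		ntodelete;		/* number of offsets being removed */
	/* the offset array is registered separately below */
} xl_gist_delete_sketch;

#define XLOG_GIST_DELETE_SKETCH 0x90	/* placeholder info code */

/*
 * Sketch only: log the microvacuum deletion together with the heap
 * relfilenode so a standby has what it needs for conflict processing.
 */
static XLogRecPtr
gist_xlog_delete_sketch(Buffer buffer, OffsetNumber *todelete,
						int ntodelete, RelFileNode hnode)
{
	xl_gist_delete_sketch xlrec;

	xlrec.hnode = hnode;
	xlrec.ntodelete = (uint16) ntodelete;

	XLogBeginInsert();
	XLogRegisterData((char *) &xlrec, sizeof(xl_gist_delete_sketch));
	XLogRegisterData((char *) todelete, ntodelete * sizeof(OffsetNumber));
	XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

	return XLogInsert(RM_GIST_ID, XLOG_GIST_DELETE_SKETCH);
}
```

On the stable branches, as the commit message explains, no new record type is
introduced; the equivalent extra payload is instead appended to the existing
XLOG_GIST_PAGE_UPDATE record to preserve WAL compatibility.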