author     Tom Lane <tgl@sss.pgh.pa.us>  2002-03-03 17:47:56 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>  2002-03-03 17:47:56 +0000
commit     26ac2171735f288a1380e173ffbb6af1a50183d8 (patch)
tree       e2604a289c8065884497475e46b1b2731b867267 /src/backend/utils/cache/catcache.c
parent     592caa089756d1b9ddf2675e9027d0c8635a6918 (diff)
download   postgresql-26ac2171735f288a1380e173ffbb6af1a50183d8.tar.gz
Catcaches can now store negative entries as well as positive ones, to speed up
repetitive failed searches; per pghackers discussion in late January.  inval.c
logic substantially simplified, since we can now treat inserts and deletes alike
as far as inval events are concerned.  Some repair work needed in
heap_create_with_catalog, which turns out to have been doing
CommandCounterIncrement at a point where the new relation has
non-self-consistent catalog entries.  With the new inval code, that resulted in
assert failures during a relcache entry rebuild.
Diffstat (limited to 'src/backend/utils/cache/catcache.c')
-rw-r--r--  src/backend/utils/cache/catcache.c  806
1 file changed, 493 insertions, 313 deletions
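
The idea the commit message describes can be illustrated with a minimal,
self-contained C sketch (not taken from the patch; names such as CacheEntry,
cached_lookup, and expensive_lookup are invented for illustration -- the real
implementation is SearchCatCache in the diff below).  A failed probe of the
slow path is remembered just like a successful one, so repeated searches for
the same missing key never reach the slow path again:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NBUCKETS 16

    /* One cache entry: a key plus either a cached value or a "negative" marker. */
    typedef struct CacheEntry
    {
        char    key[64];
        int     value;              /* meaningful only when negative is false */
        int     negative;           /* true: we already know the key does not exist */
        struct CacheEntry *next;
    } CacheEntry;

    static CacheEntry *buckets[NBUCKETS];

    static unsigned int
    hash_key(const char *key)
    {
        unsigned int h = 0;

        while (*key)
            h = h * 31 + (unsigned char) *key++;
        return h % NBUCKETS;
    }

    /* Stand-in for the expensive probe (an index or heap scan, in catcache terms). */
    static int
    expensive_lookup(const char *key, int *value)
    {
        if (strcmp(key, "exists") == 0)
        {
            *value = 42;
            return 1;
        }
        return 0;                   /* not found */
    }

    /*
     * Search the cache; on a miss, do the expensive probe once and remember
     * the outcome -- even a failed one -- as a cache entry.
     */
    static int
    cached_lookup(const char *key, int *value)
    {
        unsigned int idx = hash_key(key);
        CacheEntry *e;

        for (e = buckets[idx]; e != NULL; e = e->next)
        {
            if (strcmp(e->key, key) == 0)
            {
                if (e->negative)
                    return 0;       /* negative hit: known to be absent */
                *value = e->value;
                return 1;           /* positive hit */
            }
        }

        /* Cache miss: probe the slow path and cache whatever we learn. */
        e = (CacheEntry *) malloc(sizeof(CacheEntry));
        if (e == NULL)
        {
            perror("malloc");
            exit(1);
        }
        strncpy(e->key, key, sizeof(e->key) - 1);
        e->key[sizeof(e->key) - 1] = '\0';
        e->negative = !expensive_lookup(key, &e->value);
        e->next = buckets[idx];
        buckets[idx] = e;

        if (e->negative)
            return 0;
        *value = e->value;
        return 1;
    }

    int
    main(void)
    {
        int v;

        printf("exists  -> %d\n", cached_lookup("exists", &v));    /* loads, prints 1 */
        printf("missing -> %d\n", cached_lookup("missing", &v));   /* probe fails, caches negative entry */
        printf("missing -> %d\n", cached_lookup("missing", &v));   /* negative hit, no second probe */
        return 0;
    }

The catcache additionally has to invalidate negative entries when a matching
tuple is later inserted, which is why the patch records a full hash value in
every entry; see CatalogCacheIdInvalidate in the diff.
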
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index b79d671255..e04792d71f 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.89 2002/03/02 21:39:32 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.90 2002/03/03 17:47:55 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,15 +91,15 @@ static const Oid eqproc[] = {
#define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-BOOLOID]
-static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
-static Index CatalogCacheComputeHashIndex(CatCache *cache,
+static uint32 CatalogCacheComputeHashValue(CatCache *cache,
ScanKey cur_skey);
-static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
+static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
HeapTuple tuple);
-static void CatalogCacheInitializeCache(CatCache *cache);
#ifdef CATCACHE_STATS
static void CatCachePrintStats(void);
#endif
+static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
+static void CatalogCacheInitializeCache(CatCache *cache);
/*
@@ -136,221 +136,17 @@ GetCCHashFunc(Oid keytype)
}
}
-#ifdef CATCACHE_STATS
-
-static void
-CatCachePrintStats(void)
-{
- CatCache *cache;
- long cc_searches = 0;
- long cc_hits = 0;
- long cc_newloads = 0;
-
- elog(LOG, "Catcache stats dump: %d/%d tuples in catcaches",
- CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
-
- for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
- {
- if (cache->cc_ntup == 0 && cache->cc_searches == 0)
- continue; /* don't print unused caches */
- elog(LOG, "Catcache %s/%s: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
- cache->cc_relname,
- cache->cc_indname,
- cache->cc_ntup,
- cache->cc_searches,
- cache->cc_hits,
- cache->cc_newloads,
- cache->cc_searches - cache->cc_hits - cache->cc_newloads);
- cc_searches += cache->cc_searches;
- cc_hits += cache->cc_hits;
- cc_newloads += cache->cc_newloads;
- }
- elog(LOG, "Catcache totals: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
- CacheHdr->ch_ntup,
- cc_searches,
- cc_hits,
- cc_newloads,
- cc_searches - cc_hits - cc_newloads);
-}
-
-#endif /* CATCACHE_STATS */
-
-
-/*
- * Standard routine for creating cache context if it doesn't exist yet
- *
- * There are a lot of places (probably far more than necessary) that check
- * whether CacheMemoryContext exists yet and want to create it if not.
- * We centralize knowledge of exactly how to create it here.
- */
-void
-CreateCacheMemoryContext(void)
-{
- /*
- * Purely for paranoia, check that context doesn't exist; caller
- * probably did so already.
- */
- if (!CacheMemoryContext)
- CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
- "CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
-}
-
-
/*
- * CatalogCacheInitializeCache
+ * CatalogCacheComputeHashValue
*
- * This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
- * that the relcache entry can be opened at this point!
+ * Compute the hash value associated with a given set of lookup keys
*/
-#ifdef CACHEDEBUG
-#define CatalogCacheInitializeCache_DEBUG1 \
- elog(LOG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
- cache->cc_relname)
-
-#define CatalogCacheInitializeCache_DEBUG2 \
-do { \
- if (cache->cc_key[i] > 0) { \
- elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
- i+1, cache->cc_nkeys, cache->cc_key[i], \
- tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
- } else { \
- elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
- i+1, cache->cc_nkeys, cache->cc_key[i]); \
- } \
-} while(0)
-
-#else
-#define CatalogCacheInitializeCache_DEBUG1
-#define CatalogCacheInitializeCache_DEBUG2
-#endif
-
-static void
-CatalogCacheInitializeCache(CatCache *cache)
+static uint32
+CatalogCacheComputeHashValue(CatCache *cache, ScanKey cur_skey)
{
- Relation relation;
- MemoryContext oldcxt;
- TupleDesc tupdesc;
- int i;
-
- CatalogCacheInitializeCache_DEBUG1;
-
- /*
- * Open the relation without locking --- we only need the tupdesc,
- * which we assume will never change ...
- */
- relation = heap_openr(cache->cc_relname, NoLock);
- Assert(RelationIsValid(relation));
-
- /*
- * switch to the cache context so our allocations do not vanish at the
- * end of a transaction
- */
- Assert(CacheMemoryContext != NULL);
-
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
-
- /*
- * copy the relcache's tuple descriptor to permanent cache storage
- */
- tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
-
- /*
- * get the relation's relisshared flag, too
- */
- cache->cc_relisshared = RelationGetForm(relation)->relisshared;
-
- /*
- * return to the caller's memory context and close the rel
- */
- MemoryContextSwitchTo(oldcxt);
-
- heap_close(relation, NoLock);
+ uint32 hashValue = 0;
- CACHE3_elog(LOG, "CatalogCacheInitializeCache: %s, %d keys",
- cache->cc_relname, cache->cc_nkeys);
-
- /*
- * initialize cache's key information
- */
- for (i = 0; i < cache->cc_nkeys; ++i)
- {
- Oid keytype;
-
- CatalogCacheInitializeCache_DEBUG2;
-
- if (cache->cc_key[i] > 0)
- keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
- else
- {
- if (cache->cc_key[i] != ObjectIdAttributeNumber)
- elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
- keytype = OIDOID;
- }
-
- cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
-
- /*
- * If GetCCHashFunc liked the type, safe to index into eqproc[]
- */
- cache->cc_skey[i].sk_procedure = EQPROC(keytype);
-
- /* Do function lookup */
- fmgr_info_cxt(cache->cc_skey[i].sk_procedure,
- &cache->cc_skey[i].sk_func,
- CacheMemoryContext);
-
- /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
- cache->cc_skey[i].sk_attno = cache->cc_key[i];
-
- CACHE4_elog(LOG, "CatalogCacheInit %s %d %p",
- cache->cc_relname,
- i,
- cache);
- }
-
- /*
- * mark this cache fully initialized
- */
- cache->cc_tupdesc = tupdesc;
-}
-
-/*
- * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
- *
- * The only reason to call this routine is to ensure that the relcache
- * has created entries for all the catalogs and indexes referenced by
- * catcaches. Therefore, open the index too. An exception is the indexes
- * on pg_am, which we don't use (cf. IndexScanOK).
- */
-void
-InitCatCachePhase2(CatCache *cache)
-{
- if (cache->cc_tupdesc == NULL)
- CatalogCacheInitializeCache(cache);
-
- if (cache->id != AMOID &&
- cache->id != AMNAME)
- {
- Relation idesc;
-
- idesc = index_openr(cache->cc_indname);
- index_close(idesc);
- }
-}
-
-/*
- * CatalogCacheComputeHashIndex
- */
-static Index
-CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
-{
- uint32 hashIndex = 0;
-
- CACHE4_elog(LOG, "CatalogCacheComputeHashIndex %s %d %p",
+ CACHE4_elog(DEBUG1, "CatalogCacheComputeHashValue %s %d %p",
cache->cc_relname,
cache->cc_nkeys,
cache);
@@ -358,39 +154,40 @@ CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
switch (cache->cc_nkeys)
{
case 4:
- hashIndex ^=
+ hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
- hashIndex ^=
+ hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
- hashIndex ^=
+ hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
- hashIndex ^=
+ hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
cur_skey[0].sk_argument));
break;
default:
- elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
+ elog(FATAL, "CCComputeHashValue: %d cc_nkeys", cache->cc_nkeys);
break;
}
- hashIndex %= (uint32) cache->cc_size;
- return (Index) hashIndex;
+
+ return hashValue;
}
/*
- * CatalogCacheComputeTupleHashIndex
+ * CatalogCacheComputeTupleHashValue
+ *
+ * Compute the hash value associated with a given tuple to be cached
*/
-static Index
-CatalogCacheComputeTupleHashIndex(CatCache *cache,
- HeapTuple tuple)
+static uint32
+CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
{
ScanKeyData cur_skey[4];
bool isNull = false;
@@ -442,16 +239,75 @@ CatalogCacheComputeTupleHashIndex(CatCache *cache,
Assert(!isNull);
break;
default:
- elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
+ elog(FATAL, "CCComputeTupleHashValue: %d cc_nkeys",
cache->cc_nkeys);
break;
}
- return CatalogCacheComputeHashIndex(cache, cur_skey);
+ return CatalogCacheComputeHashValue(cache, cur_skey);
}
+
+#ifdef CATCACHE_STATS
+
+static void
+CatCachePrintStats(void)
+{
+ CatCache *cache;
+ long cc_searches = 0;
+ long cc_hits = 0;
+ long cc_neg_hits = 0;
+ long cc_newloads = 0;
+ long cc_invals = 0;
+ long cc_discards = 0;
+
+ elog(DEBUG1, "Catcache stats dump: %d/%d tuples in catcaches",
+ CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
+
+ for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
+ {
+ if (cache->cc_ntup == 0 && cache->cc_searches == 0)
+ continue; /* don't print unused caches */
+ elog(DEBUG1, "Catcache %s/%s: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
+ cache->cc_relname,
+ cache->cc_indname,
+ cache->cc_ntup,
+ cache->cc_searches,
+ cache->cc_hits,
+ cache->cc_neg_hits,
+ cache->cc_hits + cache->cc_neg_hits,
+ cache->cc_newloads,
+ cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
+ cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
+ cache->cc_invals,
+ cache->cc_discards);
+ cc_searches += cache->cc_searches;
+ cc_hits += cache->cc_hits;
+ cc_neg_hits += cache->cc_neg_hits;
+ cc_newloads += cache->cc_newloads;
+ cc_invals += cache->cc_invals;
+ cc_discards += cache->cc_discards;
+ }
+ elog(DEBUG1, "Catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards",
+ CacheHdr->ch_ntup,
+ cc_searches,
+ cc_hits,
+ cc_neg_hits,
+ cc_hits + cc_neg_hits,
+ cc_newloads,
+ cc_searches - cc_hits - cc_neg_hits - cc_newloads,
+ cc_searches - cc_hits - cc_neg_hits,
+ cc_invals,
+ cc_discards);
+}
+
+#endif /* CATCACHE_STATS */
+
+
/*
* CatCacheRemoveCTup
+ *
+ * Unlink and delete the given cache entry
*/
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
@@ -473,16 +329,26 @@ CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
}
/*
- * CatalogCacheIdInvalidate()
+ * CatalogCacheIdInvalidate
+ *
+ * Invalidate entries in the specified cache, given a hash value and
+ * item pointer. Positive entries are deleted if they match the item
+ * pointer. Negative entries must be deleted if they match the hash
+ * value (since we do not have the exact key of the tuple that's being
+ * inserted). But this should only rarely result in loss of a cache
+ * entry that could have been kept.
*
- * Invalidate a tuple given a cache id. In this case the id should always
- * be found (whether the cache has opened its relation or not). Of course,
- * if the cache has yet to open its relation, there will be no tuples so
- * no problem.
+ * Note that it's not very relevant whether the tuple identified by
+ * the item pointer is being inserted or deleted. We don't expect to
+ * find matching positive entries in the one case, and we don't expect
+ * to find matching negative entries in the other; but we will do the
+ * right things in any case.
+ *
+ * This routine is only quasi-public: it should only be used by inval.c.
*/
void
CatalogCacheIdInvalidate(int cacheId,
- Index hashIndex,
+ uint32 hashValue,
ItemPointer pointer)
{
CatCache *ccp;
@@ -491,37 +357,51 @@ CatalogCacheIdInvalidate(int cacheId,
* sanity checks
*/
Assert(ItemPointerIsValid(pointer));
- CACHE1_elog(LOG, "CatalogCacheIdInvalidate: called");
+ CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: called");
/*
* inspect caches to find the proper cache
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
+ Index hashIndex;
Dlelem *elt,
*nextelt;
if (cacheId != ccp->id)
continue;
- Assert(hashIndex < ccp->cc_size);
+ /*
+ * We don't bother to check whether the cache has finished
+ * initialization yet; if not, there will be no entries in it
+ * so no problem.
+ */
/*
- * inspect the hash bucket until we find a match or exhaust
+ * inspect the proper hash bucket for matches
*/
+ hashIndex = (Index) (hashValue % (uint32) ccp->cc_size);
+
for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
{
CatCTup *ct = (CatCTup *) DLE_VAL(elt);
nextelt = DLGetSucc(elt);
- if (ItemPointerEquals(pointer, &ct->tuple.t_self))
+ if (hashValue != ct->hash_value)
+ continue; /* ignore non-matching hash values */
+
+ if (ct->negative ||
+ ItemPointerEquals(pointer, &ct->tuple.t_self))
{
if (ct->refcount > 0)
ct->dead = true;
else
CatCacheRemoveCTup(ccp, ct);
- CACHE1_elog(LOG, "CatalogCacheIdInvalidate: invalidated");
+ CACHE1_elog(DEBUG1, "CatalogCacheIdInvalidate: invalidated");
+#ifdef CATCACHE_STATS
+ ccp->cc_invals++;
+#endif
/* could be multiple matches, so keep looking! */
}
}
@@ -531,18 +411,34 @@ CatalogCacheIdInvalidate(int cacheId,
/* ----------------------------------------------------------------
* public functions
- *
- * AtEOXact_CatCache
- * ResetCatalogCaches
- * InitCatCache
- * SearchCatCache
- * ReleaseCatCache
- * RelationInvalidateCatalogCacheTuple
* ----------------------------------------------------------------
*/
/*
+ * Standard routine for creating cache context if it doesn't exist yet
+ *
+ * There are a lot of places (probably far more than necessary) that check
+ * whether CacheMemoryContext exists yet and want to create it if not.
+ * We centralize knowledge of exactly how to create it here.
+ */
+void
+CreateCacheMemoryContext(void)
+{
+ /*
+ * Purely for paranoia, check that context doesn't exist; caller
+ * probably did so already.
+ */
+ if (!CacheMemoryContext)
+ CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
+ "CacheMemoryContext",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+}
+
+
+/*
* AtEOXact_CatCache
*
* Clean up catcaches at end of transaction (either commit or abort)
@@ -609,6 +505,9 @@ ResetCatalogCache(CatCache *cache)
ct->dead = true;
else
CatCacheRemoveCTup(cache, ct);
+#ifdef CATCACHE_STATS
+ cache->cc_invals++;
+#endif
}
}
}
@@ -623,12 +522,12 @@ ResetCatalogCaches(void)
{
CatCache *cache;
- CACHE1_elog(LOG, "ResetCatalogCaches called");
+ CACHE1_elog(DEBUG1, "ResetCatalogCaches called");
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
ResetCatalogCache(cache);
- CACHE1_elog(LOG, "end of ResetCatalogCaches call");
+ CACHE1_elog(DEBUG1, "end of ResetCatalogCaches call");
}
/*
@@ -656,7 +555,7 @@ CatalogCacheFlushRelation(Oid relId)
{
CatCache *cache;
- CACHE2_elog(LOG, "CatalogCacheFlushRelation called for %u", relId);
+ CACHE2_elog(DEBUG1, "CatalogCacheFlushRelation called for %u", relId);
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
{
@@ -691,6 +590,13 @@ CatalogCacheFlushRelation(Oid relId)
nextelt = DLGetSucc(elt);
+ /*
+ * Negative entries are never considered related to a rel,
+ * even if the rel is part of their lookup key.
+ */
+ if (ct->negative)
+ continue;
+
if (cache->cc_reloidattr == ObjectIdAttributeNumber)
tupRelid = ct->tuple.t_data->t_oid;
else
@@ -711,12 +617,15 @@ CatalogCacheFlushRelation(Oid relId)
ct->dead = true;
else
CatCacheRemoveCTup(cache, ct);
+#ifdef CATCACHE_STATS
+ cache->cc_invals++;
+#endif
}
}
}
}
- CACHE1_elog(LOG, "end of CatalogCacheFlushRelation call");
+ CACHE1_elog(DEBUG1, "end of CatalogCacheFlushRelation call");
}
/*
@@ -730,7 +639,7 @@ CatalogCacheFlushRelation(Oid relId)
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG1 \
do { \
- elog(LOG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
+ elog(DEBUG1, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
} while(0)
@@ -791,9 +700,10 @@ InitCatCache(int id,
cp->id = id;
cp->cc_relname = relname;
cp->cc_indname = indname;
- cp->cc_reloidattr = reloidattr;
+ cp->cc_reloid = InvalidOid; /* temporary */
cp->cc_relisshared = false; /* temporary */
cp->cc_tupdesc = (TupleDesc) NULL;
+ cp->cc_reloidattr = reloidattr;
cp->cc_ntup = 0;
cp->cc_size = NCCBUCKETS;
cp->cc_nkeys = nkeys;
@@ -820,6 +730,152 @@ InitCatCache(int id,
return cp;
}
+/*
+ * CatalogCacheInitializeCache
+ *
+ * This function does final initialization of a catcache: obtain the tuple
+ * descriptor and set up the hash and equality function links. We assume
+ * that the relcache entry can be opened at this point!
+ */
+#ifdef CACHEDEBUG
+#define CatalogCacheInitializeCache_DEBUG1 \
+ elog(DEBUG1, "CatalogCacheInitializeCache: cache @%p %s", cache, \
+ cache->cc_relname)
+
+#define CatalogCacheInitializeCache_DEBUG2 \
+do { \
+ if (cache->cc_key[i] > 0) { \
+ elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
+ i+1, cache->cc_nkeys, cache->cc_key[i], \
+ tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
+ } else { \
+ elog(DEBUG1, "CatalogCacheInitializeCache: load %d/%d w/%d", \
+ i+1, cache->cc_nkeys, cache->cc_key[i]); \
+ } \
+} while(0)
+
+#else
+#define CatalogCacheInitializeCache_DEBUG1
+#define CatalogCacheInitializeCache_DEBUG2
+#endif
+
+static void
+CatalogCacheInitializeCache(CatCache *cache)
+{
+ Relation relation;
+ MemoryContext oldcxt;
+ TupleDesc tupdesc;
+ int i;
+
+ CatalogCacheInitializeCache_DEBUG1;
+
+ /*
+ * Open the relation without locking --- we only need the tupdesc,
+ * which we assume will never change ...
+ */
+ relation = heap_openr(cache->cc_relname, NoLock);
+ Assert(RelationIsValid(relation));
+
+ /*
+ * switch to the cache context so our allocations do not vanish at the
+ * end of a transaction
+ */
+ Assert(CacheMemoryContext != NULL);
+
+ oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+
+ /*
+ * copy the relcache's tuple descriptor to permanent cache storage
+ */
+ tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
+
+ /*
+ * get the relation's OID and relisshared flag, too
+ */
+ cache->cc_reloid = RelationGetRelid(relation);
+ cache->cc_relisshared = RelationGetForm(relation)->relisshared;
+
+ /*
+ * return to the caller's memory context and close the rel
+ */
+ MemoryContextSwitchTo(oldcxt);
+
+ heap_close(relation, NoLock);
+
+ CACHE3_elog(DEBUG1, "CatalogCacheInitializeCache: %s, %d keys",
+ cache->cc_relname, cache->cc_nkeys);
+
+ /*
+ * initialize cache's key information
+ */
+ for (i = 0; i < cache->cc_nkeys; ++i)
+ {
+ Oid keytype;
+
+ CatalogCacheInitializeCache_DEBUG2;
+
+ if (cache->cc_key[i] > 0)
+ keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
+ else
+ {
+ if (cache->cc_key[i] != ObjectIdAttributeNumber)
+ elog(FATAL, "CatalogCacheInit: only sys attr supported is OID");
+ keytype = OIDOID;
+ }
+
+ cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
+
+ cache->cc_isname[i] = (keytype == NAMEOID);
+
+ /*
+ * If GetCCHashFunc liked the type, safe to index into eqproc[]
+ */
+ cache->cc_skey[i].sk_procedure = EQPROC(keytype);
+
+ /* Do function lookup */
+ fmgr_info_cxt(cache->cc_skey[i].sk_procedure,
+ &cache->cc_skey[i].sk_func,
+ CacheMemoryContext);
+
+ /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
+ cache->cc_skey[i].sk_attno = cache->cc_key[i];
+
+ CACHE4_elog(DEBUG1, "CatalogCacheInit %s %d %p",
+ cache->cc_relname,
+ i,
+ cache);
+ }
+
+ /*
+ * mark this cache fully initialized
+ */
+ cache->cc_tupdesc = tupdesc;
+}
+
+/*
+ * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
+ *
+ * The only reason to call this routine is to ensure that the relcache
+ * has created entries for all the catalogs and indexes referenced by
+ * catcaches. Therefore, open the index too. An exception is the indexes
+ * on pg_am, which we don't use (cf. IndexScanOK).
+ */
+void
+InitCatCachePhase2(CatCache *cache)
+{
+ if (cache->cc_tupdesc == NULL)
+ CatalogCacheInitializeCache(cache);
+
+ if (cache->id != AMOID &&
+ cache->id != AMNAME)
+ {
+ Relation idesc;
+
+ idesc = index_openr(cache->cc_indname);
+ index_close(idesc);
+ }
+}
+
/*
* IndexScanOK
@@ -874,10 +930,20 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
}
/*
- * SearchCatCache
+ * SearchCatCache
*
* This call searches a system cache for a tuple, opening the relation
- * if necessary (the first access to a particular cache).
+ * if necessary (on the first access to a particular cache).
+ *
+ * The result is NULL if not found, or a pointer to a HeapTuple in
+ * the cache. The caller must not modify the tuple, and must call
+ * ReleaseCatCache() when done with it.
+ *
+ * The search key values should be expressed as Datums of the key columns'
+ * datatype(s). (Pass zeroes for any unused parameters.) As a special
+ * exception, the passed-in key for a NAME column can be just a C string;
+ * the caller need not go to the trouble of converting it to a fully
+ * null-padded NAME.
*/
HeapTuple
SearchCatCache(CatCache *cache,
@@ -887,11 +953,13 @@ SearchCatCache(CatCache *cache,
Datum v4)
{
ScanKeyData cur_skey[4];
- Index hash;
+ uint32 hashValue;
+ Index hashIndex;
Dlelem *elt;
CatCTup *ct;
- HeapTuple ntp;
Relation relation;
+ HeapTuple ntp;
+ int i;
MemoryContext oldcxt;
/*
@@ -916,12 +984,13 @@ SearchCatCache(CatCache *cache,
/*
* find the hash bucket in which to look for the tuple
*/
- hash = CatalogCacheComputeHashIndex(cache, cur_skey);
+ hashValue = CatalogCacheComputeHashValue(cache, cur_skey);
+ hashIndex = (Index) (hashValue % (uint32) cache->cc_size);
/*
* scan the hash bucket until we find a match or exhaust our tuples
*/
- for (elt = DLGetHead(&cache->cc_bucket[hash]);
+ for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
elt;
elt = DLGetSucc(elt))
{
@@ -932,9 +1001,11 @@ SearchCatCache(CatCache *cache,
if (ct->dead)
continue; /* ignore dead entries */
+ if (ct->hash_value != hashValue)
+ continue; /* quickly skip entry if wrong hash val */
+
/*
- * see if the cached tuple matches our key. (should we be worried
- * about time ranges? -cim 10/2/90)
+ * see if the cached tuple matches our key.
*/
HeapKeyTest(&ct->tuple,
cache->cc_tupdesc,
@@ -945,33 +1016,53 @@ SearchCatCache(CatCache *cache,
continue;
/*
- * we found a tuple in the cache: bump its refcount, move it to
- * the front of the LRU list, and return it. We also move it to
- * the front of the list for its hashbucket, in order to speed
- * subsequent searches. (The most frequently accessed elements in
- * any hashbucket will tend to be near the front of the
- * hashbucket's list.)
+ * we found a match in the cache: move it to the front of the global
+ * LRU list. We also move it to the front of the list for its
+ * hashbucket, in order to speed subsequent searches. (The most
+ * frequently accessed elements in any hashbucket will tend to be
+ * near the front of the hashbucket's list.)
*/
- ct->refcount++;
-
DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
+ /*
+ * If it's a positive entry, bump its refcount and return it.
+ * If it's negative, we can report failure to the caller.
+ */
+ if (!ct->negative)
+ {
+ ct->refcount++;
+
#ifdef CACHEDEBUG
- CACHE3_elog(LOG, "SearchCatCache(%s): found in bucket %d",
- cache->cc_relname, hash);
+ CACHE3_elog(DEBUG1, "SearchCatCache(%s): found in bucket %d",
+ cache->cc_relname, hashIndex);
#endif /* CACHEDEBUG */
#ifdef CATCACHE_STATS
- cache->cc_hits++;
+ cache->cc_hits++;
#endif
- return &ct->tuple;
+ return &ct->tuple;
+ }
+ else
+ {
+#ifdef CACHEDEBUG
+ CACHE3_elog(DEBUG1, "SearchCatCache(%s): found neg entry in bucket %d",
+ cache->cc_relname, hashIndex);
+#endif /* CACHEDEBUG */
+
+#ifdef CATCACHE_STATS
+ cache->cc_neg_hits++;
+#endif
+
+ return NULL;
+ }
}
/*
- * Tuple was not found in cache, so we have to try and retrieve it
- * directly from the relation. If it's found, we add it to the cache.
+ * Tuple was not found in cache, so we have to try to retrieve it
+ * directly from the relation. If found, we will add it to the
+ * cache; if not found, we will add a negative cache entry instead.
*
* NOTE: it is possible for recursive cache lookups to occur while
* reading the relation --- for example, due to shared-cache-inval
@@ -987,14 +1078,18 @@ SearchCatCache(CatCache *cache,
/*
* open the relation associated with the cache
*/
- relation = heap_openr(cache->cc_relname, AccessShareLock);
+ relation = heap_open(cache->cc_reloid, AccessShareLock);
+
+ /*
+ * Pre-create cache entry header, and mark no tuple found.
+ */
+ ct = (CatCTup *) MemoryContextAlloc(CacheMemoryContext, sizeof(CatCTup));
+ ct->negative = true;
/*
* Scan the relation to find the tuple. If there's an index, and if
* it's safe to do so, use the index. Else do a heap scan.
*/
- ct = NULL;
-
if ((RelationGetForm(relation))->relhasindex &&
!IsIgnoringSystemIndexes() &&
IndexScanOK(cache, cur_skey))
@@ -1004,9 +1099,8 @@ SearchCatCache(CatCache *cache,
RetrieveIndexResult indexRes;
HeapTupleData tuple;
Buffer buffer;
- int i;
- CACHE2_elog(LOG, "SearchCatCache(%s): performing index scan",
+ CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing index scan",
cache->cc_relname);
/*
@@ -1031,8 +1125,8 @@ SearchCatCache(CatCache *cache,
{
/* Copy tuple into our context */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- ct = (CatCTup *) palloc(sizeof(CatCTup));
heap_copytuple_with_tuple(&tuple, &ct->tuple);
+ ct->negative = false;
MemoryContextSwitchTo(oldcxt);
ReleaseBuffer(buffer);
break;
@@ -1045,7 +1139,7 @@ SearchCatCache(CatCache *cache,
{
HeapScanDesc sd;
- CACHE2_elog(LOG, "SearchCatCache(%s): performing heap scan",
+ CACHE2_elog(DEBUG1, "SearchCatCache(%s): performing heap scan",
cache->cc_relname);
sd = heap_beginscan(relation, 0, SnapshotNow,
@@ -1057,8 +1151,8 @@ SearchCatCache(CatCache *cache,
{
/* Copy tuple into our context */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
- ct = (CatCTup *) palloc(sizeof(CatCTup));
heap_copytuple_with_tuple(ntp, &ct->tuple);
+ ct->negative = false;
MemoryContextSwitchTo(oldcxt);
/* We should not free the result of heap_getnext... */
}
@@ -1072,58 +1166,140 @@ SearchCatCache(CatCache *cache,
heap_close(relation, AccessShareLock);
/*
- * scan is complete. if tup was found, we can add it to the cache.
+ * scan is complete. If tuple was not found, we need to build
+ * a fake tuple for the negative cache entry. The fake tuple has
+ * the correct key columns, but nulls everywhere else.
*/
- if (ct == NULL)
- return NULL;
+ if (ct->negative)
+ {
+ TupleDesc tupDesc = cache->cc_tupdesc;
+ Datum *values;
+ char *nulls;
+ Oid negOid = InvalidOid;
+
+ values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
+ nulls = (char *) palloc(tupDesc->natts * sizeof(char));
+
+ memset(values, 0, tupDesc->natts * sizeof(Datum));
+ memset(nulls, 'n', tupDesc->natts * sizeof(char));
+
+ for (i = 0; i < cache->cc_nkeys; i++)
+ {
+ int attindex = cache->cc_key[i];
+ Datum keyval = cur_skey[i].sk_argument;
+
+ if (attindex > 0)
+ {
+ /*
+ * Here we must be careful in case the caller passed a
+ * C string where a NAME is wanted: convert the given
+ * argument to a correctly padded NAME. Otherwise the
+ * memcpy() done in heap_formtuple could fall off the
+ * end of memory.
+ */
+ if (cache->cc_isname[i])
+ {
+ Name newval = (Name) palloc(NAMEDATALEN);
+
+ namestrcpy(newval, DatumGetCString(keyval));
+ keyval = NameGetDatum(newval);
+ }
+ values[attindex-1] = keyval;
+ nulls[attindex-1] = ' ';
+ }
+ else
+ {
+ Assert(attindex == ObjectIdAttributeNumber);
+ negOid = DatumGetObjectId(keyval);
+ }
+ }
+
+ ntp = heap_formtuple(tupDesc, values, nulls);
+
+ oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ heap_copytuple_with_tuple(ntp, &ct->tuple);
+ ct->tuple.t_data->t_oid = negOid;
+ MemoryContextSwitchTo(oldcxt);
+
+ heap_freetuple(ntp);
+ for (i = 0; i < cache->cc_nkeys; i++)
+ {
+ if (cache->cc_isname[i])
+ pfree(DatumGetName(values[cache->cc_key[i]-1]));
+ }
+ pfree(values);
+ pfree(nulls);
+ }
/*
* Finish initializing the CatCTup header, and add it to the linked
* lists.
*/
- CACHE1_elog(LOG, "SearchCatCache: found tuple");
-
ct->ct_magic = CT_MAGIC;
ct->my_cache = cache;
DLInitElem(&ct->lrulist_elem, (void *) ct);
DLInitElem(&ct->cache_elem, (void *) ct);
ct->refcount = 1; /* count this first reference */
ct->dead = false;
+ ct->hash_value = hashValue;
DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
- DLAddHead(&cache->cc_bucket[hash], &ct->cache_elem);
-
-#ifdef CATCACHE_STATS
- cache->cc_newloads++;
-#endif
+ DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
/*
* If we've exceeded the desired size of the caches, try to throw away
- * the least recently used entry.
+ * the least recently used entry. NB: the newly-built entry cannot
+ * get thrown away here, because it has positive refcount.
*/
++cache->cc_ntup;
if (++CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
{
- for (elt = DLGetTail(&CacheHdr->ch_lrulist);
- elt;
- elt = DLGetPred(elt))
+ Dlelem *prevelt;
+
+ for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
{
CatCTup *oldct = (CatCTup *) DLE_VAL(elt);
+ prevelt = DLGetPred(elt);
+
if (oldct->refcount == 0)
{
- CACHE2_elog(LOG, "SearchCatCache(%s): Overflow, LRU removal",
+ CACHE2_elog(DEBUG1, "SearchCatCache(%s): Overflow, LRU removal",
cache->cc_relname);
+#ifdef CATCACHE_STATS
+ oldct->my_cache->cc_discards++;
+#endif
CatCacheRemoveCTup(oldct->my_cache, oldct);
- break;
+ if (CacheHdr->ch_ntup <= CacheHdr->ch_maxtup)
+ break;
}
}
}
- CACHE4_elog(LOG, "SearchCatCache(%s): Contains %d/%d tuples",
+ CACHE4_elog(DEBUG1, "SearchCatCache(%s): Contains %d/%d tuples",
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
- CACHE3_elog(LOG, "SearchCatCache(%s): put in bucket %d",
- cache->cc_relname, hash);
+
+ if (ct->negative)
+ {
+ CACHE3_elog(DEBUG1, "SearchCatCache(%s): put neg entry in bucket %d",
+ cache->cc_relname, hashIndex);
+
+ /*
+ * We are not returning the new entry to the caller, so reset its
+ * refcount. Note it would be uncool to set the refcount to 0
+ * before doing the extra-entry removal step above.
+ */
+ ct->refcount = 0; /* negative entries never have refs */
+
+ return NULL;
+ }
+
+ CACHE3_elog(DEBUG1, "SearchCatCache(%s): put in bucket %d",
+ cache->cc_relname, hashIndex);
+
+#ifdef CATCACHE_STATS
+ cache->cc_newloads++;
+#endif
return &ct->tuple;
}
@@ -1164,7 +1340,7 @@ ReleaseCatCache(HeapTuple tuple)
*
* This is part of a rather subtle chain of events, so pay attention:
*
- * When a tuple is updated or deleted, it cannot be flushed from the
+ * When a tuple is inserted or deleted, it cannot be flushed from the
* catcaches immediately, for reasons explained at the top of cache/inval.c.
* Instead we have to add entry(s) for the tuple to a list of pending tuple
* invalidations that will be done at the end of the command or transaction.
@@ -1172,15 +1348,16 @@ ReleaseCatCache(HeapTuple tuple)
* The lists of tuples that need to be flushed are kept by inval.c. This
* routine is a helper routine for inval.c. Given a tuple belonging to
* the specified relation, find all catcaches it could be in, compute the
- * correct hashindex for each such catcache, and call the specified function
- * to record the cache id, hashindex, and tuple ItemPointer in inval.c's
+ * correct hash value for each such catcache, and call the specified function
+ * to record the cache id, hash value, and tuple ItemPointer in inval.c's
* lists. CatalogCacheIdInvalidate will be called later, if appropriate,
* using the recorded information.
*
* Note that it is irrelevant whether the given tuple is actually loaded
* into the catcache at the moment. Even if it's not there now, it might
- * be by the end of the command --- or might be in other backends' caches
- * --- so we have to be prepared to flush it.
+ * be by the end of the command, or there might be a matching negative entry
+ * to flush --- or other backends' caches might have such entries --- so
+ * we have to make list entries to flush it later.
*
* Also note that it's not an error if there are no catcaches for the
* specified relation. inval.c doesn't know exactly which rels have
@@ -1190,11 +1367,12 @@ ReleaseCatCache(HeapTuple tuple)
void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
- void (*function) (int, Index, ItemPointer, Oid))
+ void (*function) (int, uint32, ItemPointer, Oid))
{
CatCache *ccp;
+ Oid reloid;
- CACHE1_elog(LOG, "PrepareToInvalidateCacheTuple: called");
+ CACHE1_elog(DEBUG1, "PrepareToInvalidateCacheTuple: called");
/*
* sanity checks
@@ -1204,25 +1382,27 @@ PrepareToInvalidateCacheTuple(Relation relation,
Assert(PointerIsValid(function));
Assert(CacheHdr != NULL);
+ reloid = RelationGetRelid(relation);
+
/* ----------------
* for each cache
* if the cache contains tuples from the specified relation
- * compute the tuple's hash index in this cache,
+ * compute the tuple's hash value in this cache,
* and call the passed function to register the information.
* ----------------
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
- if (strcmp(ccp->cc_relname, RelationGetRelationName(relation)) != 0)
- continue;
-
/* Just in case cache hasn't finished initialization yet... */
if (ccp->cc_tupdesc == NULL)
CatalogCacheInitializeCache(ccp);
+ if (ccp->cc_reloid != reloid)
+ continue;
+
(*function) (ccp->id,
- CatalogCacheComputeTupleHashIndex(ccp, tuple),
+ CatalogCacheComputeTupleHashValue(ccp, tuple),
&tuple->t_self,
ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
}