Diffstat (limited to 'src/backend/catalog')
-rw-r--r--  src/backend/catalog/index.c      | 81
-rw-r--r--  src/backend/catalog/indexing.c   |  4
-rw-r--r--  src/backend/catalog/unused_oids  | 41
3 files changed, 77 insertions, 49 deletions
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index eefa44c729..14fc47700e 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.8 1996/11/08 00:44:30 scrappy Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.9 1996/11/13 20:47:53 scrappy Exp $
*
*
* INTERFACE ROUTINES
@@ -81,7 +81,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts);
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
FuncIndexInfo *funcInfo, int natts,
AttrNumber attNums[], Oid classOids[], Node *predicate,
- TypeName *indexKeyType, bool islossy);
+ TypeName *indexKeyType, bool islossy, bool unique);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
int numberOfAttributes, AttrNumber attributeNumber[],
IndexStrategy indexStrategy, uint16 parameterCount,
@@ -742,7 +742,8 @@ UpdateIndexRelation(Oid indexoid,
Oid classOids[],
Node *predicate,
TypeName *indexKeyType,
- bool islossy)
+ bool islossy,
+ bool unique)
{
IndexTupleForm indexForm;
char *predString;
@@ -779,6 +780,7 @@ UpdateIndexRelation(Oid indexoid,
indexForm->indproc = (PointerIsValid(funcInfo)) ?
FIgetProcOid(funcInfo) : InvalidOid;
indexForm->indislossy = islossy;
+ indexForm->indisunique = unique;
if (indexKeyType != NULL)
indexForm->indhaskeytype = 1;
else
@@ -1008,7 +1010,8 @@ index_create(char *heapRelationName,
uint16 parameterCount,
Datum *parameter,
Node *predicate,
- bool islossy)
+ bool islossy,
+ bool unique)
{
Relation heapRelation;
Relation indexRelation;
@@ -1122,7 +1125,7 @@ index_create(char *heapRelationName,
*/
UpdateIndexRelation(indexoid, heapoid, funcInfo,
numatts, attNums, classObjectId, predicate,
- IndexKeyType, islossy);
+ IndexKeyType, islossy, unique);
predInfo = (PredInfo*)palloc(sizeof(PredInfo));
predInfo->pred = predicate;
@@ -1594,7 +1597,7 @@ DefaultBuild(Relation heapRelation,
indexTuple->t_tid = heapTuple->t_ctid;
insertResult = index_insert(indexRelation, datum, nullv,
- &(heapTuple->t_ctid));
+ &(heapTuple->t_ctid), false);
if (insertResult) pfree(insertResult);
pfree(indexTuple);
@@ -1678,4 +1681,70 @@ index_build(Relation heapRelation,
predInfo);
}
+/*
+ * IndexIsUnique: given an index's relation OID, report whether
+ * the index is unique, using the system cache.
+ */
+bool
+IndexIsUnique(Oid indexId)
+{
+ HeapTuple tuple;
+ IndexTupleForm index;
+
+ tuple = SearchSysCacheTuple(INDEXRELID,
+ ObjectIdGetDatum(indexId),
+ 0, 0, 0);
+ if (!HeapTupleIsValid(tuple)) {
+ elog(WARN, "Can't find index id %d in IndexIsUnique",
+ indexId);
+ }
+ index = (IndexTupleForm)GETSTRUCT(tuple);
+ Assert(index->indexrelid == indexId);
+
+ return index->indisunique;
+}
+/*
+ * IndexIsUniqueNoCache: same as IndexIsUnique, but without the system
+ * cache.  When we are called from btbuild, the transaction that is
+ * adding the entry to pg_index has not committed yet.  The system
+ * cache functions do a heap scan, but only with NowTimeQual, not
+ * SelfTimeQual, so they will not find tuples added by the current
+ * transaction (which is just as well: if the transaction aborts, we
+ * don't want those tuples lingering in the cache).  So we have to do
+ * our own scan with SelfTimeQual.  This is only called when a new
+ * index is created, so it is acceptable for it to be slow.
+ */
+bool
+IndexIsUniqueNoCache(Oid indexId)
+{
+ Relation pg_index;
+ ScanKeyData skey[1];
+ HeapScanDesc scandesc;
+ HeapTuple tuple;
+ Buffer b;
+ IndexTupleForm index;
+ bool isunique;
+
+ pg_index = heap_openr(IndexRelationName);
+
+ ScanKeyEntryInitialize(&skey[0], (bits16)0x0,
+ Anum_pg_index_indexrelid,
+ (RegProcedure)ObjectIdEqualRegProcedure,
+ ObjectIdGetDatum(indexId));
+
+ scandesc = heap_beginscan(pg_index, 0, SelfTimeQual, 1, skey);
+
+ tuple = heap_getnext(scandesc, 0, &b);
+ if (!HeapTupleIsValid(tuple)) {
+ elog(WARN, "Can't find index id %d in IndexIsUniqueNoCache",
+ indexId);
+ }
+ index = (IndexTupleForm)GETSTRUCT(tuple);
+ Assert(index->indexrelid == indexId);
+ isunique = index->indisunique;
+
+ ReleaseBuffer(b);
+ return isunique;
+}
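Editorial note: a hedged usage sketch of the two new lookup routines (not
part of this commit; the names "indexOid" and "building" are illustrative
only).  Per the comment above, code running inside the transaction that is
creating the index must use the NoCache variant, because the new pg_index
tuple is not yet visible through the system cache; other callers can go
through IndexIsUnique.

	/*
	 * Hypothetical caller (illustrative only): choose the lookup that
	 * matches the visibility of this index's pg_index tuple.
	 */
	bool		unique;

	if (building)
		unique = IndexIsUniqueNoCache(indexOid);	/* same-transaction build */
	else
		unique = IndexIsUnique(indexOid);			/* index already committed */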
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 34603cb155..a27496528c 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.5 1996/11/11 14:02:10 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.6 1996/11/13 20:47:57 scrappy Exp $
*
*-------------------------------------------------------------------------
*/
@@ -184,7 +184,7 @@ CatalogIndexInsert(Relation *idescs,
finfoP);
indexRes = index_insert(idescs[i], &datum, nulls,
- &(heapTuple->t_ctid));
+ &(heapTuple->t_ctid), false);
if (indexRes) pfree(indexRes);
}
}
diff --git a/src/backend/catalog/unused_oids b/src/backend/catalog/unused_oids
index 9608204f49..e69de29bb2 100644
--- a/src/backend/catalog/unused_oids
+++ b/src/backend/catalog/unused_oids
@@ -1,41 +0,0 @@
-#!/bin/sh
-# unused_oids
-#
-# $Header: /cvsroot/pgsql/src/backend/catalog/Attic/unused_oids,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $
-#
-# finds blocks of oids that have not already been claimed by
-# post_hackers for internal purposes. primarily useful for
-# finding valid oids for new internal function oids. the numbers
-# printed are inclusive ranges of valid (unused) oids.
-#
-# before using a large empty block, make sure you aren't about
-# to take over what was intended as expansion space for something
-# else. also, before using a number, do a "grepsrc" to make sure
-# that someone isn't using a literal numeric constant somewhere..
-#
-# non-berkeley post_hackers should probably not try to use oids
-# less than the highest one that comes with the distributed source.
-#
-# run this script in src/backend/catalog.
-#
-egrep '^DATA' pg_*.h | \
- sed -e 's/^.*OID[^=]*=[^0-9]*//' -e 's/[^0-9].*$//' | \
- sort -n | \
- uniq | \
- awk '
-BEGIN {
- last = 0;
-}
-/^[0-9]/ {
- if ($1 > last + 1) {
- if ($1 > last + 2) {
- print last + 1, "-", $1 - 1;
- } else {
- print last + 1;
- }
- }
- last = $1;
-}
-END {
- print last + 1, "-";
-}'