author     Tom Lane <tgl@sss.pgh.pa.us>    2005-03-06 22:15:05 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>    2005-03-06 22:15:05 +0000
commit     849074f9ae422c64501bb1d53ef840de870bf65c (patch)
tree       9ac33ca6df68410da184659a4b0ca67f7bdf8bef /src/include/executor
parent     31b6d840f6fdbf3d272e7bf8ec0461742edcdd46 (diff)
Revise hash join code so that we can increase the number of batches
on-the-fly, and thereby avoid blowing out memory when the planner has underestimated the hash table size. Hash join will now obey the work_mem limit with some faithfulness. Per my recent proposal (hash aggregate part isn't done yet though).
Diffstat (limited to 'src/include/executor')
-rw-r--r--   src/include/executor/hashjoin.h       75
-rw-r--r--   src/include/executor/nodeHash.h       26
-rw-r--r--   src/include/executor/nodeHashjoin.h    8
3 files changed, 64 insertions(+), 45 deletions(-)
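
The memory-limit behaviour described in the commit message rests on the new spaceUsed, spaceAllowed, and growEnabled fields added to HashJoinTableData below. The following is a minimal sketch of the overflow check they imply, assuming spaceAllowed was derived from work_mem (which is in kilobytes) at table-creation time; the helper name and the division of labor are assumptions, not the committed nodeHash.c code.

#include "postgres.h"
#include "executor/hashjoin.h"

/*
 * Hypothetical helper (name and details assumed): double hashtable->nbatch,
 * enlarge the batch-file arrays, then rescan the in-memory buckets and dump
 * any tuple whose new batch number is past curbatch out to the proper inner
 * batch file.
 */
static void
sketch_IncreaseNumBatches(HashJoinTable hashtable)
{
    (void) hashtable;           /* body elided in this sketch */
}

static void
sketch_NoteTupleStored(HashJoinTable hashtable, HeapTuple tuple)
{
    /* account for the copy of the tuple just added to the batch context */
    hashtable->spaceUsed += HEAPTUPLESIZE + tuple->t_len;

    /*
     * spaceAllowed acts as the work_mem ceiling; growEnabled lets the code
     * give up on further doublings once they stop helping.
     */
    if (hashtable->spaceUsed > hashtable->spaceAllowed &&
        hashtable->growEnabled)
        sketch_IncreaseNumBatches(hashtable);
}
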
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index e267f474ed..c0f75922e1 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/hashjoin.h,v 1.34 2004/12/31 22:03:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/executor/hashjoin.h,v 1.35 2005/03/06 22:15:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -20,11 +20,12 @@
/* ----------------------------------------------------------------
* hash-join hash table structures
*
- * Each active hashjoin has a HashJoinTable control block which is
+ * Each active hashjoin has a HashJoinTable control block, which is
* palloc'd in the executor's per-query context. All other storage needed
* for the hashjoin is kept in private memory contexts, two for each hashjoin.
* This makes it easy and fast to release the storage when we don't need it
- * anymore.
+ * anymore. (Exception: data associated with the temp files lives in the
+ * per-query context too, since we always call buffile.c in that context.)
*
* The hashtable contexts are made children of the per-query context, ensuring
* that they will be discarded at end of statement even if the join is
@@ -35,40 +36,64 @@
* "hashCxt", while storage that is only wanted for the current batch is
* allocated in the "batchCxt". By resetting the batchCxt at the end of
* each batch, we free all the per-batch storage reliably and without tedium.
+ *
+ * During first scan of inner relation, we get its tuples from executor.
+ * If nbatch > 1 then tuples that don't belong in first batch get saved
+ * into inner-batch temp files. The same statements apply for the
+ * first scan of the outer relation, except we write tuples to outer-batch
+ * temp files. After finishing the first scan, we do the following for
+ * each remaining batch:
+ * 1. Read tuples from inner batch file, load into hash buckets.
+ * 2. Read tuples from outer batch file, match to hash buckets and output.
+ *
+ * It is possible to increase nbatch on the fly if the in-memory hash table
+ * gets too big. The hash-value-to-batch computation is arranged so that this
+ * can only cause a tuple to go into a later batch than previously thought,
+ * never into an earlier batch. When we increase nbatch, we rescan the hash
+ * table and dump out any tuples that are now of a later batch to the correct
+ * inner batch file. Subsequently, while reading either inner or outer batch
+ * files, we might find tuples that no longer belong to the current batch;
+ * if so, we just dump them out to the correct batch file.
* ----------------------------------------------------------------
*/
+/* these are in nodes/execnodes.h: */
+/* typedef struct HashJoinTupleData *HashJoinTuple; */
+/* typedef struct HashJoinTableData *HashJoinTable; */
+
typedef struct HashJoinTupleData
{
- struct HashJoinTupleData *next; /* link to next tuple in same
- * bucket */
+ struct HashJoinTupleData *next; /* link to next tuple in same bucket */
+ uint32 hashvalue; /* tuple's hash code */
HeapTupleData htup; /* tuple header */
} HashJoinTupleData;
-typedef HashJoinTupleData *HashJoinTuple;
-
typedef struct HashJoinTableData
{
- int nbuckets; /* buckets in use during this batch */
- int totalbuckets; /* total number of (virtual) buckets */
- HashJoinTuple *buckets; /* buckets[i] is head of list of tuples */
+ int nbuckets; /* # buckets in the in-memory hash table */
+ /* buckets[i] is head of list of tuples in i'th in-memory bucket */
+ struct HashJoinTupleData **buckets;
/* buckets array is per-batch storage, as are all the tuples */
- int nbatch; /* number of batches; 0 means 1-pass join */
- int curbatch; /* current batch #, or 0 during 1st pass */
+ int nbatch; /* number of batches */
+ int curbatch; /* current batch #; 0 during 1st pass */
+
+ int nbatch_original; /* nbatch when we started inner scan */
+ int nbatch_outstart; /* nbatch when we started outer scan */
+
+ bool growEnabled; /* flag to shut off nbatch increases */
bool hashNonEmpty; /* did inner plan produce any rows? */
/*
- * all these arrays are allocated for the life of the hash join, but
- * only if nbatch > 0:
+ * These arrays are allocated for the life of the hash join, but
+ * only if nbatch > 1. A file is opened only when we first write
+ * a tuple into it (otherwise its pointer remains NULL). Note that
+ * the zero'th array elements never get used, since we will process
+ * rather than dump out any tuples of batch zero.
*/
BufFile **innerBatchFile; /* buffered virtual temp file per batch */
BufFile **outerBatchFile; /* buffered virtual temp file per batch */
- long *outerBatchSize; /* count of tuples in each outer batch
- * file */
- long *innerBatchSize; /* count of tuples in each inner batch
- * file */
/*
* Info about the datatype-specific hash functions for the datatypes
@@ -79,21 +104,11 @@ typedef struct HashJoinTableData
*/
FmgrInfo *hashfunctions; /* lookup data for hash functions */
- /*
- * During 1st scan of inner relation, we get tuples from executor. If
- * nbatch > 0 then tuples that don't belong in first nbuckets logical
- * buckets get dumped into inner-batch temp files. The same statements
- * apply for the 1st scan of the outer relation, except we write
- * tuples to outer-batch temp files. If nbatch > 0 then we do the
- * following for each batch: 1. Read tuples from inner batch file,
- * load into hash buckets. 2. Read tuples from outer batch file, match
- * to hash buckets and output.
- */
+ Size spaceUsed; /* memory space currently used by tuples */
+ Size spaceAllowed; /* upper limit for space used */
MemoryContext hashCxt; /* context for whole-hash-join storage */
MemoryContext batchCxt; /* context for this-batch-only storage */
} HashJoinTableData;
-typedef HashJoinTableData *HashJoinTable;
-
#endif /* HASHJOIN_H */
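
The comment block added above promises that increasing nbatch can only move a tuple to a later batch, never an earlier one. One way to arrange that: take the batch number from the hash bits above the bucket bits and keep nbatch a power of two, so doubling nbatch merely unmasks one more bit. A sketch of such a mapping follows, under those assumptions; the committed ExecHashGetBucketAndBatch may differ in detail.

#include "postgres.h"
#include "executor/hashjoin.h"

static void
sketch_GetBucketAndBatch(HashJoinTable hashtable, uint32 hashvalue,
                         int *bucketno, int *batchno)
{
    uint32      nbuckets = (uint32) hashtable->nbuckets;
    uint32      nbatch = (uint32) hashtable->nbatch;

    *bucketno = (int) (hashvalue % nbuckets);

    if (nbatch > 1)
    {
        /* nbatch assumed a power of 2, so modulo reduces to a mask */
        *batchno = (int) ((hashvalue / nbuckets) & (nbatch - 1));
    }
    else
        *batchno = 0;
}

When nbatch doubles, the mask (nbatch - 1) gains one high-order bit, so a tuple's batch number either stays the same or jumps forward by the old nbatch; it can never move backward, which is what makes it safe to dump mis-batched tuples forward while rescanning the hash table or rereading a batch file.
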
diff --git a/src/include/executor/nodeHash.h b/src/include/executor/nodeHash.h
index 781cfcf838..06d73c060e 100644
--- a/src/include/executor/nodeHash.h
+++ b/src/include/executor/nodeHash.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeHash.h,v 1.35 2004/12/31 22:03:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeHash.h,v 1.36 2005/03/06 22:15:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,18 +25,20 @@ extern void ExecReScanHash(HashState *node, ExprContext *exprCtxt);
extern HashJoinTable ExecHashTableCreate(Hash *node, List *hashOperators);
extern void ExecHashTableDestroy(HashJoinTable hashtable);
extern void ExecHashTableInsert(HashJoinTable hashtable,
- ExprContext *econtext,
- List *hashkeys);
-extern int ExecHashGetBucket(HashJoinTable hashtable,
- ExprContext *econtext,
- List *hashkeys);
-extern int ExecHashGetBatch(int bucketno, HashJoinTable hashtable);
-extern HeapTuple ExecScanHashBucket(HashJoinState *hjstate, List *hjclauses,
- ExprContext *econtext);
-extern void ExecHashTableReset(HashJoinTable hashtable, long ntuples);
+ HeapTuple tuple,
+ uint32 hashvalue);
+extern uint32 ExecHashGetHashValue(HashJoinTable hashtable,
+ ExprContext *econtext,
+ List *hashkeys);
+extern void ExecHashGetBucketAndBatch(HashJoinTable hashtable,
+ uint32 hashvalue,
+ int *bucketno,
+ int *batchno);
+extern HeapTuple ExecScanHashBucket(HashJoinState *hjstate,
+ ExprContext *econtext);
+extern void ExecHashTableReset(HashJoinTable hashtable);
extern void ExecChooseHashTableSize(double ntuples, int tupwidth,
- int *virtualbuckets,
- int *physicalbuckets,
+ int *numbuckets,
int *numbatches);
#endif /* NODEHASH_H */
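
The reshaped nodeHash.h API hashes a tuple exactly once (ExecHashGetHashValue), derives both bucket and batch from that value (ExecHashGetBucketAndBatch), and then carries the hash value along into ExecHashTableInsert or ExecHashJoinSaveTuple. A rough sketch of the outer-side flow under these signatures follows; the local names are illustrative, the hjstate bookkeeping that ExecScanHashBucket relies on is elided, and the committed nodeHashjoin.c may arrange things differently.

#include "postgres.h"
#include "executor/hashjoin.h"
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"

static void
sketch_ProcessOuterTuple(HashJoinState *hjstate, HeapTuple outerTuple,
                         ExprContext *econtext, List *outerHashKeys)
{
    HashJoinTable hashtable = hjstate->hj_HashTable;
    uint32      hashvalue;
    int         bucketno;
    int         batchno;
    HeapTuple   curtuple;

    /* hash once; bucket and batch both come from the same value */
    hashvalue = ExecHashGetHashValue(hashtable, econtext, outerHashKeys);
    ExecHashGetBucketAndBatch(hashtable, hashvalue, &bucketno, &batchno);

    if (batchno != hashtable->curbatch)
    {
        /*
         * Tuple belongs to a later batch (possibly because nbatch grew
         * after partitioning started): spool it, hash value included, to
         * the outer batch file, which is created lazily on first write.
         */
        ExecHashJoinSaveTuple(outerTuple, hashvalue,
                              &hashtable->outerBatchFile[batchno]);
        return;
    }

    /*
     * Current batch: probe the in-memory bucket.  (The real executor
     * records hashvalue/bucketno in hjstate first so ExecScanHashBucket
     * knows where to scan; that bookkeeping is omitted here.)
     */
    while ((curtuple = ExecScanHashBucket(hjstate, econtext)) != NULL)
    {
        /* ... check the remaining join quals and emit a joined tuple ... */
    }
}
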
diff --git a/src/include/executor/nodeHashjoin.h b/src/include/executor/nodeHashjoin.h
index 1902c11fb8..44e942317d 100644
--- a/src/include/executor/nodeHashjoin.h
+++ b/src/include/executor/nodeHashjoin.h
@@ -1,13 +1,13 @@
/*-------------------------------------------------------------------------
*
* nodeHashjoin.h
- *
+ * prototypes for nodeHashjoin.c
*
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeHashjoin.h,v 1.28 2004/12/31 22:03:29 pgsql Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeHashjoin.h,v 1.29 2005/03/06 22:15:05 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -15,6 +15,7 @@
#define NODEHASHJOIN_H
#include "nodes/execnodes.h"
+#include "storage/buffile.h"
extern int ExecCountSlotsHashJoin(HashJoin *node);
extern HashJoinState *ExecInitHashJoin(HashJoin *node, EState *estate);
@@ -22,6 +23,7 @@ extern TupleTableSlot *ExecHashJoin(HashJoinState *node);
extern void ExecEndHashJoin(HashJoinState *node);
extern void ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt);
-extern void ExecHashJoinSaveTuple(HeapTuple heapTuple, BufFile *file);
+extern void ExecHashJoinSaveTuple(HeapTuple heapTuple, uint32 hashvalue,
+ BufFile **fileptr);
#endif /* NODEHASHJOIN_H */
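
Passing BufFile **fileptr (plus the hash value) instead of an already-open BufFile * is what enables the lazy file creation described in hashjoin.h: the pointer stays NULL until the first tuple actually lands in that batch, and the stored hash value spares a recomputation when the batch is read back. A minimal sketch of such a save routine, assuming the older buffile.c interface in which BufFileWrite returns the byte count; the committed nodeHashjoin.c error reporting may differ.

#include "postgres.h"
#include "access/htup.h"
#include "storage/buffile.h"

static void
sketch_SaveTuple(HeapTuple heapTuple, uint32 hashvalue, BufFile **fileptr)
{
    BufFile    *file = *fileptr;
    size_t      written;

    if (file == NULL)
    {
        /* first tuple for this batch: create the temp file only now */
        file = BufFileCreateTemp(false);
        *fileptr = file;
    }

    /* store the hash value in front of the tuple */
    written = BufFileWrite(file, (void *) &hashvalue, sizeof(uint32));
    if (written != sizeof(uint32))
        elog(ERROR, "could not write to hash-join temporary file");

    /* tuple header followed by tuple body, in the order they are read back */
    written = BufFileWrite(file, (void *) heapTuple, sizeof(HeapTupleData));
    if (written != sizeof(HeapTupleData))
        elog(ERROR, "could not write to hash-join temporary file");

    written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len);
    if (written != heapTuple->t_len)
        elog(ERROR, "could not write to hash-join temporary file");
}
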