summary refs log tree commit diff
path: root/src/backend/utils
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/utils')
-rw-r--r--src/backend/utils/adt/ri_triggers.c32
-rw-r--r--src/backend/utils/init/globals.c6
-rw-r--r--src/backend/utils/misc/guc.c67
-rw-r--r--src/backend/utils/misc/postgresql.conf.sample4
-rw-r--r--src/backend/utils/mmgr/portalmem.c6
-rw-r--r--src/backend/utils/sort/tuplesort.c46
-rw-r--r--src/backend/utils/sort/tuplestore.c9
7 files changed, 117 insertions, 53 deletions
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 73aab9e578..47384fbb89 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.66 2004/01/07 18:56:28 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.67 2004/02/03 17:34:03 tgl Exp $
*
* ----------
*/
@@ -41,6 +41,7 @@
#include "utils/lsyscache.h"
#include "utils/typcache.h"
#include "utils/acl.h"
+#include "utils/guc.h"
#include "miscadmin.h"
@@ -2572,6 +2573,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
const char *sep;
List *list;
List *list2;
+ int old_work_mem;
+ char workmembuf[32];
int spi_result;
void *qplan;
@@ -2665,6 +2668,23 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr),
")");
+ /*
+ * Temporarily increase work_mem so that the check query can be executed
+ * more efficiently. It seems okay to do this because the query is simple
+ * enough to not use a multiple of work_mem, and one typically would not
+ * have many large foreign-key validations happening concurrently. So
+ * this seems to meet the criteria for being considered a "maintenance"
+ * operation, and accordingly we use maintenance_work_mem.
+ *
+ * We do the equivalent of "SET LOCAL work_mem" so that transaction abort
+ * will restore the old value if we lose control due to an error.
+ */
+ old_work_mem = work_mem;
+ snprintf(workmembuf, sizeof(workmembuf), "%d", maintenance_work_mem);
+ (void) set_config_option("work_mem", workmembuf,
+ PGC_USERSET, PGC_S_SESSION,
+ true, true);
+
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed");
@@ -2741,6 +2761,16 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish failed");
+ /*
+ * Restore work_mem for the remainder of the current transaction.
+ * This is another SET LOCAL, so it won't affect the session value,
+ * nor any tentative value if there is one.
+ */
+ snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem);
+ (void) set_config_option("work_mem", workmembuf,
+ PGC_USERSET, PGC_S_SESSION,
+ true, true);
+
return true;
}
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 3bddad4685..c170ae603d 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.81 2004/01/28 21:02:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.82 2004/02/03 17:34:03 tgl Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
@@ -78,6 +78,6 @@ int CTimeZone = 0;
bool enableFsync = true;
bool allowSystemTableMods = false;
-int SortMem = 1024;
-int VacuumMem = 8192;
+int work_mem = 1024;
+int maintenance_work_mem = 16384;
int NBuffers = 1000;
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index ebc17830d3..7a0deef9bb 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.183 2004/02/02 00:17:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.184 2004/02/03 17:34:03 tgl Exp $
*
*--------------------------------------------------------------------
*/
@@ -1030,23 +1030,23 @@ static struct config_int ConfigureNamesInt[] =
},
{
- {"sort_mem", PGC_USERSET, RESOURCES_MEM,
- gettext_noop("Sets the maximum memory to be used for sorts and hash tables."),
- gettext_noop("Specifies the amount of memory to be used by internal "
- "sort operations and hash tables before switching to temporary disk "
- "files")
+ {"work_mem", PGC_USERSET, RESOURCES_MEM,
+ gettext_noop("Sets the maximum memory to be used for query workspaces."),
+ gettext_noop("This much memory may be used by each internal "
+ "sort operation and hash table before switching to "
+ "temporary disk files.")
},
- &SortMem,
- 1024, 8 * BLCKSZ / 1024, INT_MAX, NULL, NULL
+ &work_mem,
+ 1024, 8 * BLCKSZ / 1024, INT_MAX / 1024, NULL, NULL
},
{
- {"vacuum_mem", PGC_USERSET, RESOURCES_MEM,
- gettext_noop("Sets the maximum memory used to keep track of to-be-reclaimed rows."),
- NULL
+ {"maintenance_work_mem", PGC_USERSET, RESOURCES_MEM,
+ gettext_noop("Sets the maximum memory to be used for maintenance operations."),
+ gettext_noop("This includes operations such as VACUUM and CREATE INDEX.")
},
- &VacuumMem,
- 8192, 1024, INT_MAX, NULL, NULL
+ &maintenance_work_mem,
+ 16384, 1024, INT_MAX / 1024, NULL, NULL
},
{
@@ -1710,6 +1710,19 @@ static struct config_string ConfigureNamesString[] =
/*
+ * To allow continued support of obsolete names for GUC variables, we apply
+ * the following mappings to any unrecognized name. Note that an old name
+ * should be mapped to a new one only if the new variable has very similar
+ * semantics to the old.
+ */
+static const char * const map_old_guc_names[] = {
+ "sort_mem", "work_mem",
+ "vacuum_mem", "maintenance_work_mem",
+ NULL
+};
+
+
+/*
* Actual lookup of variables is done through this single, sorted array.
*/
struct config_generic **guc_variables;
@@ -1723,6 +1736,7 @@ static char *guc_string_workspace; /* for avoiding memory leaks */
static int guc_var_compare(const void *a, const void *b);
+static int guc_name_compare(const char *namea, const char *nameb);
static void ReportGUCOption(struct config_generic * record);
static char *_ShowOption(struct config_generic * record);
@@ -1812,11 +1826,12 @@ find_option(const char *name)
{
const char **key = &name;
struct config_generic **res;
+ int i;
Assert(name);
/*
- * by equating const char ** with struct config_generic *, we are
+ * By equating const char ** with struct config_generic *, we are
* assuming the name field is first in config_generic.
*/
res = (struct config_generic **) bsearch((void *) &key,
@@ -1826,6 +1841,19 @@ find_option(const char *name)
guc_var_compare);
if (res)
return *res;
+
+ /*
+ * See if the name is an obsolete name for a variable. We assume that
+ * the set of supported old names is short enough that a brute-force
+ * search is the best way.
+ */
+ for (i = 0; map_old_guc_names[i] != NULL; i += 2)
+ {
+ if (guc_name_compare(name, map_old_guc_names[i]) == 0)
+ return find_option(map_old_guc_names[i+1]);
+ }
+
+ /* Unknown name */
return NULL;
}
@@ -1838,16 +1866,19 @@ guc_var_compare(const void *a, const void *b)
{
struct config_generic *confa = *(struct config_generic **) a;
struct config_generic *confb = *(struct config_generic **) b;
- const char *namea;
- const char *nameb;
+ return guc_name_compare(confa->name, confb->name);
+}
+
+
+static int
+guc_name_compare(const char *namea, const char *nameb)
+{
/*
* The temptation to use strcasecmp() here must be resisted, because
* the array ordering has to remain stable across setlocale() calls.
* So, build our own with a simple ASCII-only downcasing.
*/
- namea = confa->name;
- nameb = confb->name;
while (*namea && *nameb)
{
char cha = *namea++;
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 530e8c7952..0a6e15071a 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -56,8 +56,8 @@
# - Memory -
#shared_buffers = 1000 # min 16, at least max_connections*2, 8KB each
-#sort_mem = 1024 # min 64, size in KB
-#vacuum_mem = 8192 # min 1024, size in KB
+#work_mem = 1024 # min 64, size in KB
+#maintenance_work_mem = 16384 # min 1024, size in KB
#debug_shared_buffers = 0 # 0-600 seconds
# - Background writer -
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 159f2fe359..f6b72481fb 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.63 2003/11/29 19:52:04 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.64 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -282,8 +282,8 @@ PortalCreateHoldStore(Portal portal)
/* Create the tuple store, selecting cross-transaction temp files. */
oldcxt = MemoryContextSwitchTo(portal->holdContext);
- /* XXX: Should SortMem be used for this? */
- portal->holdStore = tuplestore_begin_heap(true, true, SortMem);
+ /* XXX: Should maintenance_work_mem be used for the portal size? */
+ portal->holdStore = tuplestore_begin_heap(true, true, work_mem);
MemoryContextSwitchTo(oldcxt);
}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index a93c4742fd..636fdedcdb 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -30,15 +30,15 @@
* heap. When the run number at the top of the heap changes, we know that
* no more records of the prior run are left in the heap.
*
- * The (approximate) amount of memory allowed for any one sort operation
- * is given in kilobytes by the external variable SortMem. Initially,
+ * The approximate amount of memory allowed for any one sort operation
+ * is specified in kilobytes by the caller (most pass work_mem). Initially,
* we absorb tuples and simply store them in an unsorted array as long as
- * we haven't exceeded SortMem. If we reach the end of the input without
- * exceeding SortMem, we sort the array using qsort() and subsequently return
+ * we haven't exceeded workMem. If we reach the end of the input without
+ * exceeding workMem, we sort the array using qsort() and subsequently return
* tuples just by scanning the tuple array sequentially. If we do exceed
- * SortMem, we construct a heap using Algorithm H and begin to emit tuples
+ * workMem, we construct a heap using Algorithm H and begin to emit tuples
* into sorted runs in temporary tapes, emitting just enough tuples at each
- * step to get back within the SortMem limit. Whenever the run number at
+ * step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
* (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
@@ -49,7 +49,7 @@
* next tuple from its source tape (if any). When the heap empties, the merge
* is complete. The basic merge algorithm thus needs very little memory ---
* only M tuples for an M-way merge, and M is at most six in the present code.
- * However, we can still make good use of our full SortMem allocation by
+ * However, we can still make good use of our full workMem allocation by
* pre-reading additional tuples from each source tape. Without prereading,
* our access pattern to the temporary file would be very erratic; on average
* we'd read one block from each of M source tapes during the same time that
@@ -59,7 +59,7 @@
* of the temp file, ensuring that things will be even worse when it comes
* time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
- * each source tape sequentially, loading about SortMem/M bytes from each tape
+ * each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
* one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.40 2003/11/29 19:52:04 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.41 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -323,7 +323,7 @@ struct Tuplesortstate
*
* NOTES about memory consumption calculations:
*
- * We count space allocated for tuples against the SortMem limit, plus
+ * We count space allocated for tuples against the workMem limit, plus
* the space used by the variable-size arrays memtuples and memtupindex.
* Fixed-size space (primarily the LogicalTapeSet I/O buffers) is not
* counted.
@@ -351,7 +351,7 @@ typedef struct
} DatumTuple;
-static Tuplesortstate *tuplesort_begin_common(bool randomAccess);
+static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
static void puttuple_common(Tuplesortstate *state, void *tuple);
static void inittapes(Tuplesortstate *state);
static void selectnewtape(Tuplesortstate *state);
@@ -406,10 +406,16 @@ static Tuplesortstate *qsort_tuplesortstate;
* access was requested, rescan, markpos, and restorepos can also be called.)
* For Datum sorts, putdatum/getdatum are used instead of puttuple/gettuple.
* Call tuplesort_end to terminate the operation and release memory/disk space.
+ *
+ * Each variant of tuplesort_begin has a workMem parameter specifying the
+ * maximum number of kilobytes of RAM to use before spilling data to disk.
+ * (The normal value of this parameter is work_mem, but some callers use
+ * other values.) Each variant also has a randomAccess parameter specifying
+ * whether the caller needs non-sequential access to the sort result.
*/
static Tuplesortstate *
-tuplesort_begin_common(bool randomAccess)
+tuplesort_begin_common(int workMem, bool randomAccess)
{
Tuplesortstate *state;
@@ -417,7 +423,7 @@ tuplesort_begin_common(bool randomAccess)
state->status = TSS_INITIAL;
state->randomAccess = randomAccess;
- state->availMem = SortMem * 1024L;
+ state->availMem = workMem * 1024L;
state->tapeset = NULL;
state->memtupcount = 0;
@@ -442,9 +448,9 @@ Tuplesortstate *
tuplesort_begin_heap(TupleDesc tupDesc,
int nkeys,
Oid *sortOperators, AttrNumber *attNums,
- bool randomAccess)
+ int workMem, bool randomAccess)
{
- Tuplesortstate *state = tuplesort_begin_common(randomAccess);
+ Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
int i;
AssertArg(nkeys > 0);
@@ -488,9 +494,9 @@ tuplesort_begin_heap(TupleDesc tupDesc,
Tuplesortstate *
tuplesort_begin_index(Relation indexRel,
bool enforceUnique,
- bool randomAccess)
+ int workMem, bool randomAccess)
{
- Tuplesortstate *state = tuplesort_begin_common(randomAccess);
+ Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
state->comparetup = comparetup_index;
state->copytup = copytup_index;
@@ -508,9 +514,9 @@ tuplesort_begin_index(Relation indexRel,
Tuplesortstate *
tuplesort_begin_datum(Oid datumType,
Oid sortOperator,
- bool randomAccess)
+ int workMem, bool randomAccess)
{
- Tuplesortstate *state = tuplesort_begin_common(randomAccess);
+ Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
RegProcedure sortFunction;
int16 typlen;
bool typbyval;
@@ -1077,7 +1083,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
- * volume is between 1X and 2X SortMem), we can just use that tape as
+ * volume is between 1X and 2X workMem), we can just use that tape as
* the finished output, rather than doing a useless merge.
*/
if (state->currentRun == 1)
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 9f7efa66a3..d0dce20fc6 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.17 2003/11/29 19:52:04 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.18 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -219,10 +219,7 @@ tuplestore_begin_common(bool randomAccess, bool interXact, int maxKBytes)
state->myfile = NULL;
state->memtupcount = 0;
- if (maxKBytes > 0)
- state->memtupsize = 1024; /* initial guess */
- else
- state->memtupsize = 1; /* won't really need any space */
+ state->memtupsize = 1024; /* initial guess */
state->memtuples = (void **) palloc(state->memtupsize * sizeof(void *));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));
@@ -250,7 +247,7 @@ tuplestore_begin_common(bool randomAccess, bool interXact, int maxKBytes)
* no longer wanted.
*
* maxKBytes: how much data to store in memory (any data beyond this
- * amount is paged to disk).
+ * amount is paged to disk). When in doubt, use work_mem.
*/
Tuplestorestate *
tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)