Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/costsize.c   | 22
-rw-r--r--  src/backend/optimizer/plan/planner.c    |  6
-rw-r--r--  src/backend/optimizer/plan/subselect.c  |  6
-rw-r--r--  src/backend/optimizer/util/pathnode.c   |  4
4 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 454b1db127..c23cf4d232 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.123 2004/01/19 03:52:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.124 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -503,18 +503,18 @@ cost_functionscan(Path *path, Query *root, RelOptInfo *baserel)
* Determines and returns the cost of sorting a relation, including
* the cost of reading the input data.
*
- * If the total volume of data to sort is less than SortMem, we will do
+ * If the total volume of data to sort is less than work_mem, we will do
* an in-memory sort, which requires no I/O and about t*log2(t) tuple
* comparisons for t tuples.
*
- * If the total volume exceeds SortMem, we switch to a tape-style merge
+ * If the total volume exceeds work_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
* merge pass. We expect about ceil(log6(r)) merge passes where r is the
* number of initial runs formed (log6 because tuplesort.c uses six-tape
- * merging). Since the average initial run should be about twice SortMem,
+ * merging). Since the average initial run should be about twice work_mem,
* we have
- * disk traffic = 2 * relsize * ceil(log6(p / (2*SortMem)))
+ * disk traffic = 2 * relsize * ceil(log6(p / (2*work_mem)))
* cpu = comparison_cost * t * log2(t)
*
* The disk traffic is assumed to be half sequential and half random
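For reference, the sort cost model described in the comment above can be written out as a small standalone sketch. This is illustrative only, not the actual cost_sort() code: block_size and page_access_cost are placeholder parameters standing in for BLCKSZ and the planner's blended (half sequential, half random) per-page charge.

    /* Illustrative sketch of the external-sort cost model; not cost_sort() itself. */
    #include <math.h>

    #define SKETCH_LOG2(x) (log(x) / 0.693147180559945)   /* log base 2 */
    #define SKETCH_LOG6(x) (log(x) / 1.791759469228055)   /* log base 6 */

    static double
    sketch_sort_cost(double tuples, double nbytes,
                     long work_mem_kb, double block_size,
                     double cpu_operator_cost, double page_access_cost)
    {
        long    work_mem_bytes = work_mem_kb * 1024L;
        double  cost;

        if (tuples < 2.0)
            tuples = 2.0;       /* avoid log of zero; even tiny sorts cost something */

        /* CPU: about t*log2(t) comparisons, each charged at 2 * cpu_operator_cost */
        cost = 2.0 * cpu_operator_cost * tuples * SKETCH_LOG2(tuples);

        if (nbytes > work_mem_bytes)
        {
            /* external merge sort: each merge pass rewrites and rereads every page */
            double  npages = ceil(nbytes / block_size);
            double  nruns = nbytes / (work_mem_bytes * 2);  /* initial runs ~ 2*work_mem */
            double  log_runs = (nruns > 1.0) ? ceil(SKETCH_LOG6(nruns)) : 1.0;
            double  npageaccesses = 2.0 * npages * log_runs;

            /* page_access_cost is the blended half-sequential/half-random charge */
            cost += npageaccesses * page_access_cost;
        }
        return cost;
    }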
@@ -542,7 +542,7 @@ cost_sort(Path *path, Query *root,
Cost startup_cost = input_cost;
Cost run_cost = 0;
double nbytes = relation_byte_size(tuples, width);
- long sortmembytes = SortMem * 1024L;
+ long work_mem_bytes = work_mem * 1024L;
if (!enable_sort)
startup_cost += disable_cost;
@@ -564,10 +564,10 @@ cost_sort(Path *path, Query *root,
startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples);
/* disk costs */
- if (nbytes > sortmembytes)
+ if (nbytes > work_mem_bytes)
{
double npages = ceil(nbytes / BLCKSZ);
- double nruns = nbytes / (sortmembytes * 2);
+ double nruns = nbytes / (work_mem_bytes * 2);
double log_runs = ceil(LOG6(nruns));
double npageaccesses;
@@ -594,7 +594,7 @@ cost_sort(Path *path, Query *root,
* Determines and returns the cost of materializing a relation, including
* the cost of reading the input data.
*
- * If the total volume of data to materialize exceeds SortMem, we will need
+ * If the total volume of data to materialize exceeds work_mem, we will need
* to write it to disk, so the cost is much higher in that case.
*/
void
@@ -604,10 +604,10 @@ cost_material(Path *path,
Cost startup_cost = input_cost;
Cost run_cost = 0;
double nbytes = relation_byte_size(tuples, width);
- long sortmembytes = SortMem * 1024L;
+ long work_mem_bytes = work_mem * 1024L;
/* disk costs */
- if (nbytes > sortmembytes)
+ if (nbytes > work_mem_bytes)
{
double npages = ceil(nbytes / BLCKSZ);
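The materialize estimate above follows the same pattern: disk costs are charged only once the data exceeds work_mem. A minimal sketch of that spill check, with a placeholder per-page cost (not the actual cost_material() code):

    #include <math.h>

    static double
    sketch_material_disk_cost(double nbytes, long work_mem_kb,
                              double block_size, double page_write_read_cost)
    {
        long    work_mem_bytes = work_mem_kb * 1024L;
        double  disk_cost = 0.0;

        if (nbytes > work_mem_bytes)
        {
            /* spilled: the whole result is written out and read back */
            double  npages = ceil(nbytes / block_size);

            disk_cost = npages * page_write_read_cost;
        }
        return disk_cost;
    }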
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 00ba58ec8b..e00f73c74b 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.165 2004/01/18 00:50:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.166 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -966,7 +966,7 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Use hashed grouping if (a) we think we can fit the
- * hashtable into SortMem, *and* (b) the estimated cost is
+ * hashtable into work_mem, *and* (b) the estimated cost is
* no more than doing it the other way. While avoiding
* the need for sorted input is usually a win, the fact
* that the output won't be sorted may be a loss; so we
@@ -979,7 +979,7 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
- if (hashentrysize * dNumGroups <= SortMem * 1024L)
+ if (hashentrysize * dNumGroups <= work_mem * 1024L)
{
/*
* Okay, do the cost comparison. We need to consider
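The hunk above amounts to a memory-fit test that gates the cost comparison. A hedged standalone version follows (the 64-byte hash overhead and 100-bytes-per-aggregate fudge factors come from the planner.c comment; everything else is illustrative):

    #include <stdbool.h>

    static bool
    sketch_hashagg_fits_in_memory(int path_width, int numAggs,
                                  double dNumGroups, int work_mem_kb)
    {
        /* per-entry estimate: grouping key width, hash overhead,
         * and roughly 100 bytes of transition state per aggregate */
        double  hashentrysize = path_width + 64 + numAggs * 100;

        /* hashed grouping is considered only if the table fits in work_mem */
        return hashentrysize * dNumGroups <= work_mem_kb * 1024.0;
    }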
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 34dca0e5ac..4e7e2fe439 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.87 2004/01/12 22:20:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.88 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -614,12 +614,12 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
return false;
/*
- * The estimated size of the subquery result must fit in SortMem. (XXX
+ * The estimated size of the subquery result must fit in work_mem. (XXX
* what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
- if (subquery_size > SortMem * 1024L)
+ if (subquery_size > work_mem * 1024L)
return false;
/*
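The size estimate used by subplan_is_hashable() can likewise be sketched in isolation. MAXALIGN rounding and the per-tuple HeapTupleData overhead follow the code above; the 8-byte alignment and the 32-byte stand-in for sizeof(HeapTupleData) below are illustrative values so the sketch compiles outside the backend:

    #include <stdbool.h>

    #define SKETCH_MAXALIGN(len)       (((len) + 7) & ~7)   /* assume 8-byte alignment */
    #define SKETCH_HEAPTUPLEDATA_SIZE  32                    /* stand-in for sizeof(HeapTupleData) */

    static bool
    sketch_subplan_is_hashable(double plan_rows, int plan_width, int work_mem_kb)
    {
        /* estimated in-memory footprint of the hashed subquery result */
        double  subquery_size = plan_rows *
            (SKETCH_MAXALIGN(plan_width) + SKETCH_MAXALIGN(SKETCH_HEAPTUPLEDATA_SIZE));

        return subquery_size <= work_mem_kb * 1024.0;
    }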
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 059685e76a..549909dfa0 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.100 2004/01/19 03:49:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.101 2004/02/03 17:34:03 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -637,7 +637,7 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
*/
int hashentrysize = rel->width + 64;
- if (hashentrysize * pathnode->rows <= SortMem * 1024L)
+ if (hashentrysize * pathnode->rows <= work_mem * 1024L)
{
cost_agg(&agg_path, root,
AGG_HASHED, 0,