Diffstat (limited to 'src/backend/optimizer/path')
 src/backend/optimizer/path/allpaths.c | 45 +++++++++++++++
 src/backend/optimizer/path/costsize.c | 70 ++++++++++++++++++++++++
 2 files changed, 115 insertions(+), 0 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index a1e1a87c29..343b35aa32 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -111,6 +111,8 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
                          RangeTblEntry *rte);
 static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
                              RangeTblEntry *rte);
+static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+                                         RangeTblEntry *rte);
 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                    RangeTblEntry *rte);
 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -396,6 +398,9 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
             else
                 set_cte_pathlist(root, rel, rte);
             break;
+        case RTE_NAMEDTUPLESTORE:
+            set_namedtuplestore_pathlist(root, rel, rte);
+            break;
         default:
             elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
             break;
@@ -464,6 +469,9 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
         case RTE_CTE:
             /* CTE reference --- fully handled during set_rel_size */
             break;
+        case RTE_NAMEDTUPLESTORE:
+            /* tuplestore reference --- fully handled during set_rel_size */
+            break;
         default:
             elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
             break;
@@ -639,6 +647,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
              * executed only once.
              */
             return;
+
+        case RTE_NAMEDTUPLESTORE:
+            /*
+             * tuplestore cannot be shared, at least without more
+             * infrastructure to support that.
+             */
+            return;
     }

     /*
@@ -2090,6 +2105,36 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
 }

 /*
+ * set_namedtuplestore_pathlist
+ *      Build the (single) access path for a named tuplestore RTE
+ *
+ * There's no need for a separate set_namedtuplestore_size phase, since we
+ * don't support join-qual-parameterized paths for tuplestores.
+ */
+static void
+set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+                             RangeTblEntry *rte)
+{
+    Relids        required_outer;
+
+    /* Mark rel with estimated output rows, width, etc */
+    set_namedtuplestore_size_estimates(root, rel);
+
+    /*
+     * We don't support pushing join clauses into the quals of a tuplestore
+     * scan, but it could still have required parameterization due to LATERAL
+     * refs in its tlist.
+     */
+    required_outer = rel->lateral_relids;
+
+    /* Generate appropriate path */
+    add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
+
+    /* Select cheapest path (pretty easy in this case...) */
+    set_cheapest(rel);
+}
+
+/*
  * set_worktable_pathlist
  *      Build the (single) access path for a self-reference CTE RTE
  *
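[Editor's note] The constructor called above, create_namedtuplestorescan_path, lives in src/backend/optimizer/util/pathnode.c and is outside this diff's path filter. As a rough sketch only (an assumption modeled on the existing create_ctescan_path; the real function may differ in detail), it would build a plain Path node and hand it to the new cost function:

Path *
create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
                                Relids required_outer)
{
    Path       *pathnode = makeNode(Path);

    pathnode->pathtype = T_NamedTuplestoreScan; /* assumed node tag */
    pathnode->parent = rel;
    pathnode->pathtarget = rel->reltarget;
    pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                     required_outer);
    pathnode->parallel_aware = false;   /* cf. set_rel_consider_parallel */
    pathnode->parallel_safe = rel->consider_parallel;
    pathnode->parallel_workers = 0;
    pathnode->pathkeys = NIL;   /* tuplestore scans deliver unordered results */

    cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);

    return pathnode;
}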
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 92de2b7d48..ed07e2f655 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1517,6 +1517,43 @@ cost_ctescan(Path *path, PlannerInfo *root,
 }

 /*
+ * cost_namedtuplestorescan
+ *      Determines and returns the cost of scanning a named tuplestore.
+ */
+void
+cost_namedtuplestorescan(Path *path, PlannerInfo *root,
+                         RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+    Cost        startup_cost = 0;
+    Cost        run_cost = 0;
+    QualCost    qpqual_cost;
+    Cost        cpu_per_tuple;
+
+    /* Should only be applied to base relations that are Tuplestores */
+    Assert(baserel->relid > 0);
+    Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
+
+    /* Mark the path with the correct row estimate */
+    if (param_info)
+        path->rows = param_info->ppi_rows;
+    else
+        path->rows = baserel->rows;
+
+    /* Charge one CPU tuple cost per row for tuplestore manipulation */
+    cpu_per_tuple = cpu_tuple_cost;
+
+    /* Add scanning CPU costs */
+    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+    startup_cost += qpqual_cost.startup;
+    cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
+    run_cost += cpu_per_tuple * baserel->tuples;
+
+    path->startup_cost = startup_cost;
+    path->total_cost = startup_cost + run_cost;
+}
+
+/*
  * cost_recursive_union
  *      Determines and returns the cost of performing a recursive union,
  *      and also the estimated output size.
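[Editor's note] The per-tuple charge works out to 2 * cpu_tuple_cost plus any qual cost: one cpu_tuple_cost for fetching a row from the tuplestore and one for the ordinary per-row scan overhead, the same convention cost_ctescan uses. Concretely, with the default cpu_tuple_cost of 0.01, no restriction quals, and the 1000-row fallback estimate applied below, an unparameterized scan gets startup_cost = 0 and total_cost = (0.01 + 0.01) * 1000 = 20.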
@@ -4685,6 +4722,39 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
 }

 /*
+ * set_namedtuplestore_size_estimates
+ *      Set the size estimates for a base relation that is a tuplestore reference.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+    RangeTblEntry *rte;
+
+    /* Should only be applied to base relations that are tuplestore references */
+    Assert(rel->relid > 0);
+    rte = planner_rt_fetch(rel->relid, root);
+    Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
+
+    /*
+     * Use the estimate provided by the code which is generating the named
+     * tuplestore.  In some cases, the actual number might be available; in
+     * others the same plan will be re-used, so a "typical" value might be
+     * estimated and used.
+     */
+    rel->tuples = rte->enrtuples;
+    if (rel->tuples < 0)
+        rel->tuples = 1000;
+
+    /* Now estimate number of output rows, etc */
+    set_baserel_size_estimates(root, rel);
+}
+
+/*
  * set_foreign_size_estimates
  *      Set the size estimates for a base relation that is a foreign table.
  *
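[Editor's note] Both new costsize.c functions are called from outside the file (cost_namedtuplestorescan from the path constructor, set_namedtuplestore_size_estimates from allpaths.c above), so they also need extern declarations in a header such as src/include/optimizer/cost.h, which lies outside this diff's path filter. A sketch mirroring the definitions above:

extern void cost_namedtuplestorescan(Path *path, PlannerInfo *root,
                         RelOptInfo *baserel, ParamPathInfo *param_info);
extern void set_namedtuplestore_size_estimates(PlannerInfo *root,
                         RelOptInfo *rel);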