Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/allpaths.c      | 45
-rw-r--r--  src/backend/optimizer/path/costsize.c      | 70
-rw-r--r--  src/backend/optimizer/plan/createplan.c    | 71
-rw-r--r--  src/backend/optimizer/plan/setrefs.c       | 16
-rw-r--r--  src/backend/optimizer/plan/subselect.c     |  4
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c  |  2
-rw-r--r--  src/backend/optimizer/util/clauses.c       |  2
-rw-r--r--  src/backend/optimizer/util/pathnode.c      | 26
-rw-r--r--  src/backend/optimizer/util/plancat.c       |  7
-rw-r--r--  src/backend/optimizer/util/relnode.c       |  1
10 files changed, 240 insertions(+), 4 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index a1e1a87c29..343b35aa32 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -111,6 +111,8 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
+static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -396,6 +398,9 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
else
set_cte_pathlist(root, rel, rte);
break;
+ case RTE_NAMEDTUPLESTORE:
+ set_namedtuplestore_pathlist(root, rel, rte);
+ break;
default:
elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
break;
@@ -464,6 +469,9 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
case RTE_CTE:
/* CTE reference --- fully handled during set_rel_size */
break;
+ case RTE_NAMEDTUPLESTORE:
+ /* tuplestore reference --- fully handled during set_rel_size */
+ break;
default:
elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
break;
@@ -639,6 +647,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
* executed only once.
*/
return;
+
+ case RTE_NAMEDTUPLESTORE:
+ /*
+ * tuplestore cannot be shared, at least without more
+ * infrastructure to support that.
+ */
+ return;
}
/*
@@ -2090,6 +2105,36 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
}
/*
+ * set_namedtuplestore_pathlist
+ * Build the (single) access path for a named tuplestore RTE
+ *
+ * There's no need for a separate set_namedtuplestore_size phase, since we
+ * don't support join-qual-parameterized paths for tuplestores.
+ */
+static void
+set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_namedtuplestore_size_estimates(root, rel);
+
+ /*
+ * We don't support pushing join clauses into the quals of a tuplestore
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
+
+ /* Select cheapest path (pretty easy in this case...) */
+ set_cheapest(rel);
+}
+
+/*
* set_worktable_pathlist
* Build the (single) access path for a self-reference CTE RTE
*
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 92de2b7d48..ed07e2f655 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1517,6 +1517,43 @@ cost_ctescan(Path *path, PlannerInfo *root,
}
/*
+ * cost_namedtuplestorescan
+ * Determines and returns the cost of scanning a named tuplestore.
+ */
+void
+cost_namedtuplestorescan(Path *path, PlannerInfo *root,
+ RelOptInfo *baserel, ParamPathInfo *param_info)
+{
+ Cost startup_cost = 0;
+ Cost run_cost = 0;
+ QualCost qpqual_cost;
+ Cost cpu_per_tuple;
+
+ /* Should only be applied to base relations that are Tuplestores */
+ Assert(baserel->relid > 0);
+ Assert(baserel->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /* Mark the path with the correct row estimate */
+ if (param_info)
+ path->rows = param_info->ppi_rows;
+ else
+ path->rows = baserel->rows;
+
+ /* Charge one CPU tuple cost per row for tuplestore manipulation */
+ cpu_per_tuple = cpu_tuple_cost;
+
+ /* Add scanning CPU costs */
+ get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
+
+ startup_cost += qpqual_cost.startup;
+ cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
+ run_cost += cpu_per_tuple * baserel->tuples;
+
+ path->startup_cost = startup_cost;
+ path->total_cost = startup_cost + run_cost;
+}
+
+/*
* cost_recursive_union
* Determines and returns the cost of performing a recursive union,
* and also the estimated output size.
@@ -4685,6 +4722,39 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, double cte_rows)
}
/*
+ * set_namedtuplestore_size_estimates
+ * Set the size estimates for a base relation that is a tuplestore reference.
+ *
+ * The rel's targetlist and restrictinfo list must have been constructed
+ * already.
+ *
+ * We set the same fields as set_baserel_size_estimates.
+ */
+void
+set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
+{
+ RangeTblEntry *rte;
+
+ /* Should only be applied to base relations that are tuplestore references */
+ Assert(rel->relid > 0);
+ rte = planner_rt_fetch(rel->relid, root);
+ Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /*
+ * Use the estimate provided by the code which is generating the named
+ * tuplestore. In some cases, the actual number might be available; in
+ * others the same plan will be re-used, so a "typical" value might be
+ * estimated and used.
+ */
+ rel->tuples = rte->enrtuples;
+ if (rel->tuples < 0)
+ rel->tuples = 1000;
+
+ /* Now estimate number of output rows, etc */
+ set_baserel_size_estimates(root, rel);
+}
+
+/*
* set_foreign_size_estimates
* Set the size estimates for a base relation that is a foreign table.
*
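
As a rough worked example of what the new cost function produces (not part of
the patch itself): assuming the default cpu_tuple_cost of 0.01, no restriction
quals, and the 1000-row fallback estimate used when enrtuples is negative, the
numbers come out to

    startup_cost  = 0
    cpu_per_tuple = cpu_tuple_cost + cpu_tuple_cost = 0.02
    run_cost      = 0.02 * 1000 = 20.0
    total_cost    = 20.0

In other words, a named tuplestore scan is costed much like a CTE scan over an
already-materialized tuplestore: one cpu_tuple_cost for fetching each row from
the tuplestore plus one for processing it, with no disk access charged.
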
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index ed06a8de78..2a78595e1f 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -139,6 +139,8 @@ static TableFuncScan *create_tablefuncscan_plan(PlannerInfo *root, Path *best_pa
List *tlist, List *scan_clauses);
static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
+static NamedTuplestoreScan *create_namedtuplestorescan_plan(PlannerInfo *root,
+ Path *best_path, List *tlist, List *scan_clauses);
static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
@@ -197,6 +199,8 @@ static TableFuncScan *make_tablefuncscan(List *qptlist, List *qpqual,
Index scanrelid, TableFunc *tablefunc);
static CteScan *make_ctescan(List *qptlist, List *qpqual,
Index scanrelid, int ctePlanId, int cteParam);
+static NamedTuplestoreScan *make_namedtuplestorescan(List *qptlist, List *qpqual,
+ Index scanrelid, char *enrname);
static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
Index scanrelid, int wtParam);
static Append *make_append(List *appendplans, List *tlist, List *partitioned_rels);
@@ -366,6 +370,7 @@ create_plan_recurse(PlannerInfo *root, Path *best_path, int flags)
case T_ValuesScan:
case T_CteScan:
case T_WorkTableScan:
+ case T_NamedTuplestoreScan:
case T_ForeignScan:
case T_CustomScan:
plan = create_scan_plan(root, best_path, flags);
@@ -668,6 +673,13 @@ create_scan_plan(PlannerInfo *root, Path *best_path, int flags)
scan_clauses);
break;
+ case T_NamedTuplestoreScan:
+ plan = (Plan *) create_namedtuplestorescan_plan(root,
+ best_path,
+ tlist,
+ scan_clauses);
+ break;
+
case T_WorkTableScan:
plan = (Plan *) create_worktablescan_plan(root,
best_path,
@@ -3286,6 +3298,45 @@ create_ctescan_plan(PlannerInfo *root, Path *best_path,
}
/*
+ * create_namedtuplestorescan_plan
+ * Returns a tuplestorescan plan for the base relation scanned by
+ * 'best_path' with restriction clauses 'scan_clauses' and targetlist
+ * 'tlist'.
+ */
+static NamedTuplestoreScan *
+create_namedtuplestorescan_plan(PlannerInfo *root, Path *best_path,
+ List *tlist, List *scan_clauses)
+{
+ NamedTuplestoreScan *scan_plan;
+ Index scan_relid = best_path->parent->relid;
+ RangeTblEntry *rte;
+
+ Assert(scan_relid > 0);
+ rte = planner_rt_fetch(scan_relid, root);
+ Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);
+
+ /* Sort clauses into best execution order */
+ scan_clauses = order_qual_clauses(root, scan_clauses);
+
+ /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
+ scan_clauses = extract_actual_clauses(scan_clauses, false);
+
+ /* Replace any outer-relation variables with nestloop params */
+ if (best_path->param_info)
+ {
+ scan_clauses = (List *)
+ replace_nestloop_params(root, (Node *) scan_clauses);
+ }
+
+ scan_plan = make_namedtuplestorescan(tlist, scan_clauses, scan_relid,
+ rte->enrname);
+
+ copy_generic_path_info(&scan_plan->scan.plan, best_path);
+
+ return scan_plan;
+}
+
+/*
* create_worktablescan_plan
* Returns a worktablescan plan for the base relation scanned by 'best_path'
* with restriction clauses 'scan_clauses' and targetlist 'tlist'.
@@ -5120,6 +5171,26 @@ make_ctescan(List *qptlist,
return node;
}
+static NamedTuplestoreScan *
+make_namedtuplestorescan(List *qptlist,
+ List *qpqual,
+ Index scanrelid,
+ char *enrname)
+{
+ NamedTuplestoreScan *node = makeNode(NamedTuplestoreScan);
+ Plan *plan = &node->scan.plan;
+
+ /* cost should be inserted by caller */
+ plan->targetlist = qptlist;
+ plan->qual = qpqual;
+ plan->lefttree = NULL;
+ plan->righttree = NULL;
+ node->scan.scanrelid = scanrelid;
+ node->enrname = enrname;
+
+ return node;
+}
+
static WorkTableScan *
make_worktablescan(List *qptlist,
List *qpqual,
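
For reference, the NamedTuplestoreScan node that make_namedtuplestorescan fills
in is declared elsewhere in the same patch (include/nodes/plannodes.h, not shown
in this diff). It is essentially a plain Scan plus the name under which the
executor can look the tuplestore up in the query environment, roughly:

    typedef struct NamedTuplestoreScan
    {
        Scan        scan;
        /* name of the ephemeral named relation, e.g. a transition table */
        char       *enrname;
    } NamedTuplestoreScan;
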
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 4e3f6ee960..cdb8e95deb 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -591,6 +591,17 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
fix_scan_list(root, splan->scan.plan.qual, rtoffset);
}
break;
+ case T_NamedTuplestoreScan:
+ {
+ NamedTuplestoreScan *splan = (NamedTuplestoreScan *) plan;
+
+ splan->scan.scanrelid += rtoffset;
+ splan->scan.plan.targetlist =
+ fix_scan_list(root, splan->scan.plan.targetlist, rtoffset);
+ splan->scan.plan.qual =
+ fix_scan_list(root, splan->scan.plan.qual, rtoffset);
+ }
+ break;
case T_WorkTableScan:
{
WorkTableScan *splan = (WorkTableScan *) plan;
@@ -2571,6 +2582,11 @@ extract_query_dependencies_walker(Node *node, PlannerInfo *context)
if (rte->rtekind == RTE_RELATION)
context->glob->relationOids =
lappend_oid(context->glob->relationOids, rte->relid);
+ else if (rte->rtekind == RTE_NAMEDTUPLESTORE &&
+ OidIsValid(rte->relid))
+ context->glob->relationOids =
+ lappend_oid(context->glob->relationOids,
+ rte->relid);
}
/* And recurse into the query's subexpressions */
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index db0e5b31e2..87cc44d678 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -2476,6 +2476,10 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params,
context.paramids = bms_add_members(context.paramids, scan_params);
break;
+ case T_NamedTuplestoreScan:
+ context.paramids = bms_add_members(context.paramids, scan_params);
+ break;
+
case T_ForeignScan:
{
ForeignScan *fscan = (ForeignScan *) plan;
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 348c6b791f..749ea805f8 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -1123,6 +1123,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
break;
case RTE_JOIN:
case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
/* these can't contain any lateral references */
break;
}
@@ -1977,6 +1978,7 @@ replace_vars_in_jointree(Node *jtnode,
break;
case RTE_JOIN:
case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
/* these shouldn't be marked LATERAL */
Assert(false);
break;
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index a578867cce..59d71c1b32 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -4910,7 +4910,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
src,
(ParserSetupHook) sql_fn_parser_setup,
- pinfo);
+ pinfo, NULL);
if (list_length(querytree_list) != 1)
goto fail;
querytree = linitial(querytree_list);
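
The extra NULL argument above is the QueryEnvironment pointer that this patch
series threads through the parse/analyze entry points; passing NULL means the
inlined SQL function is analyzed without access to any ephemeral named
relations. As best as can be inferred from this call site, the signature after
the change is roughly:

    List *pg_analyze_and_rewrite_params(RawStmt *parsetree,
                                        const char *query_string,
                                        ParserSetupHook parserSetup,
                                        void *parserSetupArg,
                                        QueryEnvironment *queryEnv);
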
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index c6298072c9..8536212177 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1893,6 +1893,32 @@ create_ctescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer)
}
/*
+ * create_namedtuplestorescan_path
+ * Creates a path corresponding to a scan of a named tuplestore, returning
+ * the pathnode.
+ */
+Path *
+create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
+ Relids required_outer)
+{
+ Path *pathnode = makeNode(Path);
+
+ pathnode->pathtype = T_NamedTuplestoreScan;
+ pathnode->parent = rel;
+ pathnode->pathtarget = rel->reltarget;
+ pathnode->param_info = get_baserel_parampathinfo(root, rel,
+ required_outer);
+ pathnode->parallel_aware = false;
+ pathnode->parallel_safe = rel->consider_parallel;
+ pathnode->parallel_workers = 0;
+ pathnode->pathkeys = NIL; /* result is always unordered */
+
+ cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);
+
+ return pathnode;
+}
+
+/*
* create_worktablescan_path
* Creates a path corresponding to a scan of a self-reference CTE,
* returning the pathnode.
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index cc88dcc28e..1cd21c0fdc 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -1446,9 +1446,9 @@ relation_excluded_by_constraints(PlannerInfo *root,
* dropped cols.
*
* We also support building a "physical" tlist for subqueries, functions,
- * values lists, table expressions and CTEs, since the same optimization can
- * occur in SubqueryScan, FunctionScan, ValuesScan, CteScan, TableFunc
- * and WorkTableScan nodes.
+ * values lists, table expressions, and CTEs, since the same optimization can
+ * occur in SubqueryScan, FunctionScan, ValuesScan, CteScan, TableFunc,
+ * NamedTuplestoreScan, and WorkTableScan nodes.
*/
List *
build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
@@ -1523,6 +1523,7 @@ build_physical_tlist(PlannerInfo *root, RelOptInfo *rel)
case RTE_TABLEFUNC:
case RTE_VALUES:
case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
/* Not all of these can have dropped cols, but share code anyway */
expandRTE(rte, varno, 0, -1, true /* include dropped */ ,
NULL, &colvars);
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 6ab78545c3..7912df0baa 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -156,6 +156,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
case RTE_TABLEFUNC:
case RTE_VALUES:
case RTE_CTE:
+ case RTE_NAMEDTUPLESTORE:
/*
* Subquery, function, tablefunc, or values list --- set up attr