00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067
00068
00069 #include "postgres.h"
00070
00071 #ifdef _MSC_VER
00072 #include <float.h>
00073 #endif
00074 #include <math.h>
00075
00076 #include "access/htup_details.h"
00077 #include "executor/executor.h"
00078 #include "executor/nodeHash.h"
00079 #include "miscadmin.h"
00080 #include "nodes/nodeFuncs.h"
00081 #include "optimizer/clauses.h"
00082 #include "optimizer/cost.h"
00083 #include "optimizer/pathnode.h"
00084 #include "optimizer/paths.h"
00085 #include "optimizer/placeholder.h"
00086 #include "optimizer/plancat.h"
00087 #include "optimizer/planmain.h"
00088 #include "optimizer/restrictinfo.h"
00089 #include "parser/parsetree.h"
00090 #include "utils/lsyscache.h"
00091 #include "utils/selfuncs.h"
00092 #include "utils/spccache.h"
00093 #include "utils/tuplesort.h"
00094
00095
00096 #define LOG2(x) (log(x) / 0.693147180559945)
00097
00098
00099 double seq_page_cost = DEFAULT_SEQ_PAGE_COST;
00100 double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
00101 double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
00102 double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
00103 double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
00104
00105 int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
00106
00107 Cost disable_cost = 1.0e10;
00108
00109 bool enable_seqscan = true;
00110 bool enable_indexscan = true;
00111 bool enable_indexonlyscan = true;
00112 bool enable_bitmapscan = true;
00113 bool enable_tidscan = true;
00114 bool enable_sort = true;
00115 bool enable_hashagg = true;
00116 bool enable_nestloop = true;
00117 bool enable_material = true;
00118 bool enable_mergejoin = true;
00119 bool enable_hashjoin = true;
00120
00121 typedef struct
00122 {
00123 PlannerInfo *root;
00124 QualCost total;
00125 } cost_qual_eval_context;
00126
00127 static MergeScanSelCache *cached_scansel(PlannerInfo *root,
00128 RestrictInfo *rinfo,
00129 PathKey *pathkey);
00130 static void cost_rescan(PlannerInfo *root, Path *path,
00131 Cost *rescan_startup_cost, Cost *rescan_total_cost);
00132 static bool cost_qual_eval_walker(Node *node, cost_qual_eval_context *context);
00133 static void get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
00134 ParamPathInfo *param_info,
00135 QualCost *qpqual_cost);
00136 static bool has_indexed_join_quals(NestPath *joinpath);
00137 static double approx_tuple_count(PlannerInfo *root, JoinPath *path,
00138 List *quals);
00139 static double calc_joinrel_size_estimate(PlannerInfo *root,
00140 double outer_rows,
00141 double inner_rows,
00142 SpecialJoinInfo *sjinfo,
00143 List *restrictlist);
00144 static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
00145 static double relation_byte_size(double tuples, int width);
00146 static double page_size(double tuples, int width);
00147
00148
00149
00150
00151
00152
00153 double
00154 clamp_row_est(double nrows)
00155 {
00156
00157
00158
00159
00160
00161 if (nrows <= 1.0)
00162 nrows = 1.0;
00163 else
00164 nrows = rint(nrows);
00165
00166 return nrows;
00167 }
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177 void
00178 cost_seqscan(Path *path, PlannerInfo *root,
00179 RelOptInfo *baserel, ParamPathInfo *param_info)
00180 {
00181 Cost startup_cost = 0;
00182 Cost run_cost = 0;
00183 double spc_seq_page_cost;
00184 QualCost qpqual_cost;
00185 Cost cpu_per_tuple;
00186
00187
00188 Assert(baserel->relid > 0);
00189 Assert(baserel->rtekind == RTE_RELATION);
00190
00191
00192 if (param_info)
00193 path->rows = param_info->ppi_rows;
00194 else
00195 path->rows = baserel->rows;
00196
00197 if (!enable_seqscan)
00198 startup_cost += disable_cost;
00199
00200
00201 get_tablespace_page_costs(baserel->reltablespace,
00202 NULL,
00203 &spc_seq_page_cost);
00204
00205
00206
00207
00208 run_cost += spc_seq_page_cost * baserel->pages;
00209
00210
00211 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
00212
00213 startup_cost += qpqual_cost.startup;
00214 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
00215 run_cost += cpu_per_tuple * baserel->tuples;
00216
00217 path->startup_cost = startup_cost;
00218 path->total_cost = startup_cost + run_cost;
00219 }
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
/*
 * cost_index
 *	  Determines and returns the cost of scanning a relation using an index.
 *
 * 'path' describes the indexscan under consideration, and is complete
 *		except for the fields to be set by this routine
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *		estimates of caching behavior
 *
 * In addition to rows, startup_cost and total_cost, cost_index() sets the
 * path's indextotalcost and indexselectivity fields.  These values will be
 * needed if the IndexPath is used in a BitmapIndexScan.
 */
void
cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
{
    IndexOptInfo *index = path->indexinfo;
    RelOptInfo *baserel = index->rel;
    bool        indexonly = (path->path.pathtype == T_IndexOnlyScan);
    List       *allclauses;
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexStartupCost;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    double      indexCorrelation,
                csquared;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    Cost        min_IO_cost,
                max_IO_cost;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    double      tuples_fetched;
    double      pages_fetched;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo) &&
           IsA(index, IndexOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (path->path.param_info)
    {
        path->path.rows = path->path.param_info->ppi_rows;
        /* also get the set of clauses that should be enforced by the scan */
        allclauses = list_concat(list_copy(path->path.param_info->ppi_clauses),
                                 baserel->baserestrictinfo);
    }
    else
    {
        path->path.rows = baserel->rows;
        /* allclauses should just be the rel's restriction clauses */
        allclauses = baserel->baserestrictinfo;
    }

    if (!enable_indexscan)
        startup_cost += disable_cost;
    /* enable_indexonlyscan is presumably checked elsewhere -- not here */

    /*
     * Call index-access-method-specific code to estimate the processing cost
     * for scanning the index, as well as the selectivity of the index (ie,
     * the fraction of main-table tuples we will have to retrieve) and its
     * correlation to the main-table tuple order.
     */
    OidFunctionCall7(index->amcostestimate,
                     PointerGetDatum(root),
                     PointerGetDatum(path),
                     Float8GetDatum(loop_count),
                     PointerGetDatum(&indexStartupCost),
                     PointerGetDatum(&indexTotalCost),
                     PointerGetDatum(&indexSelectivity),
                     PointerGetDatum(&indexCorrelation));

    /*
     * Save amcostestimate's results for possible use in bitmap scan planning.
     * We don't bother to save indexStartupCost or indexCorrelation, because
     * they are not stored in the path (a bitmap scan evidently has no use for
     * them).
     */
    path->indextotalcost = indexTotalCost;
    path->indexselectivity = indexSelectivity;

    /* all costs for touching index itself are included here */
    startup_cost += indexStartupCost;
    run_cost += indexTotalCost - indexStartupCost;

    /* estimate number of main-table tuples fetched */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    /* fetch estimated page costs for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*----------
     * Estimate the number of main-table pages fetched and the I/O cost.
     *
     * In the uncorrelated case we use the Mackert-Lohman approximation
     * (see index_pages_fetched) and charge spc_random_page_cost per page.
     *
     * In the perfectly correlated case the pages fetched should be exactly
     * selectivity * table_size, and all but the first fetch are sequential,
     * so the charge is
     *		spc_random_page_cost + (pages_fetched - 1) * spc_seq_page_cost
     *
     * For partially correlated indexes we interpolate linearly between the
     * two extremes using the squared correlation (csquared, below).
     *----------
     */
    if (loop_count > 1)
    {
        /*
         * For repeated indexscans, scale up the number of tuples fetched in
         * the Mackert-Lohman formula by the number of scans, so that we
         * estimate the total pages fetched by all the scans; then pro-rate
         * the cost for one scan.  All fetches are treated as random here.
         */
        pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        /* index-only scans skip heap pages that are all-visible */
        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        max_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;

        /*
         * In the perfectly correlated case, each scan touches
         * selectivity * table_size pages, and we apply the Mackert-Lohman
         * formula at the page level to estimate cross-scan caching.  Fetches
         * are still charged as random, which likely overestimates, but
         * correcting for that without double-counting cache effects is hard.
         */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        pages_fetched = index_pages_fetched(pages_fetched * loop_count,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        min_IO_cost = (pages_fetched * spc_random_page_cost) / loop_count;
    }
    else
    {
        /*
         * Normal (single-scan) case: apply the Mackert-Lohman formula for
         * the uncorrelated bound, and the sequential-after-first-page model
         * for the correlated bound.
         */
        pages_fetched = index_pages_fetched(tuples_fetched,
                                            baserel->pages,
                                            (double) index->pages,
                                            root);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        /* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
        max_IO_cost = pages_fetched * spc_random_page_cost;

        /* min_IO_cost is for the perfectly correlated case (csquared=1) */
        pages_fetched = ceil(indexSelectivity * (double) baserel->pages);

        if (indexonly)
            pages_fetched = ceil(pages_fetched * (1.0 - baserel->allvisfrac));

        if (pages_fetched > 0)
        {
            /* first page is random, the rest sequential */
            min_IO_cost = spc_random_page_cost;
            if (pages_fetched > 1)
                min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
        }
        else
            min_IO_cost = 0;
    }

    /*
     * Interpolate between the extremes based on the squared index-order
     * correlation to get the total disk I/O cost for main-table accesses.
     */
    csquared = indexCorrelation * indexCorrelation;

    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);

    /*
     * Estimate CPU costs per tuple.
     *
     * We want cpu_tuple_cost plus the evaluation costs of any qual clauses
     * that must be rechecked at the heap tuple; that list is approximated as
     * allclauses minus the clauses appearing in indexquals (pointer equality
     * is assumed sufficient to recognize duplicate RestrictInfos).
     */
    cost_qual_eval(&qpqual_cost,
                   list_difference_ptr(allclauses, path->indexquals),
                   root);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;

    run_cost += cpu_per_tuple * tuples_fetched;

    path->path.startup_cost = startup_cost;
    path->path.total_cost = startup_cost + run_cost;
}
00458
00459
00460
00461
00462
00463
00464
00465
00466
00467
00468
00469
00470
00471
00472
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
00485
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
/*
 * index_pages_fetched
 *	  Estimate the number of pages actually fetched after accounting for
 *	  cache effects.
 *
 * This implements the approximation of Mackert and Lohman, "Index Scans
 * Using a Finite LRU Buffer: A Validated I/O Model" (ACM TODS 14(3), 1989):
 *
 *	PF =
 *		min(2TNs/(2T+Ns), T)			when T <= b
 *		2TNs/(2T+Ns)					when T > b and Ns <= 2Tb/(2T-b)
 *		b + (Ns - 2Tb/(2T-b))*(T-b)/T	when T > b and Ns > 2Tb/(2T-b)
 * where
 *		T = # pages in table
 *		Ns = number of tuples fetched (passed in as tuples_fetched)
 *		b = # buffer pages available for this table
 *
 * 'pages' is the number of pages in the object under consideration.
 * 'index_pages' is added to root->total_table_pages to form the pool of
 * pages competing for effective_cache_size; b is this table's pro-rated
 * share of that cache.
 *
 * Caller is expected to have ensured that tuples_fetched is greater than
 * zero and integral (see clamp_row_est); the result is likewise positive
 * and integral.
 */
double
index_pages_fetched(double tuples_fetched, BlockNumber pages,
                    double index_pages, PlannerInfo *root)
{
    double      pages_fetched;
    double      total_pages;
    double      T,
                b;

    /* T is # pages in table, but don't allow it to be zero */
    T = (pages > 1) ? (double) pages : 1.0;

    /* Compute the number of pages assumed to compete for cache space */
    total_pages = root->total_table_pages + index_pages;
    total_pages = Max(total_pages, 1.0);
    Assert(T <= total_pages);

    /* b is this table's pro-rated share of effective_cache_size */
    b = (double) effective_cache_size * T / total_pages;

    /* force it positive and integral */
    if (b <= 1.0)
        b = 1.0;
    else
        b = ceil(b);

    /* This part is the Mackert and Lohman formula */
    if (T <= b)
    {
        /* whole table fits in cache: never fetch a page twice */
        pages_fetched =
            (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)
            pages_fetched = T;
        else
            pages_fetched = ceil(pages_fetched);
    }
    else
    {
        double      lim;

        /* lim = 2Tb/(2T-b): the point at which re-reads begin */
        lim = (2.0 * T * b) / (2.0 * T - b);
        if (tuples_fetched <= lim)
        {
            pages_fetched =
                (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        }
        else
        {
            /* beyond lim, additional fetches hit uncached pages */
            pages_fetched =
                b + (tuples_fetched - lim) * (T - b) / T;
        }
        pages_fetched = ceil(pages_fetched);
    }
    return pages_fetched;
}
00552
00553
00554
00555
00556
00557
00558
00559
00560
00561
00562 static double
00563 get_indexpath_pages(Path *bitmapqual)
00564 {
00565 double result = 0;
00566 ListCell *l;
00567
00568 if (IsA(bitmapqual, BitmapAndPath))
00569 {
00570 BitmapAndPath *apath = (BitmapAndPath *) bitmapqual;
00571
00572 foreach(l, apath->bitmapquals)
00573 {
00574 result += get_indexpath_pages((Path *) lfirst(l));
00575 }
00576 }
00577 else if (IsA(bitmapqual, BitmapOrPath))
00578 {
00579 BitmapOrPath *opath = (BitmapOrPath *) bitmapqual;
00580
00581 foreach(l, opath->bitmapquals)
00582 {
00583 result += get_indexpath_pages((Path *) lfirst(l));
00584 }
00585 }
00586 else if (IsA(bitmapqual, IndexPath))
00587 {
00588 IndexPath *ipath = (IndexPath *) bitmapqual;
00589
00590 result = (double) ipath->indexinfo->pages;
00591 }
00592 else
00593 elog(ERROR, "unrecognized node type: %d", nodeTag(bitmapqual));
00594
00595 return result;
00596 }
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
/*
 * cost_bitmap_heap_scan
 *	  Determines and returns the cost of scanning a relation using a bitmap
 *	  index-then-heap plan.
 *
 * 'baserel' is the relation to be scanned
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 * 'bitmapqual' is a tree of IndexPaths, BitmapAndPaths, and BitmapOrPaths
 * 'loop_count' is the number of repetitions of the indexscan to factor into
 *		estimates of caching behavior
 *
 * Note: the component IndexPaths in bitmapqual should presumably have been
 * costed using the same loop_count.
 */
void
cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
                      ParamPathInfo *param_info,
                      Path *bitmapqual, double loop_count)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    Cost        cost_per_page;
    double      tuples_fetched;
    double      pages_fetched;
    double      spc_seq_page_cost,
                spc_random_page_cost;
    double      T;

    /* Should only be applied to base relations */
    Assert(IsA(baserel, RelOptInfo));
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    if (!enable_bitmapscan)
        startup_cost += disable_cost;

    /*
     * Fetch the total cost of obtaining the bitmap, as well as its total
     * selectivity.  Building the bitmap happens entirely before any heap
     * page is fetched, so it all goes into startup cost.
     */
    cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);

    startup_cost += indexTotalCost;

    /* Fetch estimated page costs for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              &spc_seq_page_cost);

    /*
     * Estimate the number of heap tuples, and then heap pages, that will be
     * fetched.
     */
    tuples_fetched = clamp_row_est(indexSelectivity * baserel->tuples);

    T = (baserel->pages > 1) ? (double) baserel->pages : 1.0;

    if (loop_count > 1)
    {
        /*
         * For repeated bitmap scans, scale up the number of tuples fetched
         * in the Mackert-Lohman formula by the number of scans, so that we
         * estimate the number of pages fetched by all scans; then pro-rate
         * for one scan.
         */
        pages_fetched = index_pages_fetched(tuples_fetched * loop_count,
                                            baserel->pages,
                                            get_indexpath_pages(bitmapqual),
                                            root);
        pages_fetched /= loop_count;
    }
    else
    {
        /*
         * For a single scan, the formula 2TN/(2T+N) is the Mackert-Lohman
         * estimate for the T <= b case (no page is re-read, since the bitmap
         * visits pages in order).
         */
        pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
    }
    /* clamp to table size and round up */
    if (pages_fetched >= T)
        pages_fetched = T;
    else
        pages_fetched = ceil(pages_fetched);

    /*
     * For small numbers of pages, charge spc_random_page_cost apiece; as the
     * fraction of the table fetched approaches 1, approach spc_seq_page_cost
     * instead.  The sqrt interpolation makes the transition nonlinear.
     */
    if (pages_fetched >= 2.0)
        cost_per_page = spc_random_page_cost -
            (spc_random_page_cost - spc_seq_page_cost)
            * sqrt(pages_fetched / T);
    else
        cost_per_page = spc_random_page_cost;

    run_cost += pages_fetched * cost_per_page;

    /*
     * Estimate CPU costs per tuple.
     *
     * We charge the full evaluation cost of all the scan clauses for every
     * fetched tuple, i.e. we assume quals are always rechecked at the heap
     * tuple (as would be needed if the bitmap became lossy).
     */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    startup_cost += qpqual_cost.startup;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;

    run_cost += cpu_per_tuple * tuples_fetched;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
00727
00728
00729
00730
00731
00732 void
00733 cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
00734 {
00735 if (IsA(path, IndexPath))
00736 {
00737 *cost = ((IndexPath *) path)->indextotalcost;
00738 *selec = ((IndexPath *) path)->indexselectivity;
00739
00740
00741
00742
00743
00744
00745
00746 *cost += 0.1 * cpu_operator_cost * path->rows;
00747 }
00748 else if (IsA(path, BitmapAndPath))
00749 {
00750 *cost = path->total_cost;
00751 *selec = ((BitmapAndPath *) path)->bitmapselectivity;
00752 }
00753 else if (IsA(path, BitmapOrPath))
00754 {
00755 *cost = path->total_cost;
00756 *selec = ((BitmapOrPath *) path)->bitmapselectivity;
00757 }
00758 else
00759 {
00760 elog(ERROR, "unrecognized node type: %d", nodeTag(path));
00761 *cost = *selec = 0;
00762 }
00763 }
00764
00765
00766
00767
00768
00769
00770
00771
00772
00773
00774
00775 void
00776 cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
00777 {
00778 Cost totalCost;
00779 Selectivity selec;
00780 ListCell *l;
00781
00782
00783
00784
00785
00786
00787
00788
00789
00790
00791 totalCost = 0.0;
00792 selec = 1.0;
00793 foreach(l, path->bitmapquals)
00794 {
00795 Path *subpath = (Path *) lfirst(l);
00796 Cost subCost;
00797 Selectivity subselec;
00798
00799 cost_bitmap_tree_node(subpath, &subCost, &subselec);
00800
00801 selec *= subselec;
00802
00803 totalCost += subCost;
00804 if (l != list_head(path->bitmapquals))
00805 totalCost += 100.0 * cpu_operator_cost;
00806 }
00807 path->bitmapselectivity = selec;
00808 path->path.rows = 0;
00809 path->path.startup_cost = totalCost;
00810 path->path.total_cost = totalCost;
00811 }
00812
00813
00814
00815
00816
00817
00818
00819 void
00820 cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
00821 {
00822 Cost totalCost;
00823 Selectivity selec;
00824 ListCell *l;
00825
00826
00827
00828
00829
00830
00831
00832
00833
00834
00835
00836 totalCost = 0.0;
00837 selec = 0.0;
00838 foreach(l, path->bitmapquals)
00839 {
00840 Path *subpath = (Path *) lfirst(l);
00841 Cost subCost;
00842 Selectivity subselec;
00843
00844 cost_bitmap_tree_node(subpath, &subCost, &subselec);
00845
00846 selec += subselec;
00847
00848 totalCost += subCost;
00849 if (l != list_head(path->bitmapquals) &&
00850 !IsA(subpath, IndexPath))
00851 totalCost += 100.0 * cpu_operator_cost;
00852 }
00853 path->bitmapselectivity = Min(selec, 1.0);
00854 path->path.rows = 0;
00855 path->path.startup_cost = totalCost;
00856 path->path.total_cost = totalCost;
00857 }
00858
00859
00860
00861
00862
00863
00864
00865
00866
/*
 * cost_tidscan
 *	  Determines and returns the cost of scanning a relation using TIDs.
 *
 * 'baserel' is the relation to be scanned
 * 'tidquals' is the list of TID-checkable quals
 * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL
 */
void
cost_tidscan(Path *path, PlannerInfo *root,
             RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    bool        isCurrentOf = false;
    QualCost    qpqual_cost;
    Cost        cpu_per_tuple;
    QualCost    tid_qual_cost;
    int         ntuples;
    ListCell   *l;
    double      spc_random_page_cost;

    /* Should only be applied to base relations */
    Assert(baserel->relid > 0);
    Assert(baserel->rtekind == RTE_RELATION);

    /* Mark the path with the correct row estimate */
    if (param_info)
        path->rows = param_info->ppi_rows;
    else
        path->rows = baserel->rows;

    /* Count how many tuples we expect to retrieve */
    ntuples = 0;
    foreach(l, tidquals)
    {
        if (IsA(lfirst(l), ScalarArrayOpExpr))
        {
            /* Each element of the array could match one tuple */
            ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
            Node       *arraynode = (Node *) lsecond(saop->args);

            ntuples += estimate_array_length(arraynode);
        }
        else if (IsA(lfirst(l), CurrentOfExpr))
        {
            /* CURRENT OF yields 1 tuple */
            isCurrentOf = true;
            ntuples++;
        }
        else
        {
            /* It's just CTID = something, count 1 tuple */
            ntuples++;
        }
    }

    /*
     * A TID scan must be used for WHERE CURRENT OF.  In that case we expect
     * the restriction-qual costing to have already charged disable_cost at
     * startup for the CurrentOfExpr (hence the Assert), and we subtract it
     * back out here so that the TID scan -- and only the TID scan -- looks
     * cheap.  Otherwise, honor enable_tidscan normally.
     */
    if (isCurrentOf)
    {
        Assert(baserel->baserestrictcost.startup >= disable_cost);
        startup_cost -= disable_cost;
    }
    else if (!enable_tidscan)
        startup_cost += disable_cost;

    /*
     * The TID qual expressions will be computed once, any other baserestrict
     * quals once per retrieved tuple.
     */
    cost_qual_eval(&tid_qual_cost, tidquals, root);

    /* fetch estimated page cost for tablespace containing table */
    get_tablespace_page_costs(baserel->reltablespace,
                              &spc_random_page_cost,
                              NULL);

    /* disk costs --- assume each tuple is on a different page */
    run_cost += spc_random_page_cost * ntuples;

    /* Add scanning CPU costs */
    get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);

    /*
     * The TID quals are charged once at startup; since they are included in
     * qpqual_cost, their per-tuple cost is subtracted back out of the
     * per-tuple charge (this assumes tidquals is a subset of the qpquals).
     */
    startup_cost += qpqual_cost.startup + tid_qual_cost.per_tuple;
    cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple -
        tid_qual_cost.per_tuple;
    run_cost += cpu_per_tuple * ntuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
00958
00959
00960
00961
00962
00963
00964
00965
00966 void
00967 cost_subqueryscan(Path *path, PlannerInfo *root,
00968 RelOptInfo *baserel, ParamPathInfo *param_info)
00969 {
00970 Cost startup_cost;
00971 Cost run_cost;
00972 QualCost qpqual_cost;
00973 Cost cpu_per_tuple;
00974
00975
00976 Assert(baserel->relid > 0);
00977 Assert(baserel->rtekind == RTE_SUBQUERY);
00978
00979
00980 if (param_info)
00981 path->rows = param_info->ppi_rows;
00982 else
00983 path->rows = baserel->rows;
00984
00985
00986
00987
00988
00989
00990 path->startup_cost = baserel->subplan->startup_cost;
00991 path->total_cost = baserel->subplan->total_cost;
00992
00993 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
00994
00995 startup_cost = qpqual_cost.startup;
00996 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
00997 run_cost = cpu_per_tuple * baserel->tuples;
00998
00999 path->startup_cost += startup_cost;
01000 path->total_cost += startup_cost + run_cost;
01001 }
01002
01003
01004
01005
01006
01007
01008
01009
01010 void
01011 cost_functionscan(Path *path, PlannerInfo *root,
01012 RelOptInfo *baserel, ParamPathInfo *param_info)
01013 {
01014 Cost startup_cost = 0;
01015 Cost run_cost = 0;
01016 QualCost qpqual_cost;
01017 Cost cpu_per_tuple;
01018 RangeTblEntry *rte;
01019 QualCost exprcost;
01020
01021
01022 Assert(baserel->relid > 0);
01023 rte = planner_rt_fetch(baserel->relid, root);
01024 Assert(rte->rtekind == RTE_FUNCTION);
01025
01026
01027 if (param_info)
01028 path->rows = param_info->ppi_rows;
01029 else
01030 path->rows = baserel->rows;
01031
01032
01033
01034
01035
01036
01037
01038
01039
01040
01041
01042
01043
01044
01045 cost_qual_eval_node(&exprcost, rte->funcexpr, root);
01046
01047 startup_cost += exprcost.startup + exprcost.per_tuple;
01048
01049
01050 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
01051
01052 startup_cost += qpqual_cost.startup;
01053 cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
01054 run_cost += cpu_per_tuple * baserel->tuples;
01055
01056 path->startup_cost = startup_cost;
01057 path->total_cost = startup_cost + run_cost;
01058 }
01059
01060
01061
01062
01063
01064
01065
01066
01067 void
01068 cost_valuesscan(Path *path, PlannerInfo *root,
01069 RelOptInfo *baserel, ParamPathInfo *param_info)
01070 {
01071 Cost startup_cost = 0;
01072 Cost run_cost = 0;
01073 QualCost qpqual_cost;
01074 Cost cpu_per_tuple;
01075
01076
01077 Assert(baserel->relid > 0);
01078 Assert(baserel->rtekind == RTE_VALUES);
01079
01080
01081 if (param_info)
01082 path->rows = param_info->ppi_rows;
01083 else
01084 path->rows = baserel->rows;
01085
01086
01087
01088
01089
01090 cpu_per_tuple = cpu_operator_cost;
01091
01092
01093 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
01094
01095 startup_cost += qpqual_cost.startup;
01096 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
01097 run_cost += cpu_per_tuple * baserel->tuples;
01098
01099 path->startup_cost = startup_cost;
01100 path->total_cost = startup_cost + run_cost;
01101 }
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113 void
01114 cost_ctescan(Path *path, PlannerInfo *root,
01115 RelOptInfo *baserel, ParamPathInfo *param_info)
01116 {
01117 Cost startup_cost = 0;
01118 Cost run_cost = 0;
01119 QualCost qpqual_cost;
01120 Cost cpu_per_tuple;
01121
01122
01123 Assert(baserel->relid > 0);
01124 Assert(baserel->rtekind == RTE_CTE);
01125
01126
01127 if (param_info)
01128 path->rows = param_info->ppi_rows;
01129 else
01130 path->rows = baserel->rows;
01131
01132
01133 cpu_per_tuple = cpu_tuple_cost;
01134
01135
01136 get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
01137
01138 startup_cost += qpqual_cost.startup;
01139 cpu_per_tuple += cpu_tuple_cost + qpqual_cost.per_tuple;
01140 run_cost += cpu_per_tuple * baserel->tuples;
01141
01142 path->startup_cost = startup_cost;
01143 path->total_cost = startup_cost + run_cost;
01144 }
01145
01146
01147
01148
01149
01150
01151
01152
01153
01154
01155
01156
01157 void
01158 cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
01159 {
01160 Cost startup_cost;
01161 Cost total_cost;
01162 double total_rows;
01163
01164
01165 startup_cost = nrterm->startup_cost;
01166 total_cost = nrterm->total_cost;
01167 total_rows = nrterm->plan_rows;
01168
01169
01170
01171
01172
01173
01174
01175 total_cost += 10 * rterm->total_cost;
01176 total_rows += 10 * rterm->plan_rows;
01177
01178
01179
01180
01181
01182
01183 total_cost += cpu_tuple_cost * total_rows;
01184
01185 runion->startup_cost = startup_cost;
01186 runion->total_cost = total_cost;
01187 runion->plan_rows = total_rows;
01188 runion->plan_width = Max(nrterm->plan_width, rterm->plan_width);
01189 }
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
01204
01205
01206
01207
01208
01209
01210
01211
01212
01213
01214
01215
01216
01217
01218
01219
01220
01221
01222
01223
01224
01225
01226
01227
01228
01229
01230
01231
01232
01233
01234
01235
/*
 * cost_sort
 *	  Determines and returns the cost of sorting a relation, including
 *	  the cost of reading the input data.
 *
 * If the total volume of data to sort fits in sort_mem, an in-memory sort is
 * assumed: no I/O, about t*log2(t) tuple comparisons for t tuples.
 *
 * If it exceeds sort_mem, a disk-based merge sort is assumed: still about
 * t*log2(t) comparisons in total, plus each page written and read once per
 * merge pass, i.e.
 *		disk traffic = 2 * npages * ceil(logM(nruns))
 * where the initial run count is estimated at input_bytes/(2*sort_mem)
 * (average initial run assumed to be twice sort_mem) and M is the merge
 * order reported by tuplesort_merge_order().
 *
 * If the sort is bounded by a LIMIT small enough to fit in memory, a bounded
 * heap-sort keeping just the top K tuples is assumed, costing about
 * t*log2(2K) comparisons (the factor 2 keeps the cost curve continuous at
 * the crossover point).
 *
 * Disk traffic is charged as 3/4 sequential and 1/4 random page accesses.
 *
 * 'pathkeys' is a list of sort keys (currently unused here)
 * 'input_cost' is the total cost for reading the input data
 * 'tuples' is the number of tuples in the relation
 * 'width' is the average tuple width in bytes
 * 'comparison_cost' is any extra cost per comparison, added to the default
 *		of two operator evals per comparison
 * 'sort_mem' is the number of kilobytes of work memory allowed for the sort
 * 'limit_tuples' is the bound on the number of output tuples; <= 0 if none
 */
void
cost_sort(Path *path, PlannerInfo *root,
          List *pathkeys, Cost input_cost, double tuples, int width,
          Cost comparison_cost, int sort_mem,
          double limit_tuples)
{
    Cost        startup_cost = input_cost;
    Cost        run_cost = 0;
    double      input_bytes = relation_byte_size(tuples, width);
    double      output_bytes;
    double      output_tuples;
    long        sort_mem_bytes = sort_mem * 1024L;

    if (!enable_sort)
        startup_cost += disable_cost;

    path->rows = tuples;

    /*
     * We want the cost of a sort never to be estimated as zero, even for a
     * zero input tuple count; besides, we mustn't do log(0)...
     */
    if (tuples < 2.0)
        tuples = 2.0;

    /* Include the default cost-per-comparison (two operator evals) */
    comparison_cost += 2.0 * cpu_operator_cost;

    /* Do we have a useful LIMIT? */
    if (limit_tuples > 0 && limit_tuples < tuples)
    {
        output_tuples = limit_tuples;
        output_bytes = relation_byte_size(output_tuples, width);
    }
    else
    {
        output_tuples = tuples;
        output_bytes = input_bytes;
    }

    if (output_bytes > sort_mem_bytes)
    {
        /*
         * We'll have to use a disk-based sort of all the tuples
         */
        double      npages = ceil(input_bytes / BLCKSZ);
        double      nruns = (input_bytes / sort_mem_bytes) * 0.5;
        double      mergeorder = tuplesort_merge_order(sort_mem_bytes);
        double      log_runs;
        double      npageaccesses;

        /*
         * CPU costs: assume about N log2 N comparisons
         */
        startup_cost += comparison_cost * tuples * LOG2(tuples);

        /* Disk costs */

        /* Compute logM(r) as log(r) / log(M) */
        if (nruns > mergeorder)
            log_runs = ceil(log(nruns) / log(mergeorder));
        else
            log_runs = 1.0;
        npageaccesses = 2.0 * npages * log_runs;
        /* Assume 3/4ths of accesses are sequential, 1/4th are not */
        startup_cost += npageaccesses *
            (seq_page_cost * 0.75 + random_page_cost * 0.25);
    }
    else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
    {
        /*
         * Bounded heap-sort keeping just K tuples in memory: about
         * N log2 K comparisons, with the 2.0 factor making the cost curve
         * continuous at the crossover point with plain quicksort.
         */
        startup_cost += comparison_cost * tuples * LOG2(2.0 * output_tuples);
    }
    else
    {
        /* We'll use plain quicksort on all the input tuples */
        startup_cost += comparison_cost * tuples * LOG2(tuples);
    }

    /*
     * Also charge a small amount (arbitrarily set equal to operator cost)
     * per extracted tuple.  We don't charge cpu_tuple_cost because a Sort
     * node does less per-row work than most plan nodes.  Note it's correct
     * to use tuples, not output_tuples, here --- an upper LIMIT node will
     * pro-rate the run cost, so charging output_tuples would double-count
     * the LIMIT.
     */
    run_cost += cpu_operator_cost * tuples;

    path->startup_cost = startup_cost;
    path->total_cost = startup_cost + run_cost;
}
01335
01336
01337
01338
01339
01340
01341
01342
01343
01344
01345
01346
01347
01348
01349
01350
01351
01352
01353
01354
01355
01356
01357
01358
01359
01360
01361 void
01362 cost_merge_append(Path *path, PlannerInfo *root,
01363 List *pathkeys, int n_streams,
01364 Cost input_startup_cost, Cost input_total_cost,
01365 double tuples)
01366 {
01367 Cost startup_cost = 0;
01368 Cost run_cost = 0;
01369 Cost comparison_cost;
01370 double N;
01371 double logN;
01372
01373
01374
01375
01376 N = (n_streams < 2) ? 2.0 : (double) n_streams;
01377 logN = LOG2(N);
01378
01379
01380 comparison_cost = 2.0 * cpu_operator_cost;
01381
01382
01383 startup_cost += comparison_cost * N * logN;
01384
01385
01386 run_cost += tuples * comparison_cost * 2.0 * logN;
01387
01388
01389
01390
01391
01392
01393
01394 run_cost += cpu_operator_cost * tuples;
01395
01396 path->startup_cost = startup_cost + input_startup_cost;
01397 path->total_cost = startup_cost + run_cost + input_total_cost;
01398 }
01399
01400
01401
01402
01403
01404
01405
01406
01407
01408
01409
01410
01411
01412 void
01413 cost_material(Path *path,
01414 Cost input_startup_cost, Cost input_total_cost,
01415 double tuples, int width)
01416 {
01417 Cost startup_cost = input_startup_cost;
01418 Cost run_cost = input_total_cost - input_startup_cost;
01419 double nbytes = relation_byte_size(tuples, width);
01420 long work_mem_bytes = work_mem * 1024L;
01421
01422 path->rows = tuples;
01423
01424
01425
01426
01427
01428
01429
01430
01431
01432
01433
01434
01435
01436 run_cost += 2 * cpu_operator_cost * tuples;
01437
01438
01439
01440
01441
01442
01443
01444 if (nbytes > work_mem_bytes)
01445 {
01446 double npages = ceil(nbytes / BLCKSZ);
01447
01448 run_cost += seq_page_cost * npages;
01449 }
01450
01451 path->startup_cost = startup_cost;
01452 path->total_cost = startup_cost + run_cost;
01453 }
01454
01455
01456
01457
01458
01459
01460
01461
01462
01463
01464
01465
/*
 * cost_agg
 *		Determines and returns the cost of performing an Agg plan node,
 *		including the cost of its input.
 *
 * aggcosts can be NULL when there are no actual aggregate functions (i.e.,
 * we are using a hashed Agg node just to do grouping); in that case
 * aggstrategy must be AGG_HASHED.
 *
 * Note: when aggstrategy == AGG_SORTED, caller must ensure that input costs
 * are for appropriately-sorted input.
 */
void
cost_agg(Path *path, PlannerInfo *root,
         AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
         int numGroupCols, double numGroups,
         Cost input_startup_cost, Cost input_total_cost,
         double input_tuples)
{
    double      output_tuples;
    Cost        startup_cost;
    Cost        total_cost;
    AggClauseCosts dummy_aggcosts;

    /* Use all-zero per-aggregate costs if NULL is passed */
    if (aggcosts == NULL)
    {
        Assert(aggstrategy == AGG_HASHED);
        MemSet(&dummy_aggcosts, 0, sizeof(AggClauseCosts));
        aggcosts = &dummy_aggcosts;
    }

    /*
     * The transCost.per_tuple component of aggcosts is charged once per
     * input tuple (the cost of running the aggregate transition functions),
     * with transCost.startup charged once overall.  The finalCost component
     * is charged once per output tuple (the cost of the final functions).
     *
     * If we are grouping, an additional cpu_operator_cost per grouping
     * column per input tuple is charged for grouping comparisons, and
     * cpu_tuple_cost is charged per output tuple.
     *
     * Note: AGG_SORTED and AGG_HASHED end up with exactly the same total
     * cost by construction; AGG_SORTED merely has lower startup cost because
     * it can emit rows on the fly.  For a cost tie to be broken correctly in
     * favor of AGG_SORTED, the computations below must form the same
     * intermediate values in the same order, so that roundoff is identical
     * -- do not "simplify" the arithmetic.
     */
    if (aggstrategy == AGG_PLAIN)
    {
        /* No grouping: everything must be read before the single output row */
        startup_cost = input_total_cost;
        startup_cost += aggcosts->transCost.startup;
        startup_cost += aggcosts->transCost.per_tuple * input_tuples;
        startup_cost += aggcosts->finalCost;
        /* we aren't grouping */
        total_cost = startup_cost + cpu_tuple_cost;
        output_tuples = 1;
    }
    else if (aggstrategy == AGG_SORTED)
    {
        /* Here we are able to deliver output on-the-fly */
        startup_cost = input_startup_cost;
        total_cost = input_total_cost;
        /* calcs phrased this way to match HASHED case, see note above */
        total_cost += aggcosts->transCost.startup;
        total_cost += aggcosts->transCost.per_tuple * input_tuples;
        total_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
        total_cost += aggcosts->finalCost * numGroups;
        total_cost += cpu_tuple_cost * numGroups;
        output_tuples = numGroups;
    }
    else
    {
        /* must be AGG_HASHED: all input is consumed before any output */
        startup_cost = input_total_cost;
        startup_cost += aggcosts->transCost.startup;
        startup_cost += aggcosts->transCost.per_tuple * input_tuples;
        startup_cost += (cpu_operator_cost * numGroupCols) * input_tuples;
        total_cost = startup_cost;
        total_cost += aggcosts->finalCost * numGroups;
        total_cost += cpu_tuple_cost * numGroups;
        output_tuples = numGroups;
    }

    path->rows = output_tuples;
    path->startup_cost = startup_cost;
    path->total_cost = total_cost;
}
01548
01549
01550
01551
01552
01553
01554
01555
/*
 * cost_windowagg
 *		Determines and returns the cost of performing a WindowAgg plan node,
 *		including the cost of its input.
 *
 * Input is assumed already properly sorted.
 */
void
cost_windowagg(Path *path, PlannerInfo *root,
               List *windowFuncs, int numPartCols, int numOrderCols,
               Cost input_startup_cost, Cost input_total_cost,
               double input_tuples)
{
    Cost        startup_cost;
    Cost        total_cost;
    ListCell   *lc;

    startup_cost = input_startup_cost;
    total_cost = input_total_cost;

    /*
     * Window functions are charged their stated execution cost plus the cost
     * of evaluating their input expressions, per input tuple.  Since a
     * window function may in fact fetch several rows per cycle, this could
     * be an underestimate, but without knowing how many rows it will fetch
     * there's no obvious way to do better; proportional-to-input-rows is at
     * least a reasonable guess.
     */
    foreach(lc, windowFuncs)
    {
        WindowFunc *wfunc = (WindowFunc *) lfirst(lc);
        Cost        wfunccost;
        QualCost    argcosts;

        Assert(IsA(wfunc, WindowFunc));

        wfunccost = get_func_cost(wfunc->winfnoid) * cpu_operator_cost;

        /* also add the cost of evaluating the wfunc's input expressions */
        cost_qual_eval_node(&argcosts, (Node *) wfunc->args, root);
        startup_cost += argcosts.startup;
        wfunccost += argcosts.per_tuple;

        total_cost += wfunccost * input_tuples;
    }

    /*
     * Also charge cpu_operator_cost per grouping column per tuple for the
     * partition/order comparisons, plus cpu_tuple_cost per tuple for general
     * per-row overhead.
     */
    total_cost += cpu_operator_cost * (numPartCols + numOrderCols) * input_tuples;
    total_cost += cpu_tuple_cost * input_tuples;

    /* A WindowAgg returns the same number of rows it was given */
    path->rows = input_tuples;
    path->startup_cost = startup_cost;
    path->total_cost = total_cost;
}
01611
01612
01613
01614
01615
01616
01617
01618
01619
01620 void
01621 cost_group(Path *path, PlannerInfo *root,
01622 int numGroupCols, double numGroups,
01623 Cost input_startup_cost, Cost input_total_cost,
01624 double input_tuples)
01625 {
01626 Cost startup_cost;
01627 Cost total_cost;
01628
01629 startup_cost = input_startup_cost;
01630 total_cost = input_total_cost;
01631
01632
01633
01634
01635
01636 total_cost += cpu_operator_cost * input_tuples * numGroupCols;
01637
01638 path->rows = numGroups;
01639 path->startup_cost = startup_cost;
01640 path->total_cost = total_cost;
01641 }
01642
01643
01644
01645
01646
01647
01648
01649
01650
01651
01652
01653
01654
01655
01656
01657
01658
01659
01660
01661
01662
01663
01664
01665
01666
/*
 * initial_cost_nestloop
 *	  Preliminary estimate of the cost of a nestloop join path.
 *
 * This must quickly produce lower-bound estimates of the path's startup and
 * total costs.  The expensive part -- examining the join quals -- is
 * deliberately skipped here; CPU-per-tuple costs are presumably left for a
 * second phase (the function filling in the remaining workspace fields).
 *
 * 'workspace' is filled with startup_cost, total_cost, and private data
 *		(run_cost, inner_rescan_run_cost, and for SEMI/ANTI joins
 *		outer_matched_rows and inner_scan_frac) for later refinement
 * 'jointype' is the type of join to be performed
 * 'outer_path' is the outer input to the join
 * 'inner_path' is the inner input to the join
 * 'sjinfo' is extra info about the join for selectivity estimation
 * 'semifactors' contains valid data if jointype is SEMI or ANTI
 */
void
initial_cost_nestloop(PlannerInfo *root, JoinCostWorkspace *workspace,
                      JoinType jointype,
                      Path *outer_path, Path *inner_path,
                      SpecialJoinInfo *sjinfo,
                      SemiAntiJoinFactors *semifactors)
{
    Cost        startup_cost = 0;
    Cost        run_cost = 0;
    double      outer_path_rows = outer_path->rows;
    Cost        inner_rescan_start_cost;
    Cost        inner_rescan_total_cost;
    Cost        inner_run_cost;
    Cost        inner_rescan_run_cost;

    /* estimate costs to rescan the inner relation */
    cost_rescan(root, inner_path,
                &inner_rescan_start_cost,
                &inner_rescan_total_cost);

    /*
     * NOTE: clearly, we must pay both outer and inner paths' startup_cost
     * before we can start returning tuples, so the join's startup cost is
     * their sum.  We'll also pay the inner path's rescan startup cost
     * multiple times -- once per outer row after the first.
     */
    startup_cost += outer_path->startup_cost + inner_path->startup_cost;
    run_cost += outer_path->total_cost - outer_path->startup_cost;
    if (outer_path_rows > 1)
        run_cost += (outer_path_rows - 1) * inner_rescan_start_cost;

    inner_run_cost = inner_path->total_cost - inner_path->startup_cost;
    inner_rescan_run_cost = inner_rescan_total_cost - inner_rescan_start_cost;

    if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
    {
        double      outer_matched_rows;
        Selectivity inner_scan_frac;

        /*
         * SEMI or ANTI join: the executor stops scanning the inner rel as
         * soon as a match is found for the current outer row.
         *
         * For an outer row with at least one match, the inner scan is
         * expected to stop after a fraction 1/(match_count+1) of the inner
         * rows if matches were evenly distributed; since they probably
         * aren't, a fuzz factor of 2.0 is applied to that fraction.
         *
         * The first inner scan is charged in full (inner_run_cost below):
         * rescans may be cheaper than first scans, and it is hard to know
         * whether the first scan will ever run to completion, so be
         * conservative and always charge the whole first-scan run cost once.
         */
        run_cost += inner_run_cost;

        outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
        inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);

        /* Add inner run cost for additional outer tuples having matches */
        if (outer_matched_rows > 1)
            run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;

        /*
         * The cost of processing unmatched outer rows is not included here;
         * save the data the second-phase costing will need to add it.
         */
        workspace->outer_matched_rows = outer_matched_rows;
        workspace->inner_scan_frac = inner_scan_frac;
    }
    else
    {
        /* Normal case: inner rel is run once, then rescanned per outer row */
        run_cost += inner_run_cost;
        if (outer_path_rows > 1)
            run_cost += (outer_path_rows - 1) * inner_rescan_run_cost;
    }

    /* CPU costs left for the second phase */

    /* Public result fields */
    workspace->startup_cost = startup_cost;
    workspace->total_cost = startup_cost + run_cost;
    /* Save private data for the second phase */
    workspace->run_cost = run_cost;
    workspace->inner_rescan_run_cost = inner_rescan_run_cost;
}
01761
01762
01763
01764
01765
01766
01767
01768
01769
01770
/*
 * final_cost_nestloop
 *	  Final estimate of the cost and result size of a nestloop join path.
 *
 * 'path' is already filled in except for the rows and cost fields
 * 'workspace' is the result from initial_cost_nestloop
 * 'sjinfo' is extra info about special joins for selectivity estimation
 * 'semifactors' contains valid data only if jointype is SEMI or ANTI
 */
void
final_cost_nestloop(PlannerInfo *root, NestPath *path,
					JoinCostWorkspace *workspace,
					SpecialJoinInfo *sjinfo,
					SemiAntiJoinFactors *semifactors)
{
	Path	   *outer_path = path->outerjoinpath;
	Path	   *inner_path = path->innerjoinpath;
	double		outer_path_rows = outer_path->rows;
	double		inner_path_rows = inner_path->rows;
	Cost		startup_cost = workspace->startup_cost;
	Cost		run_cost = workspace->run_cost;
	Cost		inner_rescan_run_cost = workspace->inner_rescan_run_cost;
	Cost		cpu_per_tuple;
	QualCost	restrict_qual_cost;
	double		ntuples;

	/* Mark the path with the correct (possibly parameterized) row estimate */
	if (path->path.param_info)
		path->path.rows = path->path.param_info->ppi_rows;
	else
		path->path.rows = path->path.parent->rows;

	/*
	 * Apply the disable penalty only here, not in the preliminary
	 * estimate; initial_cost_nestloop deliberately ignores it.
	 */
	if (!enable_nestloop)
		startup_cost += disable_cost;

	/* cost of inner-relation source data (we already dealt with outer rel) */
	if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
	{
		double		outer_matched_rows = workspace->outer_matched_rows;
		Selectivity inner_scan_frac = workspace->inner_scan_frac;

		/*
		 * SEMI/ANTI join: matched outer rows scan only inner_scan_frac of
		 * the inner rel before stopping, so that's how many join-qual
		 * evaluations they incur.
		 */
		ntuples = outer_matched_rows * inner_path_rows * inner_scan_frac;

		/*
		 * Unmatched outer rows normally force a full inner scan apiece.
		 * But if the inner path is an indexscan whose index quals subsume
		 * the join quals, an unmatched outer row yields an empty indexscan,
		 * which is cheap: approximate it as one inner rescan's cost spread
		 * over inner_path_rows, and charge no qual evaluations for those
		 * rows (the scan returns nothing to evaluate quals on).
		 */
		if (has_indexed_join_quals(path))
		{
			run_cost += (outer_path_rows - outer_matched_rows) *
				inner_rescan_run_cost / inner_path_rows;
		}
		else
		{
			run_cost += (outer_path_rows - outer_matched_rows) *
				inner_rescan_run_cost;
			ntuples += (outer_path_rows - outer_matched_rows) *
				inner_path_rows;
		}
	}
	else
	{
		/*
		 * Normal case: source costs were fully included in the preliminary
		 * estimate.  Number of tuple pair evaluations is the full cross
		 * product (this is tuples *processed*, not tuples emitted).
		 */
		ntuples = outer_path_rows * inner_path_rows;
	}

	/* CPU costs: evaluate the join restriction quals on each tuple pair */
	cost_qual_eval(&restrict_qual_cost, path->joinrestrictinfo, root);
	startup_cost += restrict_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + restrict_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * ntuples;

	path->path.startup_cost = startup_cost;
	path->path.total_cost = startup_cost + run_cost;
}
01859
01860
01861
01862
01863
01864
01865
01866
01867
01868
01869
01870
01871
01872
01873
01874
01875
01876
01877
01878
01879
01880
01881
01882
01883
01884
01885
01886
01887
01888
01889
/*
 * initial_cost_mergejoin
 *	  Preliminary estimate of the cost of a mergejoin path.
 *
 * Produces lower-bound startup/total cost estimates cheaply; if the path
 * survives, final_cost_mergejoin computes the real numbers, re-using the
 * private workspace fields saved here (run_cost, inner_run_cost,
 * outer_rows/inner_rows, outer_skip_rows/inner_skip_rows).
 *
 * 'outersortkeys'/'innersortkeys' are NIL when the corresponding input is
 * already suitably sorted; otherwise they give the pathkeys an explicit
 * sort step must produce.
 */
void
initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace,
					   JoinType jointype,
					   List *mergeclauses,
					   Path *outer_path, Path *inner_path,
					   List *outersortkeys, List *innersortkeys,
					   SpecialJoinInfo *sjinfo)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	double		outer_path_rows = outer_path->rows;
	double		inner_path_rows = inner_path->rows;
	Cost		inner_run_cost;
	double		outer_rows,
				inner_rows,
				outer_skip_rows,
				inner_skip_rows;
	Selectivity outerstartsel,
				outerendsel,
				innerstartsel,
				innerendsel;
	Path		sort_path;		/* dummy for result of cost_sort */

	/* Protect divisions below from zero or NaN row counts */
	if (outer_path_rows <= 0 || isnan(outer_path_rows))
		outer_path_rows = 1;
	if (inner_path_rows <= 0 || isnan(inner_path_rows))
		inner_path_rows = 1;

	/*
	 * A merge join can begin emitting rows only after skipping any initial
	 * rows of each input that precede the first possible match, and it can
	 * stop early once either input is exhausted (except where an outer
	 * join forces a complete scan of one side).  Estimate the start/end
	 * scan fractions of each input using the first (most significant)
	 * merge clause; cached_scansel caches the fairly expensive
	 * mergejoinscansel() result inside the clause's RestrictInfo.
	 */
	if (mergeclauses && jointype != JOIN_FULL)
	{
		RestrictInfo *firstclause = (RestrictInfo *) linitial(mergeclauses);
		List	   *opathkeys;
		List	   *ipathkeys;
		PathKey    *opathkey;
		PathKey    *ipathkey;
		MergeScanSelCache *cache;

		/* Determine the sort ordering each side will actually have */
		opathkeys = outersortkeys ? outersortkeys : outer_path->pathkeys;
		ipathkeys = innersortkeys ? innersortkeys : inner_path->pathkeys;
		Assert(opathkeys);
		Assert(ipathkeys);
		opathkey = (PathKey *) linitial(opathkeys);
		ipathkey = (PathKey *) linitial(ipathkeys);
		/* The two sides' orderings must agree, else this isn't a valid plan */
		if (opathkey->pk_opfamily != ipathkey->pk_opfamily ||
			opathkey->pk_eclass->ec_collation != ipathkey->pk_eclass->ec_collation ||
			opathkey->pk_strategy != ipathkey->pk_strategy ||
			opathkey->pk_nulls_first != ipathkey->pk_nulls_first)
			elog(ERROR, "left and right pathkeys do not match in mergejoin");

		/* Get scan selectivities for the first merge clause (cached) */
		cache = cached_scansel(root, firstclause, opathkey);

		if (bms_is_subset(firstclause->left_relids,
						  outer_path->parent->relids))
		{
			/* left side of clause is the outer relation */
			outerstartsel = cache->leftstartsel;
			outerendsel = cache->leftendsel;
			innerstartsel = cache->rightstartsel;
			innerendsel = cache->rightendsel;
		}
		else
		{
			/* left side of clause is the inner relation */
			outerstartsel = cache->rightstartsel;
			outerendsel = cache->rightendsel;
			innerstartsel = cache->leftstartsel;
			innerendsel = cache->leftendsel;
		}

		/*
		 * An outer join must scan its preserved side completely, so force
		 * full-range selectivities for that side.
		 */
		if (jointype == JOIN_LEFT ||
			jointype == JOIN_ANTI)
		{
			outerstartsel = 0.0;
			outerendsel = 1.0;
		}
		else if (jointype == JOIN_RIGHT)
		{
			innerstartsel = 0.0;
			innerendsel = 1.0;
		}
	}
	else
	{
		/* cope with clauseless or full mergejoin: scan everything */
		outerstartsel = innerstartsel = 0.0;
		outerendsel = innerendsel = 1.0;
	}

	/*
	 * Convert selectivities to row counts.  outer_rows/inner_rows are
	 * clamped to at least 1 by clamp_row_est; the skip counts may be zero.
	 */
	outer_skip_rows = rint(outer_path_rows * outerstartsel);
	inner_skip_rows = rint(inner_path_rows * innerstartsel);
	outer_rows = clamp_row_est(outer_path_rows * outerendsel);
	inner_rows = clamp_row_est(inner_path_rows * innerendsel);

	Assert(outer_skip_rows <= outer_rows);
	Assert(inner_skip_rows <= inner_rows);

	/*
	 * Readjust the selectivities to match the rounded row counts.  This is
	 * normally insignificant, but with only a few input rows the rounding
	 * error would otherwise be a large percentage.
	 */
	outerstartsel = outer_skip_rows / outer_path_rows;
	innerstartsel = inner_skip_rows / inner_path_rows;
	outerendsel = outer_rows / outer_path_rows;
	innerendsel = inner_rows / inner_path_rows;

	Assert(outerstartsel <= outerendsel);
	Assert(innerstartsel <= innerendsel);

	/*
	 * Cost of source data.  For each input: if it must be explicitly
	 * sorted, cost the sort and apportion its run cost between startup
	 * (the skipped fraction) and run; otherwise apportion the raw path's
	 * run cost the same way.
	 */
	if (outersortkeys)			/* do we need to sort outer? */
	{
		cost_sort(&sort_path,
				  root,
				  outersortkeys,
				  outer_path->total_cost,
				  outer_path_rows,
				  outer_path->parent->width,
				  0.0,
				  work_mem,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
			* outerstartsel;
		run_cost += (sort_path.total_cost - sort_path.startup_cost)
			* (outerendsel - outerstartsel);
	}
	else
	{
		startup_cost += outer_path->startup_cost;
		startup_cost += (outer_path->total_cost - outer_path->startup_cost)
			* outerstartsel;
		run_cost += (outer_path->total_cost - outer_path->startup_cost)
			* (outerendsel - outerstartsel);
	}

	if (innersortkeys)			/* do we need to sort inner? */
	{
		cost_sort(&sort_path,
				  root,
				  innersortkeys,
				  inner_path->total_cost,
				  inner_path_rows,
				  inner_path->parent->width,
				  0.0,
				  work_mem,
				  -1.0);
		startup_cost += sort_path.startup_cost;
		startup_cost += (sort_path.total_cost - sort_path.startup_cost)
			* innerstartsel;
		inner_run_cost = (sort_path.total_cost - sort_path.startup_cost)
			* (innerendsel - innerstartsel);
	}
	else
	{
		startup_cost += inner_path->startup_cost;
		startup_cost += (inner_path->total_cost - inner_path->startup_cost)
			* innerstartsel;
		inner_run_cost = (inner_path->total_cost - inner_path->startup_cost)
			* (innerendsel - innerstartsel);
	}

	/*
	 * The inner side's run cost is kept separate (inner_run_cost) because
	 * final_cost_mergejoin may inflate it for rescans or replace it with a
	 * materialization cost.  It is included in the lower-bound total_cost
	 * here but not yet in run_cost.
	 */

	/* CPU costs left for the final pass */

	/* Public result fields */
	workspace->startup_cost = startup_cost;
	workspace->total_cost = startup_cost + run_cost + inner_run_cost;
	/* Save private data for final_cost_mergejoin */
	workspace->run_cost = run_cost;
	workspace->inner_run_cost = inner_run_cost;
	workspace->outer_rows = outer_rows;
	workspace->inner_rows = inner_rows;
	workspace->outer_skip_rows = outer_skip_rows;
	workspace->inner_skip_rows = inner_skip_rows;
}
02092
02093
02094
02095
02096
02097
02098
02099
02100
02101
02102
02103
02104
02105
02106
02107
02108
02109
02110
02111
02112
/*
 * final_cost_mergejoin
 *	  Final estimate of the cost and result size of a mergejoin path.
 *
 * Also decides whether the inner input should be materialized, setting
 * path->materialize_inner.
 *
 * 'path' is already filled in except for the rows, cost, and
 *		materialize_inner fields
 * 'workspace' is the result from initial_cost_mergejoin
 * 'sjinfo' is extra info about special joins for selectivity estimation
 */
void
final_cost_mergejoin(PlannerInfo *root, MergePath *path,
					 JoinCostWorkspace *workspace,
					 SpecialJoinInfo *sjinfo)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	double		inner_path_rows = inner_path->rows;
	List	   *mergeclauses = path->path_mergeclauses;
	List	   *innersortkeys = path->innersortkeys;
	Cost		startup_cost = workspace->startup_cost;
	Cost		run_cost = workspace->run_cost;
	Cost		inner_run_cost = workspace->inner_run_cost;
	double		outer_rows = workspace->outer_rows;
	double		inner_rows = workspace->inner_rows;
	double		outer_skip_rows = workspace->outer_skip_rows;
	double		inner_skip_rows = workspace->inner_skip_rows;
	Cost		cpu_per_tuple,
				bare_inner_cost,
				mat_inner_cost;
	QualCost	merge_qual_cost;
	QualCost	qp_qual_cost;
	double		mergejointuples,
				rescannedtuples;
	double		rescanratio;

	/* Protect divisions below from zero or NaN inner row count */
	if (inner_path_rows <= 0 || isnan(inner_path_rows))
		inner_path_rows = 1;

	/* Mark the path with the correct (possibly parameterized) row estimate */
	if (path->jpath.path.param_info)
		path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
	else
		path->jpath.path.rows = path->jpath.path.parent->rows;

	/*
	 * Apply the disable penalty only here, not in the preliminary
	 * estimate; initial_cost_mergejoin deliberately ignores it.
	 */
	if (!enable_mergejoin)
		startup_cost += disable_cost;

	/*
	 * Compute cost of the mergequals and qpquals (other restriction
	 * clauses) separately; the quals in joinrestrictinfo include the
	 * mergequals, so subtract those out.
	 */
	cost_qual_eval(&merge_qual_cost, mergeclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= merge_qual_cost.startup;
	qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple;

	/*
	 * Get approximate count of tuple pairs passing the mergequals (needs
	 * inner-join semantics, hence approx_tuple_count rather than the
	 * path's own row estimate).
	 */
	mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses);

	/*
	 * When the outer input contains duplicate merge-key values, the
	 * executor must back up and rescan the matching run of inner tuples
	 * for each duplicate.  Estimate the number of re-fetched inner tuples
	 * as (size of merge output) - (size of inner relation): if each inner
	 * tuple were matched by at most one outer tuple, the output could not
	 * exceed the inner size, so any excess approximates rescanned
	 * fetches.  Clamp at zero since the estimate can come out negative.
	 * If the outer input has been unique-ified, there are no duplicate
	 * keys and hence no rescans.
	 */
	if (IsA(outer_path, UniquePath))
		rescannedtuples = 0;
	else
	{
		rescannedtuples = mergejointuples - inner_path_rows;
		/* Must clamp because of possible underestimate */
		if (rescannedtuples < 0)
			rescannedtuples = 0;
	}
	/* We'll inflate various costs by this rescan factor */
	rescanratio = 1.0 + (rescannedtuples / inner_path_rows);

	/*
	 * Decide whether to materialize the inner input.  Without
	 * materialization, re-fetches are done by mark/restore on the inner
	 * path itself, so the inner run cost is simply inflated by the rescan
	 * ratio:
	 */
	bare_inner_cost = inner_run_cost * rescanratio;

	/*
	 * With materialization, the inner path runs once, and all fetches
	 * (including re-fetches) read from the materialized tuplestore; charge
	 * cpu_operator_cost per fetched tuple to represent that overhead.  We
	 * ignore materialize's own startup/space costs here, which makes this
	 * estimate somewhat optimistic.
	 */
	mat_inner_cost = inner_run_cost +
		cpu_operator_cost * inner_path_rows * rescanratio;

	/* Prefer materializing if it looks cheaper, unless disabled by GUC */
	if (enable_material && mat_inner_cost < bare_inner_cost)
		path->materialize_inner = true;

	/*
	 * Even if it doesn't look cheaper, we *must* materialize when the
	 * inner path can't support mark/restore and no explicit sort (which
	 * would provide that capability) is planned.  Note this case ignores
	 * enable_material: correctness requires it.
	 */
	else if (innersortkeys == NIL &&
			 !ExecSupportsMarkRestore(inner_path->pathtype))
		path->materialize_inner = true;

	/*
	 * Also force materialization when the inner side is explicitly sorted
	 * and the sort result is expected to exceed work_mem (i.e. to spill);
	 * a disk sort supporting mark/restore is at a disadvantage, and
	 * interposing a Material node lets the sort's final merge be done
	 * on-the-fly.  (We don't adjust the cost estimates for this
	 * consideration.)
	 */
	else if (enable_material && innersortkeys != NIL &&
			 relation_byte_size(inner_path_rows, inner_path->parent->width) >
			 (work_mem * 1024L))
		path->materialize_inner = true;
	else
		path->materialize_inner = false;

	/* Charge the inner-input cost corresponding to the decision made */
	if (path->materialize_inner)
		run_cost += mat_inner_cost;
	else
		run_cost += bare_inner_cost;

	/*
	 * CPU costs of evaluating the mergequals: charged per tuple examined
	 * on each side, with inner-side counts inflated by the rescan ratio.
	 * Qual evaluations on the skipped-over initial rows count as startup
	 * cost, since they happen before the first row can be emitted.
	 */
	startup_cost += merge_qual_cost.startup;
	startup_cost += merge_qual_cost.per_tuple *
		(outer_skip_rows + inner_skip_rows * rescanratio);
	run_cost += merge_qual_cost.per_tuple *
		((outer_rows - outer_skip_rows) +
		 (inner_rows - inner_skip_rows) * rescanratio);

	/*
	 * For each tuple pair that passes the mergequals, charge
	 * cpu_tuple_cost plus evaluation of the remaining (qp) quals.  (Using
	 * mergejointuples here slightly overcharges for SEMI/ANTI joins, which
	 * may stop early, but we don't attempt to refine that.)
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * mergejointuples;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}
02318
02319
02320
02321
/*
 * cached_scansel
 *		Return the mergejoinscansel() results for the given merge clause
 *		and sort ordering, computing and caching them on first use.
 *
 * The cache lives in the RestrictInfo's scansel_cache list, keyed by the
 * pathkey's opfamily/collation/strategy/nulls_first, so repeated cost
 * estimates with the same ordering avoid redoing the expensive
 * selectivity computation.
 */
static MergeScanSelCache *
cached_scansel(PlannerInfo *root, RestrictInfo *rinfo, PathKey *pathkey)
{
	MergeScanSelCache *cache;
	ListCell   *lc;
	Selectivity leftstartsel,
				leftendsel,
				rightstartsel,
				rightendsel;
	MemoryContext oldcontext;

	/* Do we have this result already? */
	foreach(lc, rinfo->scansel_cache)
	{
		cache = (MergeScanSelCache *) lfirst(lc);
		if (cache->opfamily == pathkey->pk_opfamily &&
			cache->collation == pathkey->pk_eclass->ec_collation &&
			cache->strategy == pathkey->pk_strategy &&
			cache->nulls_first == pathkey->pk_nulls_first)
			return cache;
	}

	/* Nope, do the computation */
	mergejoinscansel(root,
					 (Node *) rinfo->clause,
					 pathkey->pk_opfamily,
					 pathkey->pk_strategy,
					 pathkey->pk_nulls_first,
					 &leftstartsel,
					 &leftendsel,
					 &rightstartsel,
					 &rightendsel);

	/*
	 * Cache the result: allocate in the planner's long-lived context
	 * (planner_cxt) so the entry lives as long as the RestrictInfo it
	 * hangs off of, not just the current working context.
	 */
	oldcontext = MemoryContextSwitchTo(root->planner_cxt);

	cache = (MergeScanSelCache *) palloc(sizeof(MergeScanSelCache));
	cache->opfamily = pathkey->pk_opfamily;
	cache->collation = pathkey->pk_eclass->ec_collation;
	cache->strategy = pathkey->pk_strategy;
	cache->nulls_first = pathkey->pk_nulls_first;
	cache->leftstartsel = leftstartsel;
	cache->leftendsel = leftendsel;
	cache->rightstartsel = rightstartsel;
	cache->rightendsel = rightendsel;

	rinfo->scansel_cache = lappend(rinfo->scansel_cache, cache);

	MemoryContextSwitchTo(oldcontext);

	return cache;
}
02374
02375
02376
02377
02378
02379
02380
02381
02382
02383
02384
02385
02386
02387
02388
02389
02390
02391
02392
02393
02394
02395
02396
02397
02398
02399
/*
 * initial_cost_hashjoin
 *	  Preliminary estimate of the cost of a hashjoin path.
 *
 * Produces lower-bound startup/total cost estimates cheaply; if the path
 * survives, final_cost_hashjoin computes the real numbers.  The private
 * workspace fields run_cost, numbuckets, and numbatches are saved for
 * that final pass.
 */
void
initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace,
					  JoinType jointype,
					  List *hashclauses,
					  Path *outer_path, Path *inner_path,
					  SpecialJoinInfo *sjinfo,
					  SemiAntiJoinFactors *semifactors)
{
	Cost		startup_cost = 0;
	Cost		run_cost = 0;
	double		outer_path_rows = outer_path->rows;
	double		inner_path_rows = inner_path->rows;
	int			num_hashclauses = list_length(hashclauses);
	int			numbuckets;
	int			numbatches;
	int			num_skew_mcvs;

	/*
	 * Cost of source data.  The entire inner relation must be read and
	 * hashed before any output can be produced, so its total cost counts
	 * as startup cost.
	 */
	startup_cost += outer_path->startup_cost;
	run_cost += outer_path->total_cost - outer_path->startup_cost;
	startup_cost += inner_path->total_cost;

	/*
	 * Cost of computing hash functions: one cpu_operator_cost per hash
	 * clause per input tuple, plus cpu_tuple_cost per inner tuple for
	 * insertion into the hash table.  (If a hash clause is more complex
	 * than a single operator this undercharges, but we don't attempt to
	 * account for that here.)
	 */
	startup_cost += (cpu_operator_cost * num_hashclauses + cpu_tuple_cost)
		* inner_path_rows;
	run_cost += cpu_operator_cost * num_hashclauses * outer_path_rows;

	/*
	 * Ask the executor-side sizing logic how big a hash table it would
	 * build for the inner relation, and in how many batches; also get the
	 * skew-MCV count, though only buckets/batches are used here.
	 */
	ExecChooseHashTableSize(inner_path_rows,
							inner_path->parent->width,
							true,		/* useskew */
							&numbuckets,
							&numbatches,
							&num_skew_mcvs);

	/*
	 * If the join will be batched, most inner and outer tuples get written
	 * out to disk and read back in.  Charge seq_page_cost per page (the
	 * I/O pattern is sequential): writing the inner rel counts as startup
	 * cost, everything else as run cost.
	 */
	if (numbatches > 1)
	{
		double		outerpages = page_size(outer_path_rows,
										   outer_path->parent->width);
		double		innerpages = page_size(inner_path_rows,
										   inner_path->parent->width);

		startup_cost += seq_page_cost * innerpages;
		run_cost += seq_page_cost * (innerpages + 2 * outerpages);
	}

	/* CPU costs of qual evaluation are left for the final pass */

	/* Public result fields */
	workspace->startup_cost = startup_cost;
	workspace->total_cost = startup_cost + run_cost;
	/* Save private data for final_cost_hashjoin */
	workspace->run_cost = run_cost;
	workspace->numbuckets = numbuckets;
	workspace->numbatches = numbatches;
}
02481
02482
02483
02484
02485
02486
02487
02488
02489
02490
02491
02492
02493
/*
 * final_cost_hashjoin
 *	  Final estimate of the cost and result size of a hashjoin path.
 *
 * 'path' is already filled in except for the rows, cost, and num_batches
 *		fields
 * 'workspace' is the result from initial_cost_hashjoin
 * 'sjinfo' is extra info about special joins for selectivity estimation
 * 'semifactors' contains valid data only if jointype is SEMI or ANTI
 */
void
final_cost_hashjoin(PlannerInfo *root, HashPath *path,
					JoinCostWorkspace *workspace,
					SpecialJoinInfo *sjinfo,
					SemiAntiJoinFactors *semifactors)
{
	Path	   *outer_path = path->jpath.outerjoinpath;
	Path	   *inner_path = path->jpath.innerjoinpath;
	double		outer_path_rows = outer_path->rows;
	double		inner_path_rows = inner_path->rows;
	List	   *hashclauses = path->path_hashclauses;
	Cost		startup_cost = workspace->startup_cost;
	Cost		run_cost = workspace->run_cost;
	int			numbuckets = workspace->numbuckets;
	int			numbatches = workspace->numbatches;
	Cost		cpu_per_tuple;
	QualCost	hash_qual_cost;
	QualCost	qp_qual_cost;
	double		hashjointuples;
	double		virtualbuckets;
	Selectivity innerbucketsize;
	ListCell   *hcl;

	/* Mark the path with the correct (possibly parameterized) row estimate */
	if (path->jpath.path.param_info)
		path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
	else
		path->jpath.path.rows = path->jpath.path.parent->rows;

	/*
	 * Apply the disable penalty only here, not in the preliminary
	 * estimate; initial_cost_hashjoin deliberately ignores it.
	 */
	if (!enable_hashjoin)
		startup_cost += disable_cost;

	/* mark the path with estimated # of batches */
	path->num_batches = numbatches;

	/* Count buckets as if all batches' buckets existed at once */
	virtualbuckets = (double) numbuckets *(double) numbatches;

	/*
	 * Determine the bucketsize fraction for the inner relation: the
	 * expected fraction of inner rows landing in the same bucket as a
	 * given probe row.  We use the smallest bucketsize estimated for any
	 * individual hash clause (intersecting clauses can only shrink
	 * buckets), which is still a conservative (large) estimate.  If the
	 * inner relation has been unique-ified, assume rows spread evenly:
	 * 1/virtualbuckets.
	 */
	if (IsA(inner_path, UniquePath))
		innerbucketsize = 1.0 / virtualbuckets;
	else
	{
		innerbucketsize = 1.0;
		foreach(hcl, hashclauses)
		{
			RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(hcl);
			Selectivity thisbucketsize;

			Assert(IsA(restrictinfo, RestrictInfo));

			/*
			 * Estimate bucketsize from the side of the clause that belongs
			 * to the inner relation.  The estimate depends only on the
			 * clause (not the join), so it is cached in the RestrictInfo
			 * (left_/right_bucketsize, -1 meaning "not computed yet").
			 */
			if (bms_is_subset(restrictinfo->right_relids,
							  inner_path->parent->relids))
			{
				/* righthand side is inner */
				thisbucketsize = restrictinfo->right_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
										   get_rightop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->right_bucketsize = thisbucketsize;
				}
			}
			else
			{
				Assert(bms_is_subset(restrictinfo->left_relids,
									 inner_path->parent->relids));
				/* lefthand side is inner */
				thisbucketsize = restrictinfo->left_bucketsize;
				if (thisbucketsize < 0)
				{
					/* not cached yet */
					thisbucketsize =
						estimate_hash_bucketsize(root,
											get_leftop(restrictinfo->clause),
												 virtualbuckets);
					restrictinfo->left_bucketsize = thisbucketsize;
				}
			}

			if (innerbucketsize > thisbucketsize)
				innerbucketsize = thisbucketsize;
		}
	}

	/*
	 * Compute cost of the hashquals and qpquals (other restriction
	 * clauses) separately; joinrestrictinfo includes the hashquals, so
	 * subtract those out.
	 */
	cost_qual_eval(&hash_qual_cost, hashclauses, root);
	cost_qual_eval(&qp_qual_cost, path->jpath.joinrestrictinfo, root);
	qp_qual_cost.startup -= hash_qual_cost.startup;
	qp_qual_cost.per_tuple -= hash_qual_cost.per_tuple;

	/* CPU costs */

	if (path->jpath.jointype == JOIN_SEMI || path->jpath.jointype == JOIN_ANTI)
	{
		double		outer_matched_rows;
		Selectivity inner_scan_frac;

		/*
		 * SEMI/ANTI join: the executor stops examining a bucket as soon as
		 * the current outer row finds a match.  As in the nestloop case,
		 * estimate the number of matched outer rows and the fraction of a
		 * matching bucket scanned before the first match (heuristic
		 * 2/(match_count+1), assuming matches spread evenly).
		 */
		outer_matched_rows = rint(outer_path_rows * semifactors->outer_match_frac);
		inner_scan_frac = 2.0 / (semifactors->match_count + 1.0);

		startup_cost += hash_qual_cost.startup;
		/*
		 * Matched rows: hashqual evaluations on bucket contents up to the
		 * first match; the extra 0.5 discounts for bucket contents that
		 * can be skipped without full qual evaluation (e.g. hash-code
		 * mismatches) — a heuristic factor.
		 */
		run_cost += hash_qual_cost.per_tuple * outer_matched_rows *
			clamp_row_est(inner_path_rows * innerbucketsize * inner_scan_frac) * 0.5;

		/*
		 * Unmatched rows must scan their entire bucket, but since none of
		 * its contents can match, most comparisons fail cheaply; charge
		 * only a heuristic 5% of the normal qual cost, on an
		 * evenly-distributed bucket size (inner_path_rows/virtualbuckets,
		 * not innerbucketsize, which reflects skew toward matching
		 * values).
		 */
		run_cost += hash_qual_cost.per_tuple *
			(outer_path_rows - outer_matched_rows) *
			clamp_row_est(inner_path_rows / virtualbuckets) * 0.05;

		/* Get # of tuples that will pass the basic join */
		if (path->jpath.jointype == JOIN_SEMI)
			hashjointuples = outer_matched_rows;
		else
			hashjointuples = outer_path_rows - outer_matched_rows;
	}
	else
	{
		/*
		 * Normal case: charge hashqual evaluation per outer tuple against
		 * its bucket's contents.  The 0.5 factor is a heuristic discount
		 * for bucket entries cheaply rejected without full qual
		 * evaluation.
		 */
		startup_cost += hash_qual_cost.startup;
		run_cost += hash_qual_cost.per_tuple * outer_path_rows *
			clamp_row_est(inner_path_rows * innerbucketsize) * 0.5;

		/*
		 * Get approximate count of tuple pairs passing the hashquals
		 * (inner-join semantics, hence approx_tuple_count).
		 */
		hashjointuples = approx_tuple_count(root, &path->jpath, hashclauses);
	}

	/*
	 * For each tuple pair that passes the hashquals, charge cpu_tuple_cost
	 * plus evaluation of the remaining (qp) quals.
	 */
	startup_cost += qp_qual_cost.startup;
	cpu_per_tuple = cpu_tuple_cost + qp_qual_cost.per_tuple;
	run_cost += cpu_per_tuple * hashjointuples;

	path->jpath.path.startup_cost = startup_cost;
	path->jpath.path.total_cost = startup_cost + run_cost;
}
02698
02699
02700
02701
02702
02703
02704
02705
02706
/*
 * cost_subplan
 *		Figure the costs for a SubPlan (or initplan), filling in the
 *		subplan's startup_cost and per_call_cost fields from the costs of
 *		the already-planned subquery 'plan'.
 */
void
cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan)
{
	QualCost	sp_cost;

	/* Figure any cost for evaluating the testexpr */
	cost_qual_eval(&sp_cost,
				   make_ands_implicit((Expr *) subplan->testexpr),
				   root);

	if (subplan->useHashTable)
	{
		/*
		 * If we are using a hash table for the subquery outputs, then the
		 * cost of evaluating the query is a one-time cost: charge the
		 * plan's full cost plus one cpu_operator_cost per output row for
		 * loading the hash table.
		 *
		 * No explicit per-call probe cost is added here — presumably the
		 * testexpr cost charged above is taken to cover the lookup
		 * (NOTE(review): confirm against nodeSubplan behavior).
		 */
		sp_cost.startup += plan->total_cost +
			cpu_operator_cost * plan->plan_rows;
	}
	else
	{
		/*
		 * Otherwise we will be rescanning the subplan output on each
		 * evaluation.  Depending on the sublink type we may be able to
		 * stop early, so charge only a fraction of the plan's run cost
		 * per call.
		 */
		Cost		plan_run_cost = plan->total_cost - plan->startup_cost;

		if (subplan->subLinkType == EXISTS_SUBLINK)
		{
			/* we only need to fetch 1 tuple; pro-rate the run cost */
			sp_cost.per_tuple += plan_run_cost / plan->plan_rows;
		}
		else if (subplan->subLinkType == ALL_SUBLINK ||
				 subplan->subLinkType == ANY_SUBLINK)
		{
			/* assume we need 50% of the tuples on average */
			sp_cost.per_tuple += 0.50 * plan_run_cost;
			/* ... plus one operator evaluation per fetched tuple */
			sp_cost.per_tuple += 0.50 * plan->plan_rows * cpu_operator_cost;
		}
		else
		{
			/* assume we need all tuples */
			sp_cost.per_tuple += plan_run_cost;
		}

		/*
		 * Account for the subplan's startup cost.  If the subplan is
		 * uncorrelated (no parParam parameters) and its top node
		 * materializes its output, the startup cost is paid only on the
		 * first evaluation; otherwise it recurs on every call, so charge
		 * it per call.
		 */
		if (subplan->parParam == NIL &&
			ExecMaterializesOutput(nodeTag(plan)))
			sp_cost.startup += plan->startup_cost;
		else
			sp_cost.per_tuple += plan->startup_cost;
	}

	subplan->startup_cost = sp_cost.startup;
	subplan->per_call_cost = sp_cost.per_tuple;
}
02784
02785
02786
02787
02788
02789
02790
02791
02792
02793
02794
02795
02796
02797
02798
02799
/*
 * cost_rescan
 *		Given a finished Path, estimate the costs of rescanning it after
 *		having done so the first time.  For most path types this is the
 *		same as the original cost, but several node types cache or
 *		materialize their results and are cheaper the second time through.
 *
 * Outputs are *rescan_startup_cost and *rescan_total_cost.
 */
static void
cost_rescan(PlannerInfo *root, Path *path,
			Cost *rescan_startup_cost,	/* output parameters */
			Cost *rescan_total_cost)
{
	switch (path->pathtype)
	{
		case T_FunctionScan:

			/*
			 * A function scan keeps its results available for rescans, so
			 * the startup work needn't be repeated: rescan startup cost is
			 * zero and the rescan total is just the original run cost.
			 * (NOTE(review): assumes nodeFunctionscan caches results in a
			 * tuplestore — confirm.)
			 */
			*rescan_startup_cost = 0;
			*rescan_total_cost = path->total_cost - path->startup_cost;
			break;
		case T_HashJoin:

			/*
			 * Assume the hash table built during startup survives and
			 * needn't be rebuilt on rescan (valid when no parameter of the
			 * inner side changes), so charge only the original run cost.
			 */
			*rescan_startup_cost = 0;
			*rescan_total_cost = path->total_cost - path->startup_cost;
			break;
		case T_CteScan:
		case T_WorkTableScan:
			{
				/*
				 * These nodes re-read a tuplestore on rescan.  Charge one
				 * cpu_tuple_cost per row returned, plus sequential page
				 * reads if the stored result is too big for work_mem and
				 * therefore spills to disk.
				 */
				Cost		run_cost = cpu_tuple_cost * path->rows;
				double		nbytes = relation_byte_size(path->rows,
														path->parent->width);
				long		work_mem_bytes = work_mem * 1024L;

				if (nbytes > work_mem_bytes)
				{
					/* It will spill, so account for re-reading it */
					double		npages = ceil(nbytes / BLCKSZ);

					run_cost += seq_page_cost * npages;
				}
				*rescan_startup_cost = 0;
				*rescan_total_cost = run_cost;
			}
			break;
		case T_Material:
		case T_Sort:
			{
				/*
				 * These nodes re-read their stored output on rescan.
				 * Charge cpu_operator_cost (cheaper than cpu_tuple_cost,
				 * reflecting the lower per-row overhead of reading stored
				 * tuples) per row, plus disk re-read cost if the result
				 * exceeds work_mem.
				 */
				Cost		run_cost = cpu_operator_cost * path->rows;
				double		nbytes = relation_byte_size(path->rows,
														path->parent->width);
				long		work_mem_bytes = work_mem * 1024L;

				if (nbytes > work_mem_bytes)
				{
					/* It will spill, so account for re-reading it */
					double		npages = ceil(nbytes / BLCKSZ);

					run_cost += seq_page_cost * npages;
				}
				*rescan_startup_cost = 0;
				*rescan_total_cost = run_cost;
			}
			break;
		default:
			/* No special treatment: rescan costs the same as first scan */
			*rescan_startup_cost = path->startup_cost;
			*rescan_total_cost = path->total_cost;
			break;
	}
}
02886
02887
02888
02889
02890
02891
02892
02893
02894
02895
02896
02897 void
02898 cost_qual_eval(QualCost *cost, List *quals, PlannerInfo *root)
02899 {
02900 cost_qual_eval_context context;
02901 ListCell *l;
02902
02903 context.root = root;
02904 context.total.startup = 0;
02905 context.total.per_tuple = 0;
02906
02907
02908
02909 foreach(l, quals)
02910 {
02911 Node *qual = (Node *) lfirst(l);
02912
02913 cost_qual_eval_walker(qual, &context);
02914 }
02915
02916 *cost = context.total;
02917 }
02918
02919
02920
02921
02922
02923 void
02924 cost_qual_eval_node(QualCost *cost, Node *qual, PlannerInfo *root)
02925 {
02926 cost_qual_eval_context context;
02927
02928 context.root = root;
02929 context.total.startup = 0;
02930 context.total.per_tuple = 0;
02931
02932 cost_qual_eval_walker(qual, &context);
02933
02934 *cost = context.total;
02935 }
02936
/*
 * cost_qual_eval_walker
 *		Expression-tree walker that accumulates evaluation costs into
 *		context->total.  Vars and Consts are treated as free; operators
 *		and functions are charged at their pg_proc cost times
 *		cpu_operator_cost; several special node types get bespoke
 *		treatment below.
 */
static bool
cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
{
	if (node == NULL)
		return false;

	/*
	 * RestrictInfo nodes carry an eval_cost cache for their contained
	 * expression (startup < 0 means "not computed yet"), so we can avoid
	 * re-costing the same clause repeatedly.
	 */
	if (IsA(node, RestrictInfo))
	{
		RestrictInfo *rinfo = (RestrictInfo *) node;

		if (rinfo->eval_cost.startup < 0)
		{
			cost_qual_eval_context locContext;

			locContext.root = context->root;
			locContext.total.startup = 0;
			locContext.total.per_tuple = 0;

			/*
			 * For an OR clause, recurse into the marked-up (orclause) form
			 * so that sub-RestrictInfos get their eval_cost caches filled
			 * too; otherwise cost the plain clause.
			 */
			if (rinfo->orclause)
				cost_qual_eval_walker((Node *) rinfo->orclause, &locContext);
			else
				cost_qual_eval_walker((Node *) rinfo->clause, &locContext);

			/*
			 * A pseudoconstant clause is evaluated only once per query (as
			 * a one-time filter), so fold its per-tuple cost into startup.
			 */
			if (rinfo->pseudoconstant)
			{
				/* count one execution during startup */
				locContext.total.startup += locContext.total.per_tuple;
				locContext.total.per_tuple = 0;
			}
			rinfo->eval_cost = locContext.total;
		}
		context->total.startup += rinfo->eval_cost.startup;
		context->total.per_tuple += rinfo->eval_cost.per_tuple;
		/* do NOT recurse into children: eval_cost already covers them */
		return false;
	}

	/*
	 * For other node types, charge per-tuple costs as appropriate, then
	 * fall through to expression_tree_walker to visit the children.
	 */
	if (IsA(node, FuncExpr))
	{
		/* charge the function's declared cost */
		context->total.per_tuple +=
			get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
	}
	else if (IsA(node, OpExpr) ||
			 IsA(node, DistinctExpr) ||
			 IsA(node, NullIfExpr))
	{
		/* rely on struct equivalence to treat these all like OpExpr */
		set_opfuncid((OpExpr *) node);
		context->total.per_tuple +=
			get_func_cost(((OpExpr *) node)->opfuncid) * cpu_operator_cost;
	}
	else if (IsA(node, ScalarArrayOpExpr))
	{
		/*
		 * Estimate that the operator will be applied to about half of the
		 * array elements before the ANY/ALL result is determined.
		 */
		ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
		Node	   *arraynode = (Node *) lsecond(saop->args);

		set_sa_opfuncid(saop);
		context->total.per_tuple += get_func_cost(saop->opfuncid) *
			cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
	}
	else if (IsA(node, Aggref) ||
			 IsA(node, WindowFunc))
	{
		/*
		 * Aggref and WindowFunc nodes are (and should be) treated like
		 * Vars: their execution costs are charged by the Agg/WindowAgg
		 * plan node costing, not here.  Also don't recurse into their
		 * arguments, since those are evaluated as part of that node's
		 * processing, not per input tuple of this expression.
		 */
		return false;			/* don't recurse into children */
	}
	else if (IsA(node, CoerceViaIO))
	{
		CoerceViaIO *iocoerce = (CoerceViaIO *) node;
		Oid			iofunc;
		Oid			typioparam;
		bool		typisvarlena;

		/* check the result type's input function */
		getTypeInputInfo(iocoerce->resulttype,
						 &iofunc, &typioparam);
		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
		/* check the input type's output function */
		getTypeOutputInfo(exprType((Node *) iocoerce->arg),
						  &iofunc, &typisvarlena);
		context->total.per_tuple += get_func_cost(iofunc) * cpu_operator_cost;
	}
	else if (IsA(node, ArrayCoerceExpr))
	{
		/* charge the per-element conversion function once per element */
		ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;
		Node	   *arraynode = (Node *) acoerce->arg;

		if (OidIsValid(acoerce->elemfuncid))
			context->total.per_tuple += get_func_cost(acoerce->elemfuncid) *
				cpu_operator_cost * estimate_array_length(arraynode);
	}
	else if (IsA(node, RowCompareExpr))
	{
		/* Conservatively assume we will check all the columns */
		RowCompareExpr *rcexpr = (RowCompareExpr *) node;
		ListCell   *lc;

		foreach(lc, rcexpr->opnos)
		{
			Oid			opid = lfirst_oid(lc);

			context->total.per_tuple += get_func_cost(get_opcode(opid)) *
				cpu_operator_cost;
		}
	}
	else if (IsA(node, CurrentOfExpr))
	{
		/*
		 * Charge disable_cost so that generic evaluation of CURRENT OF is
		 * heavily penalized; it is meant to be implemented by a special
		 * scan, not evaluated as an ordinary qual.
		 */
		context->total.startup += disable_cost;
	}
	else if (IsA(node, SubLink))
	{
		/* This routine should not be applied to un-planned expressions */
		elog(ERROR, "cannot handle unplanned sub-select");
	}
	else if (IsA(node, SubPlan))
	{
		/*
		 * A subplan node in an expression typically indicates that the
		 * subplan will be executed on each evaluation, so charge its
		 * previously-computed per-call costs (see cost_subplan).  An
		 * exception is an initplan, whose cost the caller accounts for
		 * separately; but such nodes are replaced by Params before
		 * reaching here, so we need not worry about that case.
		 * (NOTE(review): confirm initplan handling against setrefs.)
		 */
		SubPlan    *subplan = (SubPlan *) node;

		context->total.startup += subplan->startup_cost;
		context->total.per_tuple += subplan->per_call_cost;

		/*
		 * testexpr and args costs are already folded into the subplan's
		 * cost fields, so don't recurse into children.
		 */
		return false;			/* don't recurse into children */
	}
	else if (IsA(node, AlternativeSubPlan))
	{
		/*
		 * Arbitrarily use the first alternative plan for costing.  (We
		 * should certainly only include one alternative, and we'd prefer
		 * the one the executor is most likely to use, but we have no
		 * basis for choosing here.)
		 */
		AlternativeSubPlan *asplan = (AlternativeSubPlan *) node;

		return cost_qual_eval_walker((Node *) linitial(asplan->subplans),
									 context);
	}

	/* recurse into children */
	return expression_tree_walker(node, cost_qual_eval_walker,
								  (void *) context);
}
03137
03138
03139
03140
03141
03142
03143
03144
03145
03146
03147
03148
03149
03150 static void
03151 get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
03152 ParamPathInfo *param_info,
03153 QualCost *qpqual_cost)
03154 {
03155 if (param_info)
03156 {
03157
03158 cost_qual_eval(qpqual_cost, param_info->ppi_clauses, root);
03159
03160 qpqual_cost->startup += baserel->baserestrictcost.startup;
03161 qpqual_cost->per_tuple += baserel->baserestrictcost.per_tuple;
03162 }
03163 else
03164 *qpqual_cost = baserel->baserestrictcost;
03165 }
03166
03167
03168
03169
03170
03171
03172
03173
03174
03175
03176
03177
03178
03179
03180
03181
03182
03183
03184
03185
03186
03187
03188
03189
/*
 * compute_semi_anti_join_factors
 *	  Estimate how much of the inner input a SEMI or ANTI join can be
 *	  expected to scan.
 *
 * In a hash or nestloop SEMI/ANTI join, the executor can stop scanning
 * inner rows as soon as it finds a match for the current outer row, so
 * join costing needs these two estimates, stored into *semifactors:
 *
 *	outer_match_frac: fraction of outer rows having at least one match
 *	match_count: average number of matches per matched outer row
 *
 * 'jointype' must be JOIN_SEMI or JOIN_ANTI.
 */
void
compute_semi_anti_join_factors(PlannerInfo *root,
							   RelOptInfo *outerrel,
							   RelOptInfo *innerrel,
							   JoinType jointype,
							   SpecialJoinInfo *sjinfo,
							   List *restrictlist,
							   SemiAntiJoinFactors *semifactors)
{
	Selectivity jselec;
	Selectivity nselec;
	Selectivity avgmatch;
	SpecialJoinInfo norm_sjinfo;
	List	   *joinquals;
	ListCell   *l;

	/* Should only be called in these cases */
	Assert(jointype == JOIN_SEMI || jointype == JOIN_ANTI);

	/*
	 * For an ANTI join, use only the join quals proper (those not pushed
	 * down from above the join), since pushed-down clauses don't affect
	 * the match logic; build a filtered copy of the restrict list.  For a
	 * SEMI join we use the whole list as-is.
	 */
	if (jointype == JOIN_ANTI)
	{
		joinquals = NIL;
		foreach(l, restrictlist)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

			Assert(IsA(rinfo, RestrictInfo));
			if (!rinfo->is_pushed_down)
				joinquals = lappend(joinquals, rinfo);
		}
	}
	else
		joinquals = restrictlist;

	/*
	 * Get the selectivity of the join quals under the actual SEMI/ANTI
	 * join semantics.
	 */
	jselec = clauselist_selectivity(root,
									joinquals,
									0,
									jointype,
									sjinfo);

	/*
	 * Also get the same quals' selectivity under plain inner-join
	 * semantics, using a throwaway SpecialJoinInfo describing an inner
	 * join of the same two rels.
	 */
	norm_sjinfo.type = T_SpecialJoinInfo;
	norm_sjinfo.min_lefthand = outerrel->relids;
	norm_sjinfo.min_righthand = innerrel->relids;
	norm_sjinfo.syn_lefthand = outerrel->relids;
	norm_sjinfo.syn_righthand = innerrel->relids;
	norm_sjinfo.jointype = JOIN_INNER;
	/* we don't bother trying to make the remaining fields valid */
	norm_sjinfo.lhs_strict = false;
	norm_sjinfo.delay_upper_joins = false;
	norm_sjinfo.join_quals = NIL;

	nselec = clauselist_selectivity(root,
									joinquals,
									0,
									JOIN_INNER,
									&norm_sjinfo);

	/* Avoid leaking the list cells built for the ANTI case */
	if (jointype == JOIN_ANTI)
		list_free(joinquals);

	/*
	 * jselec can be interpreted as the fraction of outer rows having at
	 * least one match.  The inner-join selectivity nselec gives the total
	 * expected match pairs as nselec * outer_rows * inner_rows, so the
	 * average number of matches per *matched* outer row is
	 * nselec * inner_rows / jselec.  Clamp to at least 1 (a matched row
	 * has, by definition, at least one match), and fall back to 1 when
	 * jselec is zero to avoid dividing by zero.
	 */
	if (jselec > 0)				/* protect against zero divide */
	{
		avgmatch = nselec * innerrel->rows / jselec;
		/* Clamp to sane range */
		avgmatch = Max(1.0, avgmatch);
	}
	else
		avgmatch = 1.0;

	semifactors->outer_match_frac = jselec;
	semifactors->match_count = avgmatch;
}
03287
03288
03289
03290
03291
03292
03293
03294
03295
03296
03297
/*
 * has_indexed_join_quals
 *	  Check whether a nestloop join's clauses are all enforced as index quals
 *	  of the inner (parameterized) scan, so the join itself re-checks nothing.
 *
 * Returns true only if (a) the join has no quals left to evaluate at the
 * join level, (b) the inner path is parameterized, (c) the inner path is a
 * simple index or bitmap-index scan, and (d) at least one parameterization
 * clause belonging to this join appears among the scan's index clauses.
 */
static bool
has_indexed_join_quals(NestPath *joinpath)
{
	Relids		joinrelids = joinpath->path.parent->relids;
	Path	   *innerpath = joinpath->innerjoinpath;
	List	   *indexclauses;
	bool		found_one;
	ListCell   *lc;

	/* If the join still has quals to evaluate, it's not fully indexed */
	if (joinpath->joinrestrictinfo != NIL)
		return false;

	/* Nor if the inner path isn't parameterized at all */
	if (innerpath->param_info == NULL)
		return false;

	/* Find the indexclauses list for the inner scan */
	switch (innerpath->pathtype)
	{
		case T_IndexScan:
		case T_IndexOnlyScan:
			indexclauses = ((IndexPath *) innerpath)->indexclauses;
			break;
		case T_BitmapHeapScan:
			{
				/* Accept only a simple single-index bitmap scan, not AND/OR */
				Path	   *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;

				if (IsA(bmqual, IndexPath))
					indexclauses = ((IndexPath *) bmqual)->indexclauses;
				else
					return false;
				break;
			}
		default:

			/*
			 * Any other inner-path type can't be shown to enforce its
			 * parameterization clauses via an index, so give up.
			 */
			return false;
	}

	/*
	 * Examine the inner path's parameterization clauses.  Every clause that
	 * belongs to this join (is movable into the inner rel at this join level)
	 * must appear in the index clauses, either directly or as a redundant
	 * derived equality; and we must find at least one such clause.
	 */
	found_one = false;
	foreach(lc, innerpath->param_info->ppi_clauses)
	{
		RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);

		if (join_clause_is_movable_into(rinfo,
										innerpath->parent->relids,
										joinrelids))
		{
			if (!(list_member_ptr(indexclauses, rinfo) ||
				  is_redundant_derived_clause(rinfo, indexclauses)))
				return false;
			found_one = true;
		}
	}
	return found_one;
}
03365
03366
03367
03368
03369
03370
03371
03372
03373
03374
03375
03376
03377
03378
03379
03380
03381
03382
03383
03384
03385
03386
03387
03388
03389
03390
03391 static double
03392 approx_tuple_count(PlannerInfo *root, JoinPath *path, List *quals)
03393 {
03394 double tuples;
03395 double outer_tuples = path->outerjoinpath->rows;
03396 double inner_tuples = path->innerjoinpath->rows;
03397 SpecialJoinInfo sjinfo;
03398 Selectivity selec = 1.0;
03399 ListCell *l;
03400
03401
03402
03403
03404 sjinfo.type = T_SpecialJoinInfo;
03405 sjinfo.min_lefthand = path->outerjoinpath->parent->relids;
03406 sjinfo.min_righthand = path->innerjoinpath->parent->relids;
03407 sjinfo.syn_lefthand = path->outerjoinpath->parent->relids;
03408 sjinfo.syn_righthand = path->innerjoinpath->parent->relids;
03409 sjinfo.jointype = JOIN_INNER;
03410
03411 sjinfo.lhs_strict = false;
03412 sjinfo.delay_upper_joins = false;
03413 sjinfo.join_quals = NIL;
03414
03415
03416 foreach(l, quals)
03417 {
03418 Node *qual = (Node *) lfirst(l);
03419
03420
03421 selec *= clause_selectivity(root, qual, 0, JOIN_INNER, &sjinfo);
03422 }
03423
03424
03425 tuples = selec * outer_tuples * inner_tuples;
03426
03427 return clamp_row_est(tuples);
03428 }
03429
03430
03431
03432
03433
03434
03435
03436
03437
03438
03439
03440
03441
03442
03443
03444 void
03445 set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel)
03446 {
03447 double nrows;
03448
03449
03450 Assert(rel->relid > 0);
03451
03452 nrows = rel->tuples *
03453 clauselist_selectivity(root,
03454 rel->baserestrictinfo,
03455 0,
03456 JOIN_INNER,
03457 NULL);
03458
03459 rel->rows = clamp_row_est(nrows);
03460
03461 cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);
03462
03463 set_rel_width(root, rel);
03464 }
03465
03466
03467
03468
03469
03470
03471
03472
03473
03474 double
03475 get_parameterized_baserel_size(PlannerInfo *root, RelOptInfo *rel,
03476 List *param_clauses)
03477 {
03478 List *allclauses;
03479 double nrows;
03480
03481
03482
03483
03484
03485
03486
03487 allclauses = list_concat(list_copy(param_clauses),
03488 rel->baserestrictinfo);
03489 nrows = rel->tuples *
03490 clauselist_selectivity(root,
03491 allclauses,
03492 rel->relid,
03493 JOIN_INNER,
03494 NULL);
03495 nrows = clamp_row_est(nrows);
03496
03497 if (nrows > rel->rows)
03498 nrows = rel->rows;
03499 return nrows;
03500 }
03501
03502
03503
03504
03505
03506
03507
03508
03509
03510
03511
03512
03513
03514
03515
03516
03517
03518
03519
03520
03521
03522
03523
03524 void
03525 set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
03526 RelOptInfo *outer_rel,
03527 RelOptInfo *inner_rel,
03528 SpecialJoinInfo *sjinfo,
03529 List *restrictlist)
03530 {
03531 rel->rows = calc_joinrel_size_estimate(root,
03532 outer_rel->rows,
03533 inner_rel->rows,
03534 sjinfo,
03535 restrictlist);
03536 }
03537
03538
03539
03540
03541
03542
03543
03544
03545
03546
03547
03548
03549
03550
03551
03552
03553 double
03554 get_parameterized_joinrel_size(PlannerInfo *root, RelOptInfo *rel,
03555 double outer_rows,
03556 double inner_rows,
03557 SpecialJoinInfo *sjinfo,
03558 List *restrict_clauses)
03559 {
03560 double nrows;
03561
03562
03563
03564
03565
03566
03567
03568
03569
03570
03571 nrows = calc_joinrel_size_estimate(root,
03572 outer_rows,
03573 inner_rows,
03574 sjinfo,
03575 restrict_clauses);
03576
03577 if (nrows > rel->rows)
03578 nrows = rel->rows;
03579 return nrows;
03580 }
03581
03582
03583
03584
03585
03586
/*
 * calc_joinrel_size_estimate
 *	  Workhorse for join row-count estimation: combine the input row counts,
 *	  join type, and restriction-clause selectivities into an output estimate.
 */
static double
calc_joinrel_size_estimate(PlannerInfo *root,
						   double outer_rows,
						   double inner_rows,
						   SpecialJoinInfo *sjinfo,
						   List *restrictlist)
{
	JoinType	jointype = sjinfo->jointype;
	Selectivity jselec;			/* selectivity of the join quals proper */
	Selectivity pselec;			/* selectivity of pushed-down (filter) quals */
	double		nrows;

	/*
	 * For an outer join, clauses pushed down from above the join are not
	 * part of the join condition; they filter the join's output afterwards.
	 * So estimate their selectivity separately from the join quals.
	 */
	if (IS_OUTER_JOIN(jointype))
	{
		List	   *joinquals = NIL;
		List	   *pushedquals = NIL;
		ListCell   *l;

		/* Split the restrictlist into join quals and pushed-down quals */
		foreach(l, restrictlist)
		{
			RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);

			Assert(IsA(rinfo, RestrictInfo));
			if (rinfo->is_pushed_down)
				pushedquals = lappend(pushedquals, rinfo);
			else
				joinquals = lappend(joinquals, rinfo);
		}

		/* Get the separate selectivities */
		jselec = clauselist_selectivity(root,
										joinquals,
										0,
										jointype,
										sjinfo);
		pselec = clauselist_selectivity(root,
										pushedquals,
										0,
										jointype,
										sjinfo);

		/* Free the temporary lists (cells only; the rinfos are shared) */
		list_free(joinquals);
		list_free(pushedquals);
	}
	else
	{
		/* Not an outer join: the whole restrictlist is the join condition */
		jselec = clauselist_selectivity(root,
										restrictlist,
										0,
										jointype,
										sjinfo);
		pselec = 0.0;			/* not used below, keep compiler quiet */
	}

	/*
	 * Basic estimate is cross-product size times join-qual selectivity, then
	 * corrected per join type.  Note that for LEFT/FULL the pushed-down-qual
	 * selectivity is applied only after the row count has been floored at
	 * the preserved side's size (the floor reflects null-extended rows,
	 * which the pushed-down quals then filter).
	 */
	switch (jointype)
	{
		case JOIN_INNER:
			nrows = outer_rows * inner_rows * jselec;
			break;
		case JOIN_LEFT:
			nrows = outer_rows * inner_rows * jselec;
			if (nrows < outer_rows)
				nrows = outer_rows;		/* each outer row appears at least once */
			nrows *= pselec;
			break;
		case JOIN_FULL:
			nrows = outer_rows * inner_rows * jselec;
			if (nrows < outer_rows)
				nrows = outer_rows;
			if (nrows < inner_rows)
				nrows = inner_rows;
			nrows *= pselec;
			break;
		case JOIN_SEMI:
			nrows = outer_rows * jselec;
			/* pselec not used: semi joins can't have pushed-down quals here */
			break;
		case JOIN_ANTI:
			nrows = outer_rows * (1.0 - jselec);
			nrows *= pselec;
			break;
		default:
			/* other join types are not expected at this level */
			elog(ERROR, "unrecognized join type: %d", (int) jointype);
			nrows = 0;			/* keep compiler quiet */
			break;
	}

	return clamp_row_est(nrows);
}
03701
03702
03703
03704
03705
03706
03707
03708
03709
03710
03711
/*
 * set_subquery_size_estimates
 *	  Set size estimates for a base relation that is a subquery.
 *
 * The subquery must already have been planned: rel->subroot and
 * rel->subplan are consulted here.
 */
void
set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	PlannerInfo *subroot = rel->subroot;
	RangeTblEntry *rte PG_USED_FOR_ASSERTS_ONLY;
	ListCell   *lc;

	/* Should only be applied to base relations that are subqueries */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_SUBQUERY);

	/* Raw row count comes straight from the subquery's plan */
	rel->tuples = rel->subplan->plan_rows;

	/*
	 * Compute per-column width estimates by scanning the subquery's
	 * targetlist: for simple Var outputs, copy the width estimate made while
	 * planning the subquery.  Anything left at zero here will be filled in
	 * with a datatype-based default by set_rel_width (via
	 * set_baserel_size_estimates below).
	 */
	foreach(lc, subroot->parse->targetList)
	{
		TargetEntry *te = (TargetEntry *) lfirst(lc);
		Node	   *texpr = (Node *) te->expr;
		int32		item_width = 0;

		Assert(IsA(te, TargetEntry));
		/* junk columns aren't visible to the upper query */
		if (te->resjunk)
			continue;

		/*
		 * Ignore non-junk columns whose resno falls outside the attribute
		 * range known to this rel (e.g. columns added to a view since the
		 * current query was parsed — NOTE(review): presumed cause, confirm).
		 */
		if (te->resno < rel->min_attr || te->resno > rel->max_attr)
			continue;

		/*
		 * Copy the recorded width only for a plain Var when the subquery is
		 * not a set operation.  (In the set-operation case the Vars don't
		 * map onto base rels of subroot that find_base_rel could look up —
		 * NOTE(review): presumed reason for the setOperations test, confirm.)
		 */
		if (IsA(texpr, Var) &&
			subroot->parse->setOperations == NULL)
		{
			Var		   *var = (Var *) texpr;
			RelOptInfo *subrel = find_base_rel(subroot, var->varno);

			item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
		}
		rel->attr_widths[te->resno - rel->min_attr] = item_width;
	}

	/* Now estimate number of output rows, qual costs, and width */
	set_baserel_size_estimates(root, rel);
}
03780
03781
03782
03783
03784
03785
03786
03787
03788
03789
/*
 * set_function_size_estimates
 *	  Set size estimates for a base relation that is a function call.
 *
 * The rel's targetlist and restrictinfo list must be set already.
 */
void
set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are functions */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_FUNCTION);

	/* Estimate how many rows the function expression itself will return */
	rel->tuples = expression_returns_set_rows(rte->funcexpr);

	/* Now estimate number of output rows, qual costs, and width */
	set_baserel_size_estimates(root, rel);
}
03806
03807
03808
03809
03810
03811
03812
03813
03814
03815
/*
 * set_values_size_estimates
 *	  Set size estimates for a base relation that is a VALUES list.
 *
 * The rel's targetlist and restrictinfo list must be set already.
 */
void
set_values_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	RangeTblEntry *rte;

	/* Should only be applied to base relations that are values lists */
	Assert(rel->relid > 0);
	rte = planner_rt_fetch(rel->relid, root);
	Assert(rte->rtekind == RTE_VALUES);

	/*
	 * The raw row count is exact: one row per sublist in the VALUES
	 * construct — no guessing required.
	 */
	rel->tuples = list_length(rte->values_lists);

	/* Now estimate number of output rows, qual costs, and width */
	set_baserel_size_estimates(root, rel);
}
03837
03838
03839
03840
03841
03842
03843
03844
03845
03846
03847
03848 void
03849 set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
03850 {
03851 RangeTblEntry *rte;
03852
03853
03854 Assert(rel->relid > 0);
03855 rte = planner_rt_fetch(rel->relid, root);
03856 Assert(rte->rtekind == RTE_CTE);
03857
03858 if (rte->self_reference)
03859 {
03860
03861
03862
03863
03864 rel->tuples = 10 * cteplan->plan_rows;
03865 }
03866 else
03867 {
03868
03869 rel->tuples = cteplan->plan_rows;
03870 }
03871
03872
03873 set_baserel_size_estimates(root, rel);
03874 }
03875
03876
03877
03878
03879
03880
03881
03882
03883
03884
03885
03886
03887
03888
03889
03890
/*
 * set_foreign_size_estimates
 *	  Set default size estimates for a foreign table.
 *
 * Installs a placeholder row count of 1000 (presumably the FDW is expected
 * to overwrite rel->rows with a better estimate later — NOTE(review):
 * confirm against the FDW API), and fills in the qual-cost and width fields
 * that the core planner is responsible for.
 */
void
set_foreign_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
	/* Should only be applied to base relations */
	Assert(rel->relid > 0);

	rel->rows = 1000;			/* entirely bogus default estimate */

	cost_qual_eval(&rel->baserestrictcost, rel->baserestrictinfo, root);

	set_rel_width(root, rel);
}
03903
03904
03905
03906
03907
03908
03909
03910
03911
03912
03913
03914
03915
03916
03917
03918
03919
03920
03921
03922
03923
/*
 * set_rel_width
 *	  Set the estimated output width of a base relation.
 *
 * The width is the sum of per-column width estimates for everything in the
 * rel's targetlist.  As a side effect, per-attribute widths are cached in
 * rel->attr_widths (indexed by attno - min_attr) for use elsewhere; entries
 * already > 0 are trusted and not recomputed.
 */
static void
set_rel_width(PlannerInfo *root, RelOptInfo *rel)
{
	Oid			reloid = planner_rt_fetch(rel->relid, root)->relid;
	int32		tuple_width = 0;
	bool		have_wholerow_var = false;
	ListCell   *lc;

	foreach(lc, rel->reltargetlist)
	{
		Node	   *node = (Node *) lfirst(lc);

		/*
		 * A Var is handled specially only when it actually belongs to this
		 * rel (varno matches); otherwise it falls through to the generic
		 * expression case below.  (A foreign varno here presumably arises
		 * from lateral references — NOTE(review): confirm.)
		 */
		if (IsA(node, Var) &&
			((Var *) node)->varno == rel->relid)
		{
			Var		   *var = (Var *) node;
			int			ndx;
			int32		item_width;

			Assert(var->varattno >= rel->min_attr);
			Assert(var->varattno <= rel->max_attr);

			ndx = var->varattno - rel->min_attr;

			/*
			 * A whole-row Var's width can't be computed until all other
			 * columns' widths are known, so just remember we saw one.
			 */
			if (var->varattno == 0)
			{
				have_wholerow_var = true;
				continue;
			}

			/* Use any cached width estimate first */
			if (rel->attr_widths[ndx] > 0)
			{
				tuple_width += rel->attr_widths[ndx];
				continue;
			}

			/* For a real column, try the statistics-based average width */
			if (reloid != InvalidOid && var->varattno > 0)
			{
				item_width = get_attavgwidth(reloid, var->varattno);
				if (item_width > 0)
				{
					rel->attr_widths[ndx] = item_width;
					tuple_width += item_width;
					continue;
				}
			}

			/* Fall back to a datatype-based default estimate */
			item_width = get_typavgwidth(var->vartype, var->vartypmod);
			Assert(item_width > 0);
			rel->attr_widths[ndx] = item_width;
			tuple_width += item_width;
		}
		else if (IsA(node, PlaceHolderVar))
		{
			/* Width of a PlaceHolderVar was computed when it was registered */
			PlaceHolderVar *phv = (PlaceHolderVar *) node;
			PlaceHolderInfo *phinfo = find_placeholder_info(root, phv, false);

			tuple_width += phinfo->ph_width;
		}
		else
		{
			/*
			 * Arbitrary expression: estimate from its result type.  Note
			 * this width is not cached in attr_widths, since it has no
			 * attribute number.
			 */
			int32		item_width;

			item_width = get_typavgwidth(exprType(node), exprTypmod(node));
			Assert(item_width > 0);
			tuple_width += item_width;
		}
	}

	/*
	 * If there was a whole-row Var, compute its width now that the
	 * individual column widths are available.
	 */
	if (have_wholerow_var)
	{
		int32		wholerow_width = sizeof(HeapTupleHeaderData);

		if (reloid != InvalidOid)
		{
			/* Real relation: let plancat.c account for all columns */
			wholerow_width += get_relation_data_width(reloid,
										   rel->attr_widths - rel->min_attr);
		}
		else
		{
			/* No physical relation: sum the cached per-column widths */
			AttrNumber	i;

			for (i = 1; i <= rel->max_attr; i++)
				wholerow_width += rel->attr_widths[i - rel->min_attr];
		}

		/* Cache it under attno 0 */
		rel->attr_widths[0 - rel->min_attr] = wholerow_width;

		/*
		 * The whole-row Var contributes its full width to the output, on
		 * top of any individually-referenced columns.
		 */
		tuple_width += wholerow_width;
	}

	Assert(tuple_width >= 0);
	rel->width = tuple_width;
}
04053
04054
04055
04056
04057
04058
04059 static double
04060 relation_byte_size(double tuples, int width)
04061 {
04062 return tuples * (MAXALIGN(width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
04063 }
04064
04065
04066
04067
04068
04069
/*
 * page_size
 *	  Returns an estimate of the number of pages covered by a given number
 *	  of tuples of a given width (size in bytes).
 */
static double
page_size(double tuples, int width)
{
	return ceil(relation_byte_size(tuples, width) / BLCKSZ);
}