Header And Logo

PostgreSQL
| The world's most advanced open source database.

portalmem.c

Go to the documentation of this file.
00001 /*-------------------------------------------------------------------------
00002  *
00003  * portalmem.c
00004  *    backend portal memory management
00005  *
00006  * Portals are objects representing the execution state of a query.
00007  * This module provides memory management services for portals, but it
00008  * doesn't actually run the executor for them.
00009  *
00010  *
00011  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
00012  * Portions Copyright (c) 1994, Regents of the University of California
00013  *
00014  * IDENTIFICATION
00015  *    src/backend/utils/mmgr/portalmem.c
00016  *
00017  *-------------------------------------------------------------------------
00018  */
00019 #include "postgres.h"
00020 
00021 #include "access/xact.h"
00022 #include "catalog/pg_type.h"
00023 #include "commands/portalcmds.h"
00024 #include "miscadmin.h"
00025 #include "utils/builtins.h"
00026 #include "utils/memutils.h"
00027 #include "utils/timestamp.h"
00028 
00029 /*
00030  * Estimate of the maximum number of open portals a user would have,
00031  * used in initially sizing the PortalHashTable in EnablePortalManager().
00032  * Since the hash table can expand, there's no need to make this overly
00033  * generous, and keeping it small avoids unnecessary overhead in the
00034  * hash_seq_search() calls executed during transaction end.
00035  */
00036 #define PORTALS_PER_USER       16
00037 
00038 
/* ----------------
 *      Global state
 * ----------------
 */

/* Portal names are limited to the length of other SQL identifiers. */
#define MAX_PORTALNAME_LEN      NAMEDATALEN

/*
 * Hash table entry: maps a portal name to its Portal.  The name is stored
 * inline in the entry so that Portal->name can point directly at it (see
 * PortalHashTableInsert), avoiding a second copy of the string.
 */
typedef struct portalhashent
{
    char        portalname[MAX_PORTALNAME_LEN];
    Portal      portal;
} PortalHashEnt;

/* Table of all extant portals, keyed by name; built in EnablePortalManager */
static HTAB *PortalHashTable = NULL;

/* Look up NAME in PortalHashTable; sets PORTAL to the match, or NULL. */
#define PortalHashTableLookup(NAME, PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_FIND, NULL); \
    if (hentry) \
        PORTAL = hentry->portal; \
    else \
        PORTAL = NULL; \
} while(0)

/*
 * Insert PORTAL under NAME; elog(ERROR) on a duplicate name.  Also makes
 * PORTAL->name point at the copy of the name stored in the hash entry.
 */
#define PortalHashTableInsert(PORTAL, NAME) \
do { \
    PortalHashEnt *hentry; bool found; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           (NAME), HASH_ENTER, &found); \
    if (found) \
        elog(ERROR, "duplicate portal name"); \
    hentry->portal = PORTAL; \
    /* To avoid duplicate storage, make PORTAL->name point to htab entry */ \
    PORTAL->name = hentry->portalname; \
} while(0)

/* Remove PORTAL's hash entry; only WARNING (not ERROR) if it's missing. */
#define PortalHashTableDelete(PORTAL) \
do { \
    PortalHashEnt *hentry; \
    \
    hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
                                           PORTAL->name, HASH_REMOVE, NULL); \
    if (hentry == NULL) \
        elog(WARNING, "trying to delete portal name that does not exist"); \
} while(0)

/* Parent memory context of all portal structures and their heap contexts */
static MemoryContext PortalMemory = NULL;
00091 
00092 /* ----------------------------------------------------------------
00093  *                 public portal interface functions
00094  * ----------------------------------------------------------------
00095  */
00096 
00097 /*
00098  * EnablePortalManager
00099  *      Enables the portal management module at backend startup.
00100  */
00101 void
00102 EnablePortalManager(void)
00103 {
00104     HASHCTL     ctl;
00105 
00106     Assert(PortalMemory == NULL);
00107 
00108     PortalMemory = AllocSetContextCreate(TopMemoryContext,
00109                                          "PortalMemory",
00110                                          ALLOCSET_DEFAULT_MINSIZE,
00111                                          ALLOCSET_DEFAULT_INITSIZE,
00112                                          ALLOCSET_DEFAULT_MAXSIZE);
00113 
00114     ctl.keysize = MAX_PORTALNAME_LEN;
00115     ctl.entrysize = sizeof(PortalHashEnt);
00116 
00117     /*
00118      * use PORTALS_PER_USER as a guess of how many hash table entries to
00119      * create, initially
00120      */
00121     PortalHashTable = hash_create("Portal hash", PORTALS_PER_USER,
00122                                   &ctl, HASH_ELEM);
00123 }
00124 
00125 /*
00126  * GetPortalByName
00127  *      Returns a portal given a portal name, or NULL if name not found.
00128  */
00129 Portal
00130 GetPortalByName(const char *name)
00131 {
00132     Portal      portal;
00133 
00134     if (PointerIsValid(name))
00135         PortalHashTableLookup(name, portal);
00136     else
00137         portal = NULL;
00138 
00139     return portal;
00140 }
00141 
00142 /*
00143  * PortalListGetPrimaryStmt
00144  *      Get the "primary" stmt within a portal, ie, the one marked canSetTag.
00145  *
00146  * Returns NULL if no such stmt.  If multiple PlannedStmt structs within the
00147  * portal are marked canSetTag, returns the first one.  Neither of these
00148  * cases should occur in present usages of this function.
00149  *
00150  * Copes if given a list of Querys --- can't happen in a portal, but this
00151  * code also supports plancache.c, which needs both cases.
00152  *
00153  * Note: the reason this is just handed a List is so that plancache.c
00154  * can share the code.  For use with a portal, use PortalGetPrimaryStmt
00155  * rather than calling this directly.
00156  */
00157 Node *
00158 PortalListGetPrimaryStmt(List *stmts)
00159 {
00160     ListCell   *lc;
00161 
00162     foreach(lc, stmts)
00163     {
00164         Node       *stmt = (Node *) lfirst(lc);
00165 
00166         if (IsA(stmt, PlannedStmt))
00167         {
00168             if (((PlannedStmt *) stmt)->canSetTag)
00169                 return stmt;
00170         }
00171         else if (IsA(stmt, Query))
00172         {
00173             if (((Query *) stmt)->canSetTag)
00174                 return stmt;
00175         }
00176         else
00177         {
00178             /* Utility stmts are assumed canSetTag if they're the only stmt */
00179             if (list_length(stmts) == 1)
00180                 return stmt;
00181         }
00182     }
00183     return NULL;
00184 }
00185 
/*
 * CreatePortal
 *      Returns a new portal given a name.
 *
 * allowDup: if true, automatically drop any pre-existing portal of the
 * same name (if false, an error is raised).
 *
 * dupSilent: if true, don't even emit a WARNING.
 *
 * The portal and its heap context are allocated under PortalMemory; its
 * resource owner is a child of the current transaction's resource owner.
 */
Portal
CreatePortal(const char *name, bool allowDup, bool dupSilent)
{
    Portal      portal;

    AssertArg(PointerIsValid(name));

    /* Deal with any pre-existing portal of the same name */
    portal = GetPortalByName(name);
    if (PortalIsValid(portal))
    {
        if (!allowDup)
            ereport(ERROR,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("cursor \"%s\" already exists", name)));
        if (!dupSilent)
            ereport(WARNING,
                    (errcode(ERRCODE_DUPLICATE_CURSOR),
                     errmsg("closing existing cursor \"%s\"",
                            name)));
        PortalDrop(portal, false);
    }

    /* make new portal structure; AllocZero leaves all fields zeroed */
    portal = (Portal) MemoryContextAllocZero(PortalMemory, sizeof *portal);

    /* initialize portal heap context; typically it won't store much */
    portal->heap = AllocSetContextCreate(PortalMemory,
                                         "PortalHeapMemory",
                                         ALLOCSET_SMALL_MINSIZE,
                                         ALLOCSET_SMALL_INITSIZE,
                                         ALLOCSET_SMALL_MAXSIZE);

    /* create a resource owner for the portal */
    portal->resowner = ResourceOwnerCreate(CurTransactionResourceOwner,
                                           "Portal");

    /* initialize portal fields that don't start off zero */
    portal->status = PORTAL_NEW;
    portal->cleanup = PortalCleanup;
    portal->createSubid = GetCurrentSubTransactionId();
    portal->strategy = PORTAL_MULTI_QUERY;
    portal->cursorOptions = CURSOR_OPT_NO_SCROLL;
    portal->atStart = true;
    portal->atEnd = true;       /* disallow fetches until query is set */
    portal->visible = true;
    portal->creation_time = GetCurrentStatementStartTimestamp();

    /* put portal in table (sets portal->name) */
    PortalHashTableInsert(portal, name);

    return portal;
}
00247 
00248 /*
00249  * CreateNewPortal
00250  *      Create a new portal, assigning it a random nonconflicting name.
00251  */
00252 Portal
00253 CreateNewPortal(void)
00254 {
00255     static unsigned int unnamed_portal_count = 0;
00256 
00257     char        portalname[MAX_PORTALNAME_LEN];
00258 
00259     /* Select a nonconflicting name */
00260     for (;;)
00261     {
00262         unnamed_portal_count++;
00263         sprintf(portalname, "<unnamed portal %u>", unnamed_portal_count);
00264         if (GetPortalByName(portalname) == NULL)
00265             break;
00266     }
00267 
00268     return CreatePortal(portalname, false, false);
00269 }
00270 
00271 /*
00272  * PortalDefineQuery
00273  *      A simple subroutine to establish a portal's query.
00274  *
00275  * Notes: as of PG 8.4, caller MUST supply a sourceText string; it is not
00276  * allowed anymore to pass NULL.  (If you really don't have source text,
00277  * you can pass a constant string, perhaps "(query not available)".)
00278  *
00279  * commandTag shall be NULL if and only if the original query string
00280  * (before rewriting) was an empty string.  Also, the passed commandTag must
00281  * be a pointer to a constant string, since it is not copied.
00282  *
00283  * If cplan is provided, then it is a cached plan containing the stmts, and
00284  * the caller must have done GetCachedPlan(), causing a refcount increment.
00285  * The refcount will be released when the portal is destroyed.
00286  *
00287  * If cplan is NULL, then it is the caller's responsibility to ensure that
00288  * the passed plan trees have adequate lifetime.  Typically this is done by
00289  * copying them into the portal's heap context.
00290  *
00291  * The caller is also responsible for ensuring that the passed prepStmtName
00292  * (if not NULL) and sourceText have adequate lifetime.
00293  *
00294  * NB: this function mustn't do much beyond storing the passed values; in
00295  * particular don't do anything that risks elog(ERROR).  If that were to
00296  * happen here before storing the cplan reference, we'd leak the plancache
00297  * refcount that the caller is trying to hand off to us.
00298  */
00299 void
00300 PortalDefineQuery(Portal portal,
00301                   const char *prepStmtName,
00302                   const char *sourceText,
00303                   const char *commandTag,
00304                   List *stmts,
00305                   CachedPlan *cplan)
00306 {
00307     AssertArg(PortalIsValid(portal));
00308     AssertState(portal->status == PORTAL_NEW);
00309 
00310     AssertArg(sourceText != NULL);
00311     AssertArg(commandTag != NULL || stmts == NIL);
00312 
00313     portal->prepStmtName = prepStmtName;
00314     portal->sourceText = sourceText;
00315     portal->commandTag = commandTag;
00316     portal->stmts = stmts;
00317     portal->cplan = cplan;
00318     portal->status = PORTAL_DEFINED;
00319 }
00320 
00321 /*
00322  * PortalReleaseCachedPlan
00323  *      Release a portal's reference to its cached plan, if any.
00324  */
00325 static void
00326 PortalReleaseCachedPlan(Portal portal)
00327 {
00328     if (portal->cplan)
00329     {
00330         ReleaseCachedPlan(portal->cplan, false);
00331         portal->cplan = NULL;
00332 
00333         /*
00334          * We must also clear portal->stmts which is now a dangling reference
00335          * to the cached plan's plan list.  This protects any code that might
00336          * try to examine the Portal later.
00337          */
00338         portal->stmts = NIL;
00339     }
00340 }
00341 
00342 /*
00343  * PortalCreateHoldStore
00344  *      Create the tuplestore for a portal.
00345  */
00346 void
00347 PortalCreateHoldStore(Portal portal)
00348 {
00349     MemoryContext oldcxt;
00350 
00351     Assert(portal->holdContext == NULL);
00352     Assert(portal->holdStore == NULL);
00353 
00354     /*
00355      * Create the memory context that is used for storage of the tuple set.
00356      * Note this is NOT a child of the portal's heap memory.
00357      */
00358     portal->holdContext =
00359         AllocSetContextCreate(PortalMemory,
00360                               "PortalHoldContext",
00361                               ALLOCSET_DEFAULT_MINSIZE,
00362                               ALLOCSET_DEFAULT_INITSIZE,
00363                               ALLOCSET_DEFAULT_MAXSIZE);
00364 
00365     /*
00366      * Create the tuple store, selecting cross-transaction temp files, and
00367      * enabling random access only if cursor requires scrolling.
00368      *
00369      * XXX: Should maintenance_work_mem be used for the portal size?
00370      */
00371     oldcxt = MemoryContextSwitchTo(portal->holdContext);
00372 
00373     portal->holdStore =
00374         tuplestore_begin_heap(portal->cursorOptions & CURSOR_OPT_SCROLL,
00375                               true, work_mem);
00376 
00377     MemoryContextSwitchTo(oldcxt);
00378 }
00379 
00380 /*
00381  * PinPortal
00382  *      Protect a portal from dropping.
00383  *
00384  * A pinned portal is still unpinned and dropped at transaction or
00385  * subtransaction abort.
00386  */
00387 void
00388 PinPortal(Portal portal)
00389 {
00390     if (portal->portalPinned)
00391         elog(ERROR, "portal already pinned");
00392 
00393     portal->portalPinned = true;
00394 }
00395 
00396 void
00397 UnpinPortal(Portal portal)
00398 {
00399     if (!portal->portalPinned)
00400         elog(ERROR, "portal not pinned");
00401 
00402     portal->portalPinned = false;
00403 }
00404 
00405 /*
00406  * MarkPortalDone
00407  *      Transition a portal from ACTIVE to DONE state.
00408  *
00409  * NOTE: never set portal->status = PORTAL_DONE directly; call this instead.
00410  */
00411 void
00412 MarkPortalDone(Portal portal)
00413 {
00414     /* Perform the state transition */
00415     Assert(portal->status == PORTAL_ACTIVE);
00416     portal->status = PORTAL_DONE;
00417 
00418     /*
00419      * Allow portalcmds.c to clean up the state it knows about.  We might as
00420      * well do that now, since the portal can't be executed any more.
00421      *
00422      * In some cases involving execution of a ROLLBACK command in an already
00423      * aborted transaction, this prevents an assertion failure caused by
00424      * reaching AtCleanup_Portals with the cleanup hook still unexecuted.
00425      */
00426     if (PointerIsValid(portal->cleanup))
00427     {
00428         (*portal->cleanup) (portal);
00429         portal->cleanup = NULL;
00430     }
00431 }
00432 
00433 /*
00434  * MarkPortalFailed
00435  *      Transition a portal into FAILED state.
00436  *
00437  * NOTE: never set portal->status = PORTAL_FAILED directly; call this instead.
00438  */
00439 void
00440 MarkPortalFailed(Portal portal)
00441 {
00442     /* Perform the state transition */
00443     Assert(portal->status != PORTAL_DONE);
00444     portal->status = PORTAL_FAILED;
00445 
00446     /*
00447      * Allow portalcmds.c to clean up the state it knows about.  We might as
00448      * well do that now, since the portal can't be executed any more.
00449      *
00450      * In some cases involving cleanup of an already aborted transaction, this
00451      * prevents an assertion failure caused by reaching AtCleanup_Portals with
00452      * the cleanup hook still unexecuted.
00453      */
00454     if (PointerIsValid(portal->cleanup))
00455     {
00456         (*portal->cleanup) (portal);
00457         portal->cleanup = NULL;
00458     }
00459 }
00460 
/*
 * PortalDrop
 *      Destroy the portal.
 *
 * isTopCommit: pass true only when dropping portals during top-level
 * transaction commit; it suppresses resource release for non-FAILED
 * portals, leaving that to the regular end-of-transaction mechanism
 * (see comments on resource release below).
 */
void
PortalDrop(Portal portal, bool isTopCommit)
{
    AssertArg(PortalIsValid(portal));

    /*
     * Don't allow dropping a pinned portal, it's still needed by whoever
     * pinned it. Not sure if the PORTAL_ACTIVE case can validly happen or
     * not...
     */
    if (portal->portalPinned ||
        portal->status == PORTAL_ACTIVE)
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_CURSOR_STATE),
                 errmsg("cannot drop active portal \"%s\"", portal->name)));

    /*
     * Allow portalcmds.c to clean up the state it knows about, in particular
     * shutting down the executor if still active.  This step potentially runs
     * user-defined code so failure has to be expected.  It's the cleanup
     * hook's responsibility to not try to do that more than once, in the case
     * that failure occurs and then we come back to drop the portal again
     * during transaction abort.
     *
     * Note: in most paths of control, this will have been done already in
     * MarkPortalDone or MarkPortalFailed.  We're just making sure.
     */
    if (PointerIsValid(portal->cleanup))
    {
        (*portal->cleanup) (portal);
        portal->cleanup = NULL;
    }

    /*
     * Remove portal from hash table.  Because we do this here, we will not
     * come back to try to remove the portal again if there's any error in the
     * subsequent steps.  Better to leak a little memory than to get into an
     * infinite error-recovery loop.
     */
    PortalHashTableDelete(portal);

    /* drop cached plan reference, if any */
    PortalReleaseCachedPlan(portal);

    /*
     * Release any resources still attached to the portal.  There are several
     * cases being covered here:
     *
     * Top transaction commit (indicated by isTopCommit): normally we should
     * do nothing here and let the regular end-of-transaction resource
     * releasing mechanism handle these resources too.  However, if we have a
     * FAILED portal (eg, a cursor that got an error), we'd better clean up
     * its resources to avoid resource-leakage warning messages.
     *
     * Sub transaction commit: never comes here at all, since we don't kill
     * any portals in AtSubCommit_Portals().
     *
     * Main or sub transaction abort: we will do nothing here because
     * portal->resowner was already set NULL; the resources were already
     * cleaned up in transaction abort.
     *
     * Ordinary portal drop: must release resources.  However, if the portal
     * is not FAILED then we do not release its locks.  The locks become the
     * responsibility of the transaction's ResourceOwner (since it is the
     * parent of the portal's owner) and will be released when the transaction
     * eventually ends.
     */
    if (portal->resowner &&
        (!isTopCommit || portal->status == PORTAL_FAILED))
    {
        /* a FAILED portal is treated as aborted, so its locks get released */
        bool        isCommit = (portal->status != PORTAL_FAILED);

        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_BEFORE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_LOCKS,
                             isCommit, false);
        ResourceOwnerRelease(portal->resowner,
                             RESOURCE_RELEASE_AFTER_LOCKS,
                             isCommit, false);
        ResourceOwnerDelete(portal->resowner);
    }
    portal->resowner = NULL;

    /*
     * Delete tuplestore if present.  We should do this even under error
     * conditions; since the tuplestore would have been using cross-
     * transaction storage, its temp files need to be explicitly deleted.
     */
    if (portal->holdStore)
    {
        MemoryContext oldcontext;

        oldcontext = MemoryContextSwitchTo(portal->holdContext);
        tuplestore_end(portal->holdStore);
        MemoryContextSwitchTo(oldcontext);
        portal->holdStore = NULL;
    }

    /* delete tuplestore storage, if any */
    if (portal->holdContext)
        MemoryContextDelete(portal->holdContext);

    /* release subsidiary storage */
    MemoryContextDelete(PortalGetHeapMemory(portal));

    /* release portal struct (it's in PortalMemory) */
    pfree(portal);
}
00575 
00576 /*
00577  * Delete all declared cursors.
00578  *
00579  * Used by commands: CLOSE ALL, DISCARD ALL
00580  */
00581 void
00582 PortalHashTableDeleteAll(void)
00583 {
00584     HASH_SEQ_STATUS status;
00585     PortalHashEnt *hentry;
00586 
00587     if (PortalHashTable == NULL)
00588         return;
00589 
00590     hash_seq_init(&status, PortalHashTable);
00591     while ((hentry = hash_seq_search(&status)) != NULL)
00592     {
00593         Portal      portal = hentry->portal;
00594 
00595         /* Can't close the active portal (the one running the command) */
00596         if (portal->status == PORTAL_ACTIVE)
00597             continue;
00598 
00599         PortalDrop(portal, false);
00600 
00601         /* Restart the iteration in case that led to other drops */
00602         hash_seq_term(&status);
00603         hash_seq_init(&status, PortalHashTable);
00604     }
00605 }
00606 
00607 
/*
 * Pre-commit processing for portals.
 *
 * Holdable cursors created in this transaction need to be converted to
 * materialized form, since we are going to close down the executor and
 * release locks.  Non-holdable portals created in this transaction are
 * simply removed.  Portals remaining from prior transactions should be
 * left untouched.
 *
 * isPrepare: true when this is PREPARE TRANSACTION rather than COMMIT;
 * holdable cursors created in the transaction then cause an error.
 *
 * Returns TRUE if any portals changed state (possibly causing user-defined
 * code to be run), FALSE if not.
 */
bool
PreCommit_Portals(bool isPrepare)
{
    bool        result = false;
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /*
         * There should be no pinned portals anymore. Complain if someone
         * leaked one.
         */
        if (portal->portalPinned)
            elog(ERROR, "cannot commit while a portal is pinned");

        /*
         * Do not touch active portals --- this can only happen in the case of
         * a multi-transaction utility command, such as VACUUM.
         *
         * Note however that any resource owner attached to such a portal is
         * still going to go away, so don't leave a dangling pointer.
         */
        if (portal->status == PORTAL_ACTIVE)
        {
            portal->resowner = NULL;
            continue;
        }

        /* Is it a holdable portal created in the current xact? */
        if ((portal->cursorOptions & CURSOR_OPT_HOLD) &&
            portal->createSubid != InvalidSubTransactionId &&
            portal->status == PORTAL_READY)
        {
            /*
             * We are exiting the transaction that created a holdable cursor.
             * Instead of dropping the portal, prepare it for access by later
             * transactions.
             *
             * However, if this is PREPARE TRANSACTION rather than COMMIT,
             * refuse PREPARE, because the semantics seem pretty unclear.
             */
            if (isPrepare)
                ereport(ERROR,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                         errmsg("cannot PREPARE a transaction that has created a cursor WITH HOLD")));

            /*
             * Note that PersistHoldablePortal() must release all resources
             * used by the portal that are local to the creating transaction.
             */
            PortalCreateHoldStore(portal);
            PersistHoldablePortal(portal);

            /* drop cached plan reference, if any */
            PortalReleaseCachedPlan(portal);

            /*
             * Any resources belonging to the portal will be released in the
             * upcoming transaction-wide cleanup; the portal will no longer
             * have its own resources.
             */
            portal->resowner = NULL;

            /*
             * Having successfully exported the holdable cursor, mark it as
             * not belonging to this transaction.
             */
            portal->createSubid = InvalidSubTransactionId;

            /* Report we changed state */
            result = true;
        }
        else if (portal->createSubid == InvalidSubTransactionId)
        {
            /*
             * Do nothing to cursors held over from a previous transaction
             * (including ones we just froze in a previous cycle of this loop)
             */
            continue;
        }
        else
        {
            /* Zap all non-holdable portals */
            PortalDrop(portal, true);

            /* Report we changed state */
            result = true;
        }

        /*
         * After either freezing or dropping a portal, we have to restart the
         * iteration, because we could have invoked user-defined code that
         * caused a drop of the next portal in the hash chain.
         */
        hash_seq_term(&status);
        hash_seq_init(&status, PortalHashTable);
    }

    return result;
}
00725 
/*
 * Abort processing for portals.
 *
 * At this point we reset "active" status and run the cleanup hook if
 * present, but we can't release the portal's memory until the cleanup call.
 *
 * The reason we need to reset active is so that we can replace the unnamed
 * portal, else we'll fail to execute ROLLBACK when it arrives.
 *
 * Portals are not dropped here; that happens later in AtCleanup_Portals.
 */
void
AtAbort_Portals(void)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Any portal that was actually running has to be considered broken */
        if (portal->status == PORTAL_ACTIVE)
            MarkPortalFailed(portal);

        /*
         * Do nothing else to cursors held over from a previous transaction.
         */
        if (portal->createSubid == InvalidSubTransactionId)
            continue;

        /*
         * If it was created in the current transaction, we can't do normal
         * shutdown on a READY portal either; it might refer to objects
         * created in the failed transaction.  See comments in
         * AtSubAbort_Portals.
         */
        if (portal->status == PORTAL_READY)
            MarkPortalFailed(portal);

        /*
         * Allow portalcmds.c to clean up the state it knows about, if we
         * haven't already.  (MarkPortalFailed above already ran and cleared
         * the hook for portals it touched.)
         */
        if (PointerIsValid(portal->cleanup))
        {
            (*portal->cleanup) (portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.
         */
        MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
    }
}
00795 
00796 /*
00797  * Post-abort cleanup for portals.
00798  *
00799  * Delete all portals not held over from prior transactions.  */
00800 void
00801 AtCleanup_Portals(void)
00802 {
00803     HASH_SEQ_STATUS status;
00804     PortalHashEnt *hentry;
00805 
00806     hash_seq_init(&status, PortalHashTable);
00807 
00808     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
00809     {
00810         Portal      portal = hentry->portal;
00811 
00812         /* Do nothing to cursors held over from a previous transaction */
00813         if (portal->createSubid == InvalidSubTransactionId)
00814         {
00815             Assert(portal->status != PORTAL_ACTIVE);
00816             Assert(portal->resowner == NULL);
00817             continue;
00818         }
00819 
00820         /*
00821          * If a portal is still pinned, forcibly unpin it. PortalDrop will not
00822          * let us drop the portal otherwise. Whoever pinned the portal was
00823          * interrupted by the abort too and won't try to use it anymore.
00824          */
00825         if (portal->portalPinned)
00826             portal->portalPinned = false;
00827 
00828         /* We had better not be calling any user-defined code here */
00829         Assert(portal->cleanup == NULL);
00830 
00831         /* Zap it. */
00832         PortalDrop(portal, false);
00833     }
00834 }
00835 
00836 /*
00837  * Pre-subcommit processing for portals.
00838  *
00839  * Reassign the portals created in the current subtransaction to the parent
00840  * subtransaction.
00841  */
00842 void
00843 AtSubCommit_Portals(SubTransactionId mySubid,
00844                     SubTransactionId parentSubid,
00845                     ResourceOwner parentXactOwner)
00846 {
00847     HASH_SEQ_STATUS status;
00848     PortalHashEnt *hentry;
00849 
00850     hash_seq_init(&status, PortalHashTable);
00851 
00852     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
00853     {
00854         Portal      portal = hentry->portal;
00855 
00856         if (portal->createSubid == mySubid)
00857         {
00858             portal->createSubid = parentSubid;
00859             if (portal->resowner)
00860                 ResourceOwnerNewParent(portal->resowner, parentXactOwner);
00861         }
00862     }
00863 }
00864 
00865 /*
00866  * Subtransaction abort handling for portals.
00867  *
00868  * Deactivate portals created during the failed subtransaction.
00869  * Note that per AtSubCommit_Portals, this will catch portals created
00870  * in descendants of the subtransaction too.
00871  *
00872  * We don't destroy any portals here; that's done in AtSubCleanup_Portals.
00873  */
void
AtSubAbort_Portals(SubTransactionId mySubid,
                   SubTransactionId parentSubid,
                   ResourceOwner parentXactOwner)
{
    HASH_SEQ_STATUS status;
    PortalHashEnt *hentry;

    /*
     * NOTE: parentSubid and parentXactOwner are not referenced in this
     * function body; the signature mirrors AtSubCommit_Portals.
     */
    hash_seq_init(&status, PortalHashTable);

    while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
    {
        Portal      portal = hentry->portal;

        /* Only portals created in the aborted subxact are deactivated */
        if (portal->createSubid != mySubid)
            continue;

        /*
         * Force any live portals of my own subtransaction into FAILED state.
         * We have to do this because they might refer to objects created or
         * changed in the failed subtransaction, leading to crashes if
         * execution is resumed, or even if we just try to run ExecutorEnd.
         * (Note we do NOT do this to upper-level portals, since they cannot
         * have such references and hence may be able to continue.)
         */
        if (portal->status == PORTAL_READY ||
            portal->status == PORTAL_ACTIVE)
            MarkPortalFailed(portal);

        /*
         * Allow portalcmds.c to clean up the state it knows about, if we
         * haven't already.  The hook is cleared afterwards so it cannot run
         * a second time (AtSubCleanup_Portals asserts it is NULL).
         */
        if (PointerIsValid(portal->cleanup))
        {
            (*portal->cleanup) (portal);
            portal->cleanup = NULL;
        }

        /* drop cached plan reference, if any */
        PortalReleaseCachedPlan(portal);

        /*
         * Any resources belonging to the portal will be released in the
         * upcoming transaction-wide cleanup; they will be gone before we run
         * PortalDrop.
         */
        portal->resowner = NULL;

        /*
         * Although we can't delete the portal data structure proper, we can
         * release any memory in subsidiary contexts, such as executor state.
         * The cleanup hook was the last thing that might have needed data
         * there.
         */
        MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
    }
}
00932 
00933 /*
00934  * Post-subabort cleanup for portals.
00935  *
00936  * Drop all portals created in the failed subtransaction (but note that
00937  * we will not drop any that were reassigned to the parent above).
00938  */
00939 void
00940 AtSubCleanup_Portals(SubTransactionId mySubid)
00941 {
00942     HASH_SEQ_STATUS status;
00943     PortalHashEnt *hentry;
00944 
00945     hash_seq_init(&status, PortalHashTable);
00946 
00947     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
00948     {
00949         Portal      portal = hentry->portal;
00950 
00951         if (portal->createSubid != mySubid)
00952             continue;
00953 
00954         /*
00955          * If a portal is still pinned, forcibly unpin it. PortalDrop will not
00956          * let us drop the portal otherwise. Whoever pinned the portal was
00957          * interrupted by the abort too and won't try to use it anymore.
00958          */
00959         if (portal->portalPinned)
00960             portal->portalPinned = false;
00961 
00962         /* We had better not be calling any user-defined code here */
00963         Assert(portal->cleanup == NULL);
00964 
00965         /* Zap it. */
00966         PortalDrop(portal, false);
00967     }
00968 }
00969 
00970 /* Find all available cursors */
/*
 * pg_cursor
 *
 * SQL-callable set-returning function backing the pg_cursors system view:
 * returns one row (name, statement, is_holdable, is_binary, is_scrollable,
 * creation_time) for each portal marked "visible".
 */
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
    ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
    TupleDesc   tupdesc;
    Tuplestorestate *tupstore;
    MemoryContext per_query_ctx;
    MemoryContext oldcontext;
    HASH_SEQ_STATUS hash_seq;
    PortalHashEnt *hentry;

    /* check to see if caller supports us returning a tuplestore */
    if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("set-valued function called in context that cannot accept a set")));
    if (!(rsinfo->allowedModes & SFRM_Materialize))
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("materialize mode required, but it is not " \
                        "allowed in this context")));

    /*
     * need to build tuplestore in query context: the result must outlive
     * this call, so it cannot live in the short-term per-tuple context
     */
    per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
    oldcontext = MemoryContextSwitchTo(per_query_ctx);

    /*
     * build tupdesc for result tuples. This must match the definition of the
     * pg_cursors view in system_views.sql
     */
    tupdesc = CreateTemplateTupleDesc(6, false);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "statement",
                       TEXTOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 3, "is_holdable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 4, "is_binary",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 5, "is_scrollable",
                       BOOLOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 6, "creation_time",
                       TIMESTAMPTZOID, -1, 0);

    /*
     * We put all the tuples into a tuplestore in one scan of the hashtable.
     * This avoids any issue of the hashtable possibly changing between calls.
     */
    tupstore =
        tuplestore_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
                              false, work_mem);

    /* generate junk in short-term context */
    MemoryContextSwitchTo(oldcontext);

    hash_seq_init(&hash_seq, PortalHashTable);
    while ((hentry = hash_seq_search(&hash_seq)) != NULL)
    {
        Portal      portal = hentry->portal;
        Datum       values[6];
        bool        nulls[6];

        /* report only "visible" entries */
        if (!portal->visible)
            continue;

        /* no column of this view is ever NULL */
        MemSet(nulls, 0, sizeof(nulls));

        values[0] = CStringGetTextDatum(portal->name);
        values[1] = CStringGetTextDatum(portal->sourceText);
        values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
        values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
        values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
        values[5] = TimestampTzGetDatum(portal->creation_time);

        tuplestore_putvalues(tupstore, tupdesc, values, nulls);
    }

    /* clean up and return the tuplestore */
    tuplestore_donestoring(tupstore);

    rsinfo->returnMode = SFRM_Materialize;
    rsinfo->setResult = tupstore;
    rsinfo->setDesc = tupdesc;

    return (Datum) 0;
}
01058 
01059 bool
01060 ThereAreNoReadyPortals(void)
01061 {
01062     HASH_SEQ_STATUS status;
01063     PortalHashEnt *hentry;
01064 
01065     hash_seq_init(&status, PortalHashTable);
01066 
01067     while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
01068     {
01069         Portal      portal = hentry->portal;
01070 
01071         if (portal->status == PORTAL_READY)
01072             return false;
01073     }
01074 
01075     return true;
01076 }