/*-------------------------------------------------------------------------
 *
 * unix_latch.c
 *    Routines for inter-process latches
 *
 * The Unix implementation uses the so-called self-pipe trick to overcome
 * the race condition involved with select() and setting a global flag
 * in the signal handler. When a latch is set and the current process
 * is waiting for it, the signal handler wakes up the select() in
 * WaitLatch by writing a byte to a pipe. A signal by itself doesn't
 * interrupt select() on all platforms, and even on platforms where it
 * does, a signal that arrives just before the select() call does not
 * prevent the select() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes select() to return
 * immediately even if the signal arrives before select() begins.
 *
 * (Actually, we prefer poll() over select() where available, but the
 * same comments apply to it.)
 *
 * When SetLatch is called from the same process that owns the latch,
 * SetLatch writes the byte directly to the pipe. If it's owned by another
 * process, SIGUSR1 is sent and the signal handler in the waiting process
 * writes the byte to the pipe on behalf of the signaling process.
 *
 * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/port/unix_latch.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#include "miscadmin.h"
#include "portability/instr_time.h"
#include "postmaster/postmaster.h"
#include "storage/latch.h"
#include "storage/pmsignal.h"
#include "storage/shmem.h"

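/*
 * Illustrative sketch only (not part of this file's implementation; the
 * names pipe_r, pipe_w and wakeup_handler are hypothetical): the bare bones
 * of the self-pipe trick described above.  A signal handler writes one byte
 * to a non-blocking pipe, and the wait loop includes the pipe's read end in
 * its poll() set, so a signal arriving at any point reliably ends the sleep.
 *
 *      static int pipe_r, pipe_w;      -- both ends opened with O_NONBLOCK
 *
 *      static void
 *      wakeup_handler(int signo)
 *      {
 *          int save_errno = errno;
 *
 *          (void) write(pipe_w, "", 1);    -- wakes the poll() below
 *          errno = save_errno;
 *      }
 *
 *      ...
 *      struct pollfd pfd = {pipe_r, POLLIN, 0};
 *      (void) poll(&pfd, 1, -1);           -- returns once a byte arrives
 */
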
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;

/* Read and write ends of the self-pipe */
static int  selfpipe_readfd = -1;
static int  selfpipe_writefd = -1;

/* Private function prototypes */
static void sendSelfPipeByte(void);
static void drainSelfPipe(void);


/*
 * Initialize the process-local latch infrastructure.
 *
 * This must be called once during startup of any process that can wait on
 * latches, before it issues any InitLatch() or OwnLatch() calls.
 */
void
InitializeLatchSupport(void)
{
    int         pipefd[2];

    Assert(selfpipe_readfd == -1);

    /*
     * Set up the self-pipe that allows a signal handler to wake up the
     * select() in WaitLatch. Make the write-end non-blocking, so that
     * SetLatch won't block if the event has already been set many times
     * filling the kernel buffer. Make the read-end non-blocking too, so that
     * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
     */
    if (pipe(pipefd) < 0)
        elog(FATAL, "pipe() failed: %m");
    if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) < 0)
        elog(FATAL, "fcntl() failed on read-end of self-pipe: %m");
    if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) < 0)
        elog(FATAL, "fcntl() failed on write-end of self-pipe: %m");

    selfpipe_readfd = pipefd[0];
    selfpipe_writefd = pipefd[1];
}
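
/*
 * Hedged usage sketch (not from the original file; MyLocalLatch is a
 * hypothetical name): the expected call order in a process that waits on a
 * backend-local latch.
 *
 *      static Latch MyLocalLatch;
 *
 *      InitializeLatchSupport();       -- once, early in process startup
 *      InitLatch(&MyLocalLatch);       -- now the latch can be set and waited on
 */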

/*
 * Initialize a backend-local latch.
 */
void
InitLatch(volatile Latch *latch)
{
    /* Assert InitializeLatchSupport has been called in this process */
    Assert(selfpipe_readfd >= 0);

    latch->is_set = false;
    latch->owner_pid = MyProcPid;
    latch->is_shared = false;
}

/*
 * Initialize a shared latch that can be set from other processes. The latch
 * is initially owned by no-one; use OwnLatch to associate it with the
 * current process.
 *
 * InitSharedLatch needs to be called in postmaster before forking child
 * processes, usually right after allocating the shared memory block
 * containing the latch with ShmemInitStruct. (The Unix implementation
 * doesn't actually require that, but the Windows one does.) Because of
 * this restriction, we have no concurrency issues to worry about here.
 */
void
InitSharedLatch(volatile Latch *latch)
{
    latch->is_set = false;
    latch->owner_pid = 0;
    latch->is_shared = true;
}
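
/*
 * Hedged sketch of the intended lifecycle of a shared latch (my_struct and
 * its layout are hypothetical, not part of this file):
 *
 *      In the postmaster, before forking child processes:
 *          my_struct = ShmemInitStruct("my struct", sizeof(*my_struct), &found);
 *          InitSharedLatch(&my_struct->latch);
 *
 *      In the child process that will wait on the latch:
 *          InitializeLatchSupport();
 *          OwnLatch(&my_struct->latch);
 *          ... ResetLatch/WaitLatch/SetLatch as needed ...
 *          DisownLatch(&my_struct->latch);
 */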

/*
 * Associate a shared latch with the current process, allowing it to
 * wait on the latch.
 *
 * Although there is a sanity check for latch-already-owned, we don't do
 * any sort of locking here, meaning that we could fail to detect the error
 * if two processes try to own the same latch at about the same time.  If
 * there is any risk of that, caller must provide an interlock to prevent it.
 *
 * In any process that calls OwnLatch(), make sure that
 * latch_sigusr1_handler() is called from the SIGUSR1 signal handler,
 * as shared latches use SIGUSR1 for inter-process communication.
 */
void
OwnLatch(volatile Latch *latch)
{
    /* Assert InitializeLatchSupport has been called in this process */
    Assert(selfpipe_readfd >= 0);

    Assert(latch->is_shared);

    /* sanity check */
    if (latch->owner_pid != 0)
        elog(ERROR, "latch already owned");

    latch->owner_pid = MyProcPid;
}
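
/*
 * Hedged sketch of the SIGUSR1 handler required by the comment above: any
 * process that owns a shared latch must route SIGUSR1 through
 * latch_sigusr1_handler(), saving and restoring errno around it.  The
 * handler name my_sigusr1_handler is hypothetical.
 *
 *      static void
 *      my_sigusr1_handler(SIGNAL_ARGS)
 *      {
 *          int save_errno = errno;
 *
 *          latch_sigusr1_handler();
 *          ... react to any other reasons SIGUSR1 was sent ...
 *
 *          errno = save_errno;
 *      }
 *
 *      pqsignal(SIGUSR1, my_sigusr1_handler);
 */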

/*
 * Disown a shared latch currently owned by the current process.
 */
void
DisownLatch(volatile Latch *latch)
{
    Assert(latch->is_shared);
    Assert(latch->owner_pid == MyProcPid);

    latch->owner_pid = 0;
}

/*
 * Wait for a given latch to be set, or for postmaster death, or until timeout
 * is exceeded. 'wakeEvents' is a bitmask that specifies which of those events
 * to wait for. If the latch is already set (and WL_LATCH_SET is given), the
 * function returns immediately.
 *
 * The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
 * is given.  Although it is declared as "long", we don't actually support
 * timeouts longer than INT_MAX milliseconds.  Note that some extra overhead
 * is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
 *
 * The latch must be owned by the current process, ie. it must be a
 * backend-local latch initialized with InitLatch, or a shared latch
 * associated with the current process by calling OwnLatch.
 *
 * Returns bit mask indicating which condition(s) caused the wake-up. Note
 * that if multiple wake-up conditions are true, there is no guarantee that
 * we return all of them in one call, but we will return at least one.
 */
int
WaitLatch(volatile Latch *latch, int wakeEvents, long timeout)
{
    return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout);
}
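
/*
 * Hedged usage sketch (not from the original file): the standard coding
 * pattern for a latch wait loop, with ResetLatch at the top and WaitLatch at
 * the bottom so that a SetLatch arriving between the work check and the wait
 * is not lost.  'latch' stands for whichever latch this process owns.
 *
 *      for (;;)
 *      {
 *          ResetLatch(latch);
 *
 *          if (there is work to do)
 *              handle it;
 *
 *          (void) WaitLatch(latch, WL_LATCH_SET, -1L);
 *      }
 *
 * The timeout argument is ignored here because WL_TIMEOUT is not requested.
 */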

/*
 * Like WaitLatch, but with an extra socket argument for WL_SOCKET_*
 * conditions.
 *
 * When waiting on a socket, WL_SOCKET_READABLE *must* be included in
 * 'wakeEvents'; WL_SOCKET_WRITEABLE is optional.  The reason for this is
 * that EOF and error conditions are reported only via WL_SOCKET_READABLE.
 */
int
WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
                  long timeout)
{
    int         result = 0;
    int         rc;
    instr_time  start_time,
                cur_time;
    long        cur_timeout;

#ifdef HAVE_POLL
    struct pollfd pfds[3];
    int         nfds;
#else
    struct timeval tv,
               *tvp;
    fd_set      input_mask;
    fd_set      output_mask;
    int         hifd;
#endif

    /* Ignore WL_SOCKET_* events if no valid socket is given */
    if (sock == PGINVALID_SOCKET)
        wakeEvents &= ~(WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE);

    Assert(wakeEvents != 0);    /* must have at least one wake event */
    /* Cannot specify WL_SOCKET_WRITEABLE without WL_SOCKET_READABLE */
    Assert((wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) != WL_SOCKET_WRITEABLE);

    if ((wakeEvents & WL_LATCH_SET) && latch->owner_pid != MyProcPid)
        elog(ERROR, "cannot wait on a latch owned by another process");

    /*
     * Initialize timeout if requested.  We must record the current time so
     * that we can determine the remaining timeout if the poll() or select()
     * is interrupted.  (On some platforms, select() will update the contents
     * of "tv" for us, but unfortunately we can't rely on that.)
     */
    if (wakeEvents & WL_TIMEOUT)
    {
        INSTR_TIME_SET_CURRENT(start_time);
        Assert(timeout >= 0 && timeout <= INT_MAX);
        cur_timeout = timeout;

#ifndef HAVE_POLL
        tv.tv_sec = cur_timeout / 1000L;
        tv.tv_usec = (cur_timeout % 1000L) * 1000L;
        tvp = &tv;
#endif
    }
    else
    {
        cur_timeout = -1;

#ifndef HAVE_POLL
        tvp = NULL;
#endif
    }

    waiting = true;
    do
    {
        /*
         * Clear the pipe, then check if the latch is set already. If someone
         * sets the latch between this and the poll()/select() below, the
         * setter will write a byte to the pipe (or signal us and the signal
         * handler will do that), and the poll()/select() will return
         * immediately.
         *
         * Note: we assume that the kernel calls involved in drainSelfPipe()
         * and SetLatch() will provide adequate synchronization on machines
         * with weak memory ordering, so that we cannot miss seeing is_set if
         * the signal byte is already in the pipe when we drain it.
         */
        drainSelfPipe();

        if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
        {
            result |= WL_LATCH_SET;

            /*
             * Leave loop immediately, avoid blocking again. We don't attempt
             * to report any other events that might also be satisfied.
             */
            break;
        }

        /* Must wait ... we use poll(2) if available, otherwise select(2) */
#ifdef HAVE_POLL
        nfds = 0;
        if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE))
        {
            /* socket, if used, is always in pfds[0] */
            pfds[0].fd = sock;
            pfds[0].events = 0;
            if (wakeEvents & WL_SOCKET_READABLE)
                pfds[0].events |= POLLIN;
            if (wakeEvents & WL_SOCKET_WRITEABLE)
                pfds[0].events |= POLLOUT;
            pfds[0].revents = 0;
            nfds++;
        }

        pfds[nfds].fd = selfpipe_readfd;
        pfds[nfds].events = POLLIN;
        pfds[nfds].revents = 0;
        nfds++;

        if (wakeEvents & WL_POSTMASTER_DEATH)
        {
            /* postmaster fd, if used, is always in pfds[nfds - 1] */
            pfds[nfds].fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
            pfds[nfds].events = POLLIN;
            pfds[nfds].revents = 0;
            nfds++;
        }

        /* Sleep */
        rc = poll(pfds, nfds, (int) cur_timeout);

        /* Check return code */
        if (rc < 0)
        {
            /* EINTR is okay, otherwise complain */
            if (errno != EINTR)
            {
                waiting = false;
                ereport(ERROR,
                        (errcode_for_socket_access(),
                         errmsg("poll() failed: %m")));
            }
        }
        else if (rc == 0)
        {
            /* timeout exceeded */
            if (wakeEvents & WL_TIMEOUT)
                result |= WL_TIMEOUT;
        }
        else
        {
            /* at least one event occurred, so check revents values */
            if ((wakeEvents & WL_SOCKET_READABLE) &&
                (pfds[0].revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
            {
                /* data available in socket, or EOF/error condition */
                result |= WL_SOCKET_READABLE;
            }
            if ((wakeEvents & WL_SOCKET_WRITEABLE) &&
                (pfds[0].revents & POLLOUT))
            {
                result |= WL_SOCKET_WRITEABLE;
            }

            /*
             * We expect a POLLHUP when the remote end is closed, but because
             * we don't expect the pipe to become readable or to have any
             * errors either, treat those cases as postmaster death, too.
             */
            if ((wakeEvents & WL_POSTMASTER_DEATH) &&
                (pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
            {
                /*
                 * According to the select(2) man page on Linux, select(2) may
                 * spuriously return and report a file descriptor as readable,
                 * when it's not; and presumably so can poll(2).  It's not
                 * clear that the relevant cases would ever apply to the
                 * postmaster pipe, but since the consequences of falsely
                 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
                 * we take the trouble to positively verify EOF with
                 * PostmasterIsAlive().
                 */
                if (!PostmasterIsAlive())
                    result |= WL_POSTMASTER_DEATH;
            }
        }
#else                           /* !HAVE_POLL */

        FD_ZERO(&input_mask);
        FD_ZERO(&output_mask);

        FD_SET(selfpipe_readfd, &input_mask);
        hifd = selfpipe_readfd;

        if (wakeEvents & WL_POSTMASTER_DEATH)
        {
            FD_SET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask);
            if (postmaster_alive_fds[POSTMASTER_FD_WATCH] > hifd)
                hifd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
        }

        if (wakeEvents & WL_SOCKET_READABLE)
        {
            FD_SET(sock, &input_mask);
            if (sock > hifd)
                hifd = sock;
        }

        if (wakeEvents & WL_SOCKET_WRITEABLE)
        {
            FD_SET(sock, &output_mask);
            if (sock > hifd)
                hifd = sock;
        }

        /* Sleep */
        rc = select(hifd + 1, &input_mask, &output_mask, NULL, tvp);

        /* Check return code */
        if (rc < 0)
        {
            /* EINTR is okay, otherwise complain */
            if (errno != EINTR)
            {
                waiting = false;
                ereport(ERROR,
                        (errcode_for_socket_access(),
                         errmsg("select() failed: %m")));
            }
        }
        else if (rc == 0)
        {
            /* timeout exceeded */
            if (wakeEvents & WL_TIMEOUT)
                result |= WL_TIMEOUT;
        }
        else
        {
            /* at least one event occurred, so check masks */
            if ((wakeEvents & WL_SOCKET_READABLE) && FD_ISSET(sock, &input_mask))
            {
                /* data available in socket, or EOF */
                result |= WL_SOCKET_READABLE;
            }
            if ((wakeEvents & WL_SOCKET_WRITEABLE) && FD_ISSET(sock, &output_mask))
            {
                result |= WL_SOCKET_WRITEABLE;
            }
            if ((wakeEvents & WL_POSTMASTER_DEATH) &&
            FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask))
            {
                /*
                 * According to the select(2) man page on Linux, select(2) may
                 * spuriously return and report a file descriptor as readable,
                 * when it's not; and presumably so can poll(2).  It's not
                 * clear that the relevant cases would ever apply to the
                 * postmaster pipe, but since the consequences of falsely
                 * returning WL_POSTMASTER_DEATH could be pretty unpleasant,
                 * we take the trouble to positively verify EOF with
                 * PostmasterIsAlive().
                 */
                if (!PostmasterIsAlive())
                    result |= WL_POSTMASTER_DEATH;
            }
        }
#endif   /* HAVE_POLL */

        /* If we're not done, update cur_timeout for next iteration */
        if (result == 0 && cur_timeout >= 0)
        {
            INSTR_TIME_SET_CURRENT(cur_time);
            INSTR_TIME_SUBTRACT(cur_time, start_time);
            cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
            if (cur_timeout < 0)
                cur_timeout = 0;

#ifndef HAVE_POLL
            tv.tv_sec = cur_timeout / 1000L;
            tv.tv_usec = (cur_timeout % 1000L) * 1000L;
#endif
        }
    } while (result == 0);
    waiting = false;

    return result;
}
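
/*
 * Hedged usage sketch for the socket form (latch, sock and the 10-second
 * timeout are placeholders): even when the caller is mainly interested in
 * writeability, WL_SOCKET_READABLE must be included so that EOF and error
 * conditions on the socket are reported.
 *
 *      rc = WaitLatchOrSocket(latch,
 *                             WL_LATCH_SET | WL_SOCKET_READABLE |
 *                             WL_SOCKET_WRITEABLE | WL_TIMEOUT,
 *                             sock,
 *                             10000L);
 *
 *      if (rc & WL_LATCH_SET)
 *          ... the latch was set; typically ResetLatch and re-check state ...
 *      if (rc & WL_SOCKET_READABLE)
 *          ... data is available, or the socket hit EOF or an error ...
 *      if (rc & WL_SOCKET_WRITEABLE)
 *          ... the socket can accept more data ...
 *      if (rc & WL_TIMEOUT)
 *          ... 10000 ms elapsed without any of the above ...
 */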

/*
 * Sets a latch and wakes up anyone waiting on it.
 *
 * This is cheap if the latch is already set, otherwise not so much.
 *
 * NB: when calling this in a signal handler, be sure to save and restore
 * errno around it.  (That's standard practice in most signal handlers, of
 * course, but we used to omit it in handlers that only set a flag.)
 *
 * NB: this function is called from critical sections and signal handlers so
 * throwing an error is not a good idea.
 */
void
SetLatch(volatile Latch *latch)
{
    pid_t       owner_pid;

    /*
     * XXX there really ought to be a memory barrier operation right here, to
     * ensure that any flag variables we might have changed get flushed to
     * main memory before we check/set is_set.  Without that, we have to
     * require that callers provide their own synchronization for machines
     * with weak memory ordering (see latch.h).
     */

    /* Quick exit if already set */
    if (latch->is_set)
        return;

    latch->is_set = true;

    /*
     * See if anyone's waiting for the latch. It can be the current process if
     * we're in a signal handler. We use the self-pipe to wake up the select()
     * in that case. If it's another process, send a signal.
     *
     * Fetch owner_pid only once, in case the latch is concurrently getting
     * owned or disowned. XXX: This assumes that pid_t is atomic, which isn't
     * guaranteed to be true! In practice, the effective range of pid_t fits
     * in a 32 bit integer, and so should be atomic. In the worst case, we
     * might end up signaling the wrong process. Even then, you're very
     * unlucky if a process with that bogus pid exists and belongs to
     * Postgres; and PG database processes should handle excess SIGUSR1
     * interrupts without a problem anyhow.
     *
     * Another sort of race condition that's possible here is for a new
     * process to own the latch immediately after we look, so we don't signal
     * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
     * the standard coding convention of waiting at the bottom of their loops,
     * not the top, so that they'll correctly process latch-setting events
     * that happen before they enter the loop.
     */
    owner_pid = latch->owner_pid;
    if (owner_pid == 0)
        return;
    else if (owner_pid == MyProcPid)
    {
        if (waiting)
            sendSelfPipeByte();
    }
    else
        kill(owner_pid, SIGUSR1);
}

/*
 * Clear the latch. Calling WaitLatch after this will sleep, unless
 * the latch is set again before the WaitLatch call.
 */
void
ResetLatch(volatile Latch *latch)
{
    /* Only the owner should reset the latch */
    Assert(latch->owner_pid == MyProcPid);

    latch->is_set = false;

    /*
     * XXX there really ought to be a memory barrier operation right here, to
     * ensure that the write to is_set gets flushed to main memory before we
     * examine any flag variables.  Otherwise a concurrent SetLatch might
     * falsely conclude that it needn't signal us, even though we have missed
     * seeing some flag updates that SetLatch was supposed to inform us of.
     * For the moment, callers must supply their own synchronization of flag
     * variables (see latch.h).
     */
}

/*
 * SetLatch uses SIGUSR1 to wake up the process waiting on the latch.
 *
 * Wake up WaitLatch, if we're waiting.  (We might not be, since SIGUSR1 is
 * overloaded for multiple purposes; or we might not have reached WaitLatch
 * yet, in which case we don't need to fill the pipe either.)
 *
 * NB: when calling this in a signal handler, be sure to save and restore
 * errno around it.
 */
void
latch_sigusr1_handler(void)
{
    if (waiting)
        sendSelfPipeByte();
}

/* Send one byte to the self-pipe, to wake up WaitLatch */
static void
sendSelfPipeByte(void)
{
    int         rc;
    char        dummy = 0;

retry:
    rc = write(selfpipe_writefd, &dummy, 1);
    if (rc < 0)
    {
        /* If interrupted by signal, just retry */
        if (errno == EINTR)
            goto retry;

        /*
         * If the pipe is full, we don't need to retry; the data that's there
         * already is enough to wake up WaitLatch.
         */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
            return;

        /*
         * Oops, the write() failed for some other reason. We might be in a
         * signal handler, so it's not safe to elog(). We have no choice but
         * to silently ignore the error.
         */
        return;
    }
}

/*
 * Read all available data from the self-pipe
 *
 * Note: this is only called when waiting = true.  If it fails and doesn't
 * return, it must reset that flag first (though ideally, this will never
 * happen).
 */
static void
drainSelfPipe(void)
{
    /*
     * There shouldn't normally be more than one byte in the pipe, or maybe a
     * few bytes if multiple processes run SetLatch at the same instant.
     */
    char        buf[16];
    int         rc;

    for (;;)
    {
        rc = read(selfpipe_readfd, buf, sizeof(buf));
        if (rc < 0)
        {
            if (errno == EAGAIN || errno == EWOULDBLOCK)
                break;          /* the pipe is empty */
            else if (errno == EINTR)
                continue;       /* retry */
            else
            {
                waiting = false;
                elog(ERROR, "read() on self-pipe failed: %m");
            }
        }
        else if (rc == 0)
        {
            waiting = false;
            elog(ERROR, "unexpected EOF on self-pipe");
        }
        else if (rc < sizeof(buf))
        {
            /* we successfully drained the pipe; no need to read() again */
            break;
        }
        /* else buffer wasn't big enough, so read again */
    }
}