/*-------------------------------------------------------------------------
 *
 * inval.c
 *	  POSTGRES cache invalidation dispatcher code.
 *
 *	This is subtle stuff, so pay attention:
 *
 *	When a tuple is updated or deleted, our standard time qualification rules
 *	consider that it is *still valid* so long as we are in the same command,
 *	ie, until the next CommandCounterIncrement() or transaction commit.
 *	(See utils/time/tqual.c, and note that system catalogs are generally
 *	scanned under SnapshotNow rules by the system, or plain user snapshots
 *	for user queries.)	At the command boundary, the old tuple stops
 *	being valid and the new version, if any, becomes valid.  Therefore,
 *	we cannot simply flush a tuple from the system caches during heap_update()
 *	or heap_delete().  The tuple is still good at that point; what's more,
 *	even if we did flush it, it might be reloaded into the caches by a later
 *	request in the same command.  So the correct behavior is to keep a list
 *	of outdated (updated/deleted) tuples and then do the required cache
 *	flushes at the next command boundary.  We must also keep track of
 *	inserted tuples so that we can flush "negative" cache entries that match
 *	the new tuples; again, that mustn't happen until end of command.
 *
 *	Once we have finished the command, we still need to remember inserted
 *	tuples (including new versions of updated tuples), so that we can flush
 *	them from the caches if we abort the transaction.  Similarly, we'd better
 *	be able to flush "negative" cache entries that may have been loaded in
 *	place of deleted tuples, so we still need the deleted ones too.
 *
 *	If we successfully complete the transaction, we have to broadcast all
 *	these invalidation events to other backends (via the SI message queue)
 *	so that they can flush obsolete entries from their caches.  Note we have
 *	to record the transaction commit before sending SI messages, otherwise
 *	the other backends won't see our updated tuples as good.
 *
 *	When a subtransaction aborts, we can process and discard any events
 *	it has queued.  When a subtransaction commits, we just add its events
 *	to the pending lists of the parent transaction.
 *
 *	In short, we need to remember until xact end every insert or delete
 *	of a tuple that might be in the system caches.  Updates are treated as
 *	two events, delete + insert, for simplicity.  (If the update doesn't
 *	change the tuple hash value, catcache.c optimizes this into one event.)
 *
 *	We do not need to register EVERY tuple operation in this way, just those
 *	on tuples in relations that have associated catcaches.  We do, however,
 *	have to register every operation on every tuple that *could* be in a
 *	catcache, whether or not it currently is in our cache.  Also, if the
 *	tuple is in a relation that has multiple catcaches, we need to register
 *	an invalidation message for each such catcache.  catcache.c's
 *	PrepareToInvalidateCacheTuple() routine provides the knowledge of which
 *	catcaches may need invalidation for a given tuple.
 *
 *	Also, whenever we see an operation on a pg_class or pg_attribute tuple,
 *	we register a relcache flush operation for the relation described by that
 *	tuple.
 *
 *	We keep the relcache flush requests in lists separate from the catcache
 *	tuple flush requests.  This allows us to issue all the pending catcache
 *	flushes before we issue relcache flushes, which saves us from loading
 *	a catcache tuple during relcache load only to flush it again right away.
 *	Also, we avoid queuing multiple relcache flush requests for the same
 *	relation, since a relcache flush is relatively expensive to do.
 *	(XXX is it worth testing likewise for duplicate catcache flush entries?
 *	Probably not.)
 *
 *	If a relcache flush is issued for a system relation that we preload
 *	from the relcache init file, we must also delete the init file so that
 *	it will be rebuilt during the next backend restart.  The actual work of
 *	manipulating the init file is in relcache.c, but we keep track of the
 *	need for it here.
 *
 *	The request lists proper are kept in CurTransactionContext of their
 *	creating (sub)transaction, since they can be forgotten on abort of that
 *	transaction but must be kept till top-level commit otherwise.  For
 *	simplicity we keep the controlling list-of-lists in TopTransactionContext.
 *
 *	Currently, inval messages are sent without regard for the possibility
 *	that the object described by the catalog tuple might be a session-local
 *	object such as a temporary table.  This is because (1) this code has
 *	no practical way to tell the difference, and (2) it is not certain that
 *	other backends don't have catalog cache or even relcache entries for
 *	such tables, anyway; there is nothing that prevents that.  It might be
 *	worth trying to avoid sending such inval traffic in the future, if those
 *	problems can be overcome cheaply.
 *
 *
 * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/inval.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "storage/sinval.h"
#include "storage/smgr.h"
#include "utils/catcache.h"
#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relmapper.h"
#include "utils/syscache.h"

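/*
 * Illustrative sketch of the life cycle of one invalidation event, assuming
 * a catalog UPDATE inside a transaction.  The call sequence is approximate;
 * the exact call sites live in the heap access and transaction-control code:
 *
 *		heap_update(pg_class tuple)
 *			-> CacheInvalidateHeapTuple()		 queue catcache+relcache msgs
 *		CommandCounterIncrement()
 *			-> CommandEndInvalidationMessages()  flush our own caches, then
 *												 move msgs to prior-cmds list
 *		CommitTransaction()
 *			-> AtEOXact_Inval(true)				 broadcast msgs via SI queue
 *		(other backends, at their next AcceptInvalidationMessages())
 *			-> LocalExecuteInvalidationMessage() flush their cached copies
 */
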
/*
 * To minimize palloc traffic, we keep pending requests in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  All request types can be stored as SharedInvalidationMessage
 * records.  The ordering of requests within a list is never significant.
 */
typedef struct InvalidationChunk
{
	struct InvalidationChunk *next;		/* list link */
	int			nitems;			/* # items currently stored in chunk */
	int			maxitems;		/* size of allocated array in this chunk */
	SharedInvalidationMessage msgs[1];	/* VARIABLE LENGTH ARRAY */
} InvalidationChunk;			/* VARIABLE LENGTH STRUCTURE */

typedef struct InvalidationListHeader
{
	InvalidationChunk *cclist;	/* list of chunks holding catcache msgs */
	InvalidationChunk *rclist;	/* list of chunks holding relcache msgs */
} InvalidationListHeader;

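/*
 * Illustrative arithmetic (assuming FIRSTCHUNKSIZE stays at 32, as defined
 * in AddInvalidationMessage() below): chunks are sized for 32 messages
 * first, then 64, 128, ..., so a list of N messages costs only O(log N)
 * pallocs.  An n-message chunk is allocated as
 *
 *		sizeof(InvalidationChunk) + (n - 1) * sizeof(SharedInvalidationMessage)
 *
 * where the "- 1" accounts for the one-element msgs[] array already
 * included in sizeof(InvalidationChunk).
 */
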
/*----------------
 * Invalidation info is divided into two lists:
 *	1) events so far in current command, not yet reflected to caches.
 *	2) events in previous commands of current transaction; these have
 *	   been reflected to local caches, and must be either broadcast to
 *	   other backends or rolled back from local cache when we commit
 *	   or abort the transaction.
 * Actually, we need two such lists for each level of nested transaction,
 * so that we can discard events from an aborted subtransaction.  When
 * a subtransaction commits, we append its lists to the parent's lists.
 *
 * The relcache-file-invalidated flag can just be a simple boolean,
 * since we only act on it at transaction commit; we don't care which
 * command of the transaction set it.
 *----------------
 */

typedef struct TransInvalidationInfo
{
	/* Back link to parent transaction's info */
	struct TransInvalidationInfo *parent;

	/* Subtransaction nesting depth */
	int			my_level;

	/* head of current-command event list */
	InvalidationListHeader CurrentCmdInvalidMsgs;

	/* head of previous-commands event list */
	InvalidationListHeader PriorCmdInvalidMsgs;

	/* init file must be invalidated? */
	bool		RelcacheInitFileInval;
} TransInvalidationInfo;

static TransInvalidationInfo *transInvalInfo = NULL;

static SharedInvalidationMessage *SharedInvalidMessagesArray;
static int	numSharedInvalidMessagesArray;
static int	maxSharedInvalidMessagesArray;


/*
 * Dynamically-registered callback functions.  Current implementation
 * assumes there won't be very many of these at once; could improve if needed.
 */

#define MAX_SYSCACHE_CALLBACKS 32
#define MAX_RELCACHE_CALLBACKS 5

static struct SYSCACHECALLBACK
{
	int16		id;				/* cache number */
	SyscacheCallbackFunction function;
	Datum		arg;
}	syscache_callback_list[MAX_SYSCACHE_CALLBACKS];

static int	syscache_callback_count = 0;

static struct RELCACHECALLBACK
{
	RelcacheCallbackFunction function;
	Datum		arg;
}	relcache_callback_list[MAX_RELCACHE_CALLBACKS];

static int	relcache_callback_count = 0;

/* ----------------------------------------------------------------
 *				Invalidation list support functions
 *
 *	These three routines encapsulate processing of the "chunked"
 *	representation of what is logically just a list of messages.
 * ----------------------------------------------------------------
 */

/*
 * AddInvalidationMessage
 *		Add an invalidation message to a list (of chunks).
 *
 * Note that we do not pay any great attention to maintaining the original
 * ordering of the messages.
 */
static void
AddInvalidationMessage(InvalidationChunk **listHdr,
					   SharedInvalidationMessage *msg)
{
	InvalidationChunk *chunk = *listHdr;

	if (chunk == NULL)
	{
		/* First time through; create initial chunk */
#define FIRSTCHUNKSIZE 32
		chunk = (InvalidationChunk *)
			MemoryContextAlloc(CurTransactionContext,
							   sizeof(InvalidationChunk) +
					(FIRSTCHUNKSIZE - 1) * sizeof(SharedInvalidationMessage));
		chunk->nitems = 0;
		chunk->maxitems = FIRSTCHUNKSIZE;
		chunk->next = *listHdr;
		*listHdr = chunk;
	}
	else if (chunk->nitems >= chunk->maxitems)
	{
		/* Need another chunk; double size of last chunk */
		int			chunksize = 2 * chunk->maxitems;

		chunk = (InvalidationChunk *)
			MemoryContextAlloc(CurTransactionContext,
							   sizeof(InvalidationChunk) +
						 (chunksize - 1) * sizeof(SharedInvalidationMessage));
		chunk->nitems = 0;
		chunk->maxitems = chunksize;
		chunk->next = *listHdr;
		*listHdr = chunk;
	}
	/* Okay, add message to current chunk */
	chunk->msgs[chunk->nitems] = *msg;
	chunk->nitems++;
}

/*
 * Append one list of invalidation message chunks to another, resetting
 * the source chunk-list pointer to NULL.
 */
static void
AppendInvalidationMessageList(InvalidationChunk **destHdr,
							  InvalidationChunk **srcHdr)
{
	InvalidationChunk *chunk = *srcHdr;

	if (chunk == NULL)
		return;					/* nothing to do */

	while (chunk->next != NULL)
		chunk = chunk->next;

	chunk->next = *destHdr;

	*destHdr = *srcHdr;

	*srcHdr = NULL;
}

/*
 * Process a list of invalidation messages.
 *
 * This is a macro that executes the given code fragment for each message in
 * a message chunk list.  The fragment should refer to the message as *msg.
 */
#define ProcessMessageList(listHdr, codeFragment) \
	do { \
		InvalidationChunk *_chunk; \
		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
		{ \
			int		_cindex; \
			for (_cindex = 0; _cindex < _chunk->nitems; _cindex++) \
			{ \
				SharedInvalidationMessage *msg = &_chunk->msgs[_cindex]; \
				codeFragment; \
			} \
		} \
	} while (0)

/*
 * Process a list of invalidation messages group-wise.
 *
 * As above, but the code fragment can handle an array of messages.
 * The fragment should refer to the messages as msgs[], with n entries.
 */
#define ProcessMessageListMulti(listHdr, codeFragment) \
	do { \
		InvalidationChunk *_chunk; \
		for (_chunk = (listHdr); _chunk != NULL; _chunk = _chunk->next) \
		{ \
			SharedInvalidationMessage *msgs = _chunk->msgs; \
			int		n = _chunk->nitems; \
			codeFragment; \
		} \
	} while (0)

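/*
 * Usage sketch for the macros above (hypothetical fragment; compare the
 * real call in AddRelcacheInvalidationMessage below): scan a relcache
 * message list and bail out early on a match.
 *
 *		ProcessMessageList(hdr->rclist,
 *						   if (msg->rc.relId == targetRelId) return);
 *
 * Because the fragment is expanded inside a nested loop within the caller,
 * a "return" in the fragment exits the whole calling function, which is
 * how the duplicate check below stops early.
 */
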
/* ----------------------------------------------------------------
 *				Invalidation set support functions
 *
 *	These routines understand about the division of a logical invalidation
 *	list into separate physical lists for catcache and relcache entries.
 * ----------------------------------------------------------------
 */

/*
 * Add a catcache inval entry
 */
static void
AddCatcacheInvalidationMessage(InvalidationListHeader *hdr,
							   int id, uint32 hashValue, Oid dbId)
{
	SharedInvalidationMessage msg;

	Assert(id < CHAR_MAX);
	msg.cc.id = (int8) id;
	msg.cc.dbId = dbId;
	msg.cc.hashValue = hashValue;
	AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a whole-catalog inval entry
 */
static void
AddCatalogInvalidationMessage(InvalidationListHeader *hdr,
							  Oid dbId, Oid catId)
{
	SharedInvalidationMessage msg;

	msg.cat.id = SHAREDINVALCATALOG_ID;
	msg.cat.dbId = dbId;
	msg.cat.catId = catId;
	AddInvalidationMessage(&hdr->cclist, &msg);
}

/*
 * Add a relcache inval entry
 */
static void
AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
							   Oid dbId, Oid relId)
{
	SharedInvalidationMessage msg;

	/* Don't add a duplicate item */
	/* We assume dbId need not be checked because it will never change */
	ProcessMessageList(hdr->rclist,
					   if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
						   msg->rc.relId == relId)
					   return);

	/* OK, add the item */
	msg.rc.id = SHAREDINVALRELCACHE_ID;
	msg.rc.dbId = dbId;
	msg.rc.relId = relId;
	AddInvalidationMessage(&hdr->rclist, &msg);
}

/*
 * Append one list of invalidation messages to another, resetting
 * the source list to empty.
 */
static void
AppendInvalidationMessages(InvalidationListHeader *dest,
						   InvalidationListHeader *src)
{
	AppendInvalidationMessageList(&dest->cclist, &src->cclist);
	AppendInvalidationMessageList(&dest->rclist, &src->rclist);
}

/*
 * Execute the given function for all the messages in an invalidation list.
 * The list is not altered.
 *
 * catcache entries are processed first, for reasons mentioned above.
 */
static void
ProcessInvalidationMessages(InvalidationListHeader *hdr,
							void (*func) (SharedInvalidationMessage *msg))
{
	ProcessMessageList(hdr->cclist, func(msg));
	ProcessMessageList(hdr->rclist, func(msg));
}

/*
 * As above, but the function is able to process an array of messages
 * rather than just one at a time.
 */
static void
ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
				 void (*func) (const SharedInvalidationMessage *msgs, int n))
{
	ProcessMessageListMulti(hdr->cclist, func(msgs, n));
	ProcessMessageListMulti(hdr->rclist, func(msgs, n));
}

/* ----------------------------------------------------------------
 *					  private support functions
 * ----------------------------------------------------------------
 */

/*
 * RegisterCatcacheInvalidation
 *
 * Register an invalidation event for a catcache tuple entry.
 */
static void
RegisterCatcacheInvalidation(int cacheId,
							 uint32 hashValue,
							 Oid dbId)
{
	AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								   cacheId, hashValue, dbId);
}

/*
 * RegisterCatalogInvalidation
 *
 * Register an invalidation event for all catcache entries from a catalog.
 */
static void
RegisterCatalogInvalidation(Oid dbId, Oid catId)
{
	AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								  dbId, catId);
}

/*
 * RegisterRelcacheInvalidation
 *
 * As above, but register a relcache invalidation event.
 */
static void
RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
	AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
								   dbId, relId);

	/*
	 * Most of the time, relcache invalidation is associated with system
	 * catalog updates, but there are a few cases where it isn't.  Quick hack
	 * to ensure that the next CommandCounterIncrement() will think that we
	 * need to do CommandEndInvalidationMessages().
	 */
	(void) GetCurrentCommandId(true);

	/*
	 * If the relation being invalidated is one of those cached in the
	 * relcache init file, mark that we need to zap that file at commit.
	 */
	if (RelationIdIsInInitFile(relId))
		transInvalInfo->RelcacheInitFileInval = true;
}

/*
 * LocalExecuteInvalidationMessage
 *
 * Process a single invalidation message (which could be of any type).
 * Only the local caches are flushed; this does not transmit the message
 * to other backends.
 */
static void
LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
{
	if (msg->id >= 0)
	{
		if (msg->cc.dbId == MyDatabaseId || msg->cc.dbId == InvalidOid)
		{
			CatalogCacheIdInvalidate(msg->cc.id, msg->cc.hashValue);

			CallSyscacheCallbacks(msg->cc.id, msg->cc.hashValue);
		}
	}
	else if (msg->id == SHAREDINVALCATALOG_ID)
	{
		if (msg->cat.dbId == MyDatabaseId || msg->cat.dbId == InvalidOid)
		{
			CatalogCacheFlushCatalog(msg->cat.catId);

			/* CatalogCacheFlushCatalog calls CallSyscacheCallbacks as needed */
		}
	}
	else if (msg->id == SHAREDINVALRELCACHE_ID)
	{
		if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
		{
			int			i;

			RelationCacheInvalidateEntry(msg->rc.relId);

			for (i = 0; i < relcache_callback_count; i++)
			{
				struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

				(*ccitem->function) (ccitem->arg, msg->rc.relId);
			}
		}
	}
	else if (msg->id == SHAREDINVALSMGR_ID)
	{
		/*
		 * We could have smgr entries for relations of other databases, so no
		 * short-circuit test is possible here.
		 */
		RelFileNodeBackend rnode;

		rnode.node = msg->sm.rnode;
		rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
		smgrclosenode(rnode);
	}
	else if (msg->id == SHAREDINVALRELMAP_ID)
	{
		/* We only care about our own database and shared catalogs */
		if (msg->rm.dbId == InvalidOid)
			RelationMapInvalidate(true);
		else if (msg->rm.dbId == MyDatabaseId)
			RelationMapInvalidate(false);
	}
	else
		elog(FATAL, "unrecognized SI message ID: %d", msg->id);
}

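/*
 * Worked example for the smgr branch above (values hypothetical): the
 * sender (see CacheInvalidateSmgr below) splits a 3-byte backend ID across
 * the backend_hi and backend_lo fields (see sinval.h) to keep the message
 * small.  For backend = 0x012345:
 *
 *		backend_hi = 0x012345 >> 16		= 0x01
 *		backend_lo = 0x012345 & 0xffff	= 0x2345
 *
 * and the receiver reassembles (0x01 << 16) | 0x2345 == 0x012345.
 * InvalidBackendId (-1) survives the round trip too, since on the
 * platforms we support the right shift of a negative value is arithmetic
 * and preserves the sign in backend_hi.
 */
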
/*
 * InvalidateSystemCaches
 *
 *	This blows away all tuples in the system catalog caches and
 *	all the cached relation descriptors and smgr cache entries.
 *	Relation descriptors that have positive refcounts are then rebuilt.
 *
 *	We call this when we see a shared-inval-queue overflow signal,
 *	since that tells us we've lost some shared-inval messages and hence
 *	don't know what needs to be invalidated.
 */
static void
InvalidateSystemCaches(void)
{
	int			i;

	ResetCatalogCaches();
	RelationCacheInvalidate();	/* gets smgr and relmap too */

	for (i = 0; i < syscache_callback_count; i++)
	{
		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

		(*ccitem->function) (ccitem->arg, ccitem->id, 0);
	}

	for (i = 0; i < relcache_callback_count; i++)
	{
		struct RELCACHECALLBACK *ccitem = relcache_callback_list + i;

		(*ccitem->function) (ccitem->arg, InvalidOid);
	}
}


/* ----------------------------------------------------------------
 *					  public functions
 * ----------------------------------------------------------------
 */

/*
 * AcceptInvalidationMessages
 *		Read and process invalidation messages from the shared invalidation
 *		message queue.
 *
 * Note:
 *		This should be called as the first step in processing a transaction.
 */
void
AcceptInvalidationMessages(void)
{
	ReceiveSharedInvalidMessages(LocalExecuteInvalidationMessage,
								 InvalidateSystemCaches);

	/*
	 * Test code to force cache flushes anytime a flush could happen.
	 *
	 * If used with CLOBBER_FREED_MEMORY, CLOBBER_CACHE_ALWAYS provides a
	 * fairly thorough test that the system contains no cache-flush hazards.
	 * However, it also makes the system unbelievably slow --- the regression
	 * tests take about 100 times longer than normal.
	 *
	 * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY.
	 * This slows things by at least a factor of 10000, so I wouldn't suggest
	 * trying to run the entire regression tests that way.  It's useful to
	 * try a few simple tests, to make sure that cache reload isn't subject
	 * to internal cache-flush hazards, but after you've done a few thousand
	 * recursive reloads it's unlikely you'll learn more.
	 */
#if defined(CLOBBER_CACHE_ALWAYS)
	{
		static bool in_recursion = false;

		if (!in_recursion)
		{
			in_recursion = true;
			InvalidateSystemCaches();
			in_recursion = false;
		}
	}
#elif defined(CLOBBER_CACHE_RECURSIVELY)
	InvalidateSystemCaches();
#endif
}

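/*
 * Caller sketch (an assumption about call sites, which live in xact.c and
 * lmgr.c, not here): besides being invoked at transaction start, this is
 * also called just after acquiring a heavyweight lock, so that a backend
 * sees any catalog changes committed by the previous lock holder before
 * it relies on its caches:
 *
 *		LockRelationOid(relid, lockmode);
 *			-> LockAcquire(...)
 *			-> AcceptInvalidationMessages();	// if a new lock was acquired
 */
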
/*
 * AtStart_Inval
 *		Initialize inval lists at start of a main transaction.
 */
void
AtStart_Inval(void)
{
	Assert(transInvalInfo == NULL);
	transInvalInfo = (TransInvalidationInfo *)
		MemoryContextAllocZero(TopTransactionContext,
							   sizeof(TransInvalidationInfo));
	transInvalInfo->my_level = GetCurrentTransactionNestLevel();
	SharedInvalidMessagesArray = NULL;
	numSharedInvalidMessagesArray = 0;
}

/*
 * PostPrepare_Inval
 *		Clean up after successful PREPARE.
 *
 * Here, we want to act as though the transaction aborted, so that we will
 * undo any syscache changes it made, thereby bringing us into sync with the
 * outside world, which doesn't believe the transaction committed yet.
 *
 * If the prepared transaction is later aborted, there is nothing more to
 * do; if it commits, we will receive the consequent inval messages just
 * like everyone else.
 */
void
PostPrepare_Inval(void)
{
	AtEOXact_Inval(false);
}

/*
 * AtSubStart_Inval
 *		Initialize inval lists at start of a subtransaction.
 */
void
AtSubStart_Inval(void)
{
	TransInvalidationInfo *myInfo;

	Assert(transInvalInfo != NULL);
	myInfo = (TransInvalidationInfo *)
		MemoryContextAllocZero(TopTransactionContext,
							   sizeof(TransInvalidationInfo));
	myInfo->parent = transInvalInfo;
	myInfo->my_level = GetCurrentTransactionNestLevel();
	transInvalInfo = myInfo;
}

/*
 * Collect invalidation messages into SharedInvalidMessagesArray array.
 */
static void
MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n)
{
	/*
	 * Initialize array first time through in each commit
	 */
	if (SharedInvalidMessagesArray == NULL)
	{
		maxSharedInvalidMessagesArray = FIRSTCHUNKSIZE;
		numSharedInvalidMessagesArray = 0;

		/*
		 * Although this is being palloc'd we don't actually free it
		 * directly.  We're so close to EOXact that we know we're going to
		 * lose it anyhow.
		 */
		SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray
										* sizeof(SharedInvalidationMessage));
	}

	if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
	{
		while ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray)
			maxSharedInvalidMessagesArray *= 2;

		SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray,
											  maxSharedInvalidMessagesArray
										* sizeof(SharedInvalidationMessage));
	}

	/*
	 * Append the next chunk onto the array
	 */
	memcpy(SharedInvalidMessagesArray + numSharedInvalidMessagesArray,
		   msgs, n * sizeof(SharedInvalidationMessage));
	numSharedInvalidMessagesArray += n;
}

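/*
 * Growth sketch for the array above (hypothetical numbers): with
 * FIRSTCHUNKSIZE = 32, appending n = 10 messages when 30 are already
 * stored doubles maxSharedInvalidMessagesArray once, to 64, since
 * 30 + 10 > 32 but 40 <= 64; a very large batch may double it several
 * times in the while loop before the single repalloc.
 */
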
/*
 * xactGetCommittedInvalidationMessages() is executed by
 * RecordTransactionCommit() to add invalidation messages onto the
 * commit record.  This applies only to commit message types, never to
 * abort records.  Must always run before AtEOXact_Inval(), since that
 * removes the data we need to see.
 *
 * Remember that this runs before we have officially committed, so we
 * must not do anything here to change what might occur *if* we should
 * fail between here and the actual commit.
 *
 * see also xact_redo_commit() and xact_desc_commit()
 */
int
xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
									 bool *RelcacheInitFileInval)
{
	MemoryContext oldcontext;

	/* Must be at top of stack */
	Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

	/*
	 * Relcache init file invalidation requires processing both before and
	 * after we send the SI messages.  However, we need not do anything
	 * unless we committed.
	 */
	*RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;

	/*
	 * Walk through TransInvalidationInfo to collect all the messages into a
	 * single contiguous array of invalidation messages.  It must be
	 * contiguous so we can copy it directly into the WAL record.  Maintain
	 * the order in which they would be processed by AtEOXact_Inval(), to
	 * ensure that the emulated behaviour in redo is as similar as possible
	 * to the original.  We want the same bugs, if any, not new ones.
	 */
	oldcontext = MemoryContextSwitchTo(CurTransactionContext);

	ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
									 MakeSharedInvalidMessagesArray);
	ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
									 MakeSharedInvalidMessagesArray);
	MemoryContextSwitchTo(oldcontext);

	Assert(!(numSharedInvalidMessagesArray > 0 &&
			 SharedInvalidMessagesArray == NULL));

	*msgs = SharedInvalidMessagesArray;

	return numSharedInvalidMessagesArray;
}

/*
 * ProcessCommittedInvalidationMessages is executed by xact_redo_commit()
 * to process invalidation messages added to commit records.
 *
 * Relcache init file invalidation requires processing both
 * before and after we send the SI messages.  See AtEOXact_Inval()
 */
void
ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
									 int nmsgs, bool RelcacheInitFileInval,
									 Oid dbid, Oid tsid)
{
	if (nmsgs <= 0)
		return;

	elog(trace_recovery(DEBUG4), "replaying commit with %d messages%s", nmsgs,
		 (RelcacheInitFileInval ? " and relcache file invalidation" : ""));

	if (RelcacheInitFileInval)
	{
		/*
		 * RelationCacheInitFilePreInvalidate requires DatabasePath to be
		 * set, but we should not use SetDatabasePath during recovery, since
		 * it is intended to be used only once by normal backends.  Hence, a
		 * quick hack: set DatabasePath directly then unset after use.
		 */
		DatabasePath = GetDatabasePath(dbid, tsid);
		elog(trace_recovery(DEBUG4), "removing relcache init file in \"%s\"",
			 DatabasePath);
		RelationCacheInitFilePreInvalidate();
		pfree(DatabasePath);
		DatabasePath = NULL;
	}

	SendSharedInvalidMessages(msgs, nmsgs);

	if (RelcacheInitFileInval)
		RelationCacheInitFilePostInvalidate();
}

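/*
 * Flow sketch tying the two functions above together (simplified; the real
 * code is in xact.c's RecordTransactionCommit() and xact_redo_commit(), and
 * the variable names here are illustrative only):
 *
 *		nmsgs = xactGetCommittedInvalidationMessages(&invalMsgs,
 *													 &relcacheInitFileInval);
 *		... store invalMsgs and the flag in the commit WAL record ...
 *
 *		(during replay:)
 *		ProcessCommittedInvalidationMessages(invalMsgs, nmsgs,
 *											 relcacheInitFileInval,
 *											 dbid, tsid);
 */
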
/*
 * AtEOXact_Inval
 *		Process queued-up invalidation messages at end of main transaction.
 *
 * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
 * to the shared invalidation message queue.  Note that these will be read
 * not only by other backends, but also by our own backend at the next
 * transaction start (via AcceptInvalidationMessages).  This means that
 * we can skip immediate local processing of anything that's still in
 * CurrentCmdInvalidMsgs, and just send that list out too.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends,
 * since they'll not have seen our changed tuples anyway.  We can forget
 * about CurrentCmdInvalidMsgs too, since those changes haven't touched
 * the caches yet.
 *
 * In any case, reset the various lists to empty.  We need not physically
 * free memory here, since TopTransactionContext is about to be emptied
 * anyway.
 *
 * Note:
 *		This should be called as the last step in processing a transaction.
 */
void
AtEOXact_Inval(bool isCommit)
{
	if (isCommit)
	{
		/* Must be at top of stack */
		Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);

		/*
		 * Relcache init file invalidation requires processing both before
		 * and after we send the SI messages.  However, we need not do
		 * anything unless we committed.
		 */
		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFilePreInvalidate();

		AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
								   &transInvalInfo->CurrentCmdInvalidMsgs);

		ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
										 SendSharedInvalidMessages);

		if (transInvalInfo->RelcacheInitFileInval)
			RelationCacheInitFilePostInvalidate();
	}
	else if (transInvalInfo != NULL)
	{
		/* Must be at top of stack */
		Assert(transInvalInfo->parent == NULL);

		ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);
	}

	/* Need not free anything explicitly */
	transInvalInfo = NULL;
}

/*
 * AtEOSubXact_Inval
 *		Process queued-up invalidation messages at end of subtransaction.
 *
 * If isCommit, process CurrentCmdInvalidMsgs if any (there probably aren't),
 * and then attach both CurrentCmdInvalidMsgs and PriorCmdInvalidMsgs to the
 * parent's PriorCmdInvalidMsgs list.
 *
 * If not isCommit, we are aborting, and must locally process the messages
 * in PriorCmdInvalidMsgs.  No messages need be sent to other backends.
 * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
 * touched the caches yet.
 *
 * In any case, pop the transaction stack.  We need not physically free memory
 * here, since CurTransactionContext is about to be emptied anyway
 * (if aborting).  Beware of the possibility of aborting the same nesting
 * level twice, though.
 */
void
AtEOSubXact_Inval(bool isCommit)
{
	int			my_level = GetCurrentTransactionNestLevel();
	TransInvalidationInfo *myInfo = transInvalInfo;

	if (isCommit)
	{
		/* Must be at non-top of stack */
		Assert(myInfo != NULL && myInfo->parent != NULL);
		Assert(myInfo->my_level == my_level);

		/* If CurrentCmdInvalidMsgs still has anything, fix it */
		CommandEndInvalidationMessages();

		/* Pass up my inval messages to parent */
		AppendInvalidationMessages(&myInfo->parent->PriorCmdInvalidMsgs,
								   &myInfo->PriorCmdInvalidMsgs);

		/* Pending relcache inval becomes parent's problem too */
		if (myInfo->RelcacheInitFileInval)
			myInfo->parent->RelcacheInitFileInval = true;

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
	else if (myInfo != NULL && myInfo->my_level == my_level)
	{
		/* Must be at non-top of stack */
		Assert(myInfo->parent != NULL);

		ProcessInvalidationMessages(&myInfo->PriorCmdInvalidMsgs,
									LocalExecuteInvalidationMessage);

		/* Pop the transaction state stack */
		transInvalInfo = myInfo->parent;

		/* Need not free anything else explicitly */
		pfree(myInfo);
	}
}

/*
 * CommandEndInvalidationMessages
 *		Process queued-up invalidation messages at end of one command
 *		in a transaction.
 *
 * Here, we send no messages to the shared queue, since we don't know yet if
 * we will commit.  We do need to locally process the CurrentCmdInvalidMsgs
 * list, so as to flush our caches of any entries we have outdated in the
 * current command.  We then move the current-cmd list over to become part
 * of the prior-cmds list.
 *
 * Note:
 *		This should be called during CommandCounterIncrement(),
 *		after we have advanced the command ID.
 */
void
CommandEndInvalidationMessages(void)
{
	/*
	 * You might think this shouldn't be called outside any transaction, but
	 * bootstrap does it, and so does an ABORT issued outside any
	 * transaction.  So just quietly return if there is no state to work on.
	 */
	if (transInvalInfo == NULL)
		return;

	ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
								LocalExecuteInvalidationMessage);
	AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
							   &transInvalInfo->CurrentCmdInvalidMsgs);
}

/*
 * CacheInvalidateHeapTuple
 *		Register the given tuple for invalidation at end of command
 *		(ie, current command is creating or outdating this tuple).
 *		Also, detect whether a relcache invalidation is implied.
 *
 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
 * For an update, we are called just once, with tuple being the old tuple
 * version and newtuple the new version.  This allows avoidance of duplicate
 * effort during an update.
 */
void
CacheInvalidateHeapTuple(Relation relation,
						 HeapTuple tuple,
						 HeapTuple newtuple)
{
	Oid			tupleRelId;
	Oid			databaseId;
	Oid			relationId;

	/* Do nothing during bootstrap */
	if (IsBootstrapProcessingMode())
		return;

	/*
	 * We only need to worry about invalidation for tuples that are in system
	 * relations; user-relation tuples are never in catcaches and can't
	 * affect the relcache either.
	 */
	if (!IsSystemRelation(relation))
		return;

	/*
	 * TOAST tuples can likewise be ignored here.  Note that TOAST tables are
	 * considered system relations so they are not filtered by the above
	 * test.
	 */
	if (IsToastRelation(relation))
		return;

	/*
	 * First let the catcache do its thing
	 */
	PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
								  RegisterCatcacheInvalidation);

	/*
	 * Now, is this tuple one of the primary definers of a relcache entry?
	 *
	 * Note we ignore newtuple here; we assume an update cannot move a tuple
	 * from being part of one relcache entry to being part of another.
	 */
	tupleRelId = RelationGetRelid(relation);

	if (tupleRelId == RelationRelationId)
	{
		Form_pg_class classtup = (Form_pg_class) GETSTRUCT(tuple);

		relationId = HeapTupleGetOid(tuple);
		if (classtup->relisshared)
			databaseId = InvalidOid;
		else
			databaseId = MyDatabaseId;
	}
	else if (tupleRelId == AttributeRelationId)
	{
		Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);

		relationId = atttup->attrelid;

		/*
		 * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
		 * even if the rel in question is shared (which we can't easily
		 * tell).  This essentially means that only backends in this same
		 * database will react to the relcache flush request.  This is in
		 * fact appropriate, since only those backends could see our
		 * pg_attribute change anyway.  It looks a bit ugly though.  (In
		 * practice, shared relations can't have schema changes after
		 * bootstrap, so we should never come here for a shared rel anyway.)
		 */
		databaseId = MyDatabaseId;
	}
	else if (tupleRelId == IndexRelationId)
	{
		Form_pg_index indextup = (Form_pg_index) GETSTRUCT(tuple);

		/*
		 * When a pg_index row is updated, we should send out a relcache
		 * inval for the index relation.  As above, we don't know the shared
		 * status of the index, but in practice it doesn't matter since
		 * indexes of shared catalogs can't have such updates.
		 */
		relationId = indextup->indexrelid;
		databaseId = MyDatabaseId;
	}
	else
		return;

	/*
	 * Yes.  We need to register a relcache invalidation event.
	 */
	RegisterRelcacheInvalidation(databaseId, relationId);
}

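/*
 * Call-pattern sketch (hypothetical caller; the real call sites are in the
 * heap access code):
 *
 *		CacheInvalidateHeapTuple(rel, &newtup, NULL);	// after an insert
 *		CacheInvalidateHeapTuple(rel, &oldtup, NULL);	// before a delete
 *		CacheInvalidateHeapTuple(rel, &oldtup, newtup); // once per update
 */
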
/*
 * CacheInvalidateCatalog
 *		Register invalidation of the whole content of a system catalog.
 *
 * This is normally used in VACUUM FULL/CLUSTER, where we haven't so much
 * changed any tuples as moved them around.  Some uses of catcache entries
 * expect their TIDs to be correct, so we have to blow away the entries.
 *
 * Note: we expect caller to verify that the rel actually is a system
 * catalog.  If it isn't, no great harm is done, just a wasted sinval message.
 */
void
CacheInvalidateCatalog(Oid catalogId)
{
	Oid			databaseId;

	if (IsSharedRelation(catalogId))
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;

	RegisterCatalogInvalidation(databaseId, catalogId);
}

/*
 * CacheInvalidateRelcache
 *		Register invalidation of the specified relation's relcache entry
 *		at end of command.
 *
 * This is used in places that need to force relcache rebuild but aren't
 * changing any of the tuples recognized as contributors to the relcache
 * entry by CacheInvalidateHeapTuple.  (An example is dropping an index.)
 */
void
CacheInvalidateRelcache(Relation relation)
{
	Oid			databaseId;
	Oid			relationId;

	relationId = RelationGetRelid(relation);
	if (relation->rd_rel->relisshared)
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;

	RegisterRelcacheInvalidation(databaseId, relationId);
}

/*
 * CacheInvalidateRelcacheByTuple
 *		As above, but relation is identified by passing its pg_class tuple.
 */
void
CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
{
	Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
	Oid			databaseId;
	Oid			relationId;

	relationId = HeapTupleGetOid(classTuple);
	if (classtup->relisshared)
		databaseId = InvalidOid;
	else
		databaseId = MyDatabaseId;
	RegisterRelcacheInvalidation(databaseId, relationId);
}

/*
 * CacheInvalidateRelcacheByRelid
 *		As above, but relation is identified by passing its OID.
 *		This is the least efficient of the three options; use one of
 *		the above routines if you have a Relation or pg_class tuple.
 */
void
CacheInvalidateRelcacheByRelid(Oid relid)
{
	HeapTuple	tup;

	tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for relation %u", relid);
	CacheInvalidateRelcacheByTuple(tup);
	ReleaseSysCache(tup);
}

/*
 * CacheInvalidateSmgr
 *		Register invalidation of smgr references to a physical relation.
 *
 * Sending this type of invalidation msg forces other backends to close open
 * smgr entries for the rel.  This should be done to flush dangling open-file
 * references when the physical rel is being dropped or truncated.  Because
 * these are nontransactional (i.e., not-rollback-able) operations, we just
 * send the inval message immediately without any queuing.
 *
 * Note: in most cases there will have been a relcache flush issued against
 * the rel at the logical level.  We need a separate smgr-level flush because
 * it is possible for backends to have open smgr entries for rels they don't
 * have a relcache entry for, e.g. because the only thing they ever did with
 * the rel is write out dirty shared buffers.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateSmgr()
 * should happen in low-level smgr.c routines, which are executed while
 * replaying WAL as well as when creating it.
 *
 * Note: In order to avoid bloating SharedInvalidationMessage, we store only
 * three bytes of the backend ID using what would otherwise be padding space.
 * Thus, the maximum possible backend ID is 2^23-1.
 */
void
CacheInvalidateSmgr(RelFileNodeBackend rnode)
{
	SharedInvalidationMessage msg;

	msg.sm.id = SHAREDINVALSMGR_ID;
	msg.sm.backend_hi = rnode.backend >> 16;
	msg.sm.backend_lo = rnode.backend & 0xffff;
	msg.sm.rnode = rnode.node;
	SendSharedInvalidMessages(&msg, 1);
}

/*
 * CacheInvalidateRelmap
 *		Register invalidation of the relation mapping for a database,
 *		or for the shared catalogs if databaseId is zero.
 *
 * Sending this type of invalidation msg forces other backends to re-read
 * the indicated relation mapping file.  It is also necessary to send a
 * relcache inval for the specific relations whose mapping has been altered,
 * else the relcache won't get updated with the new filenode data.
 *
 * Note: because these messages are nontransactional, they won't be captured
 * in commit/abort WAL entries.  Instead, calls to CacheInvalidateRelmap()
 * should happen in low-level relmapper.c routines, which are executed while
 * replaying WAL as well as when creating it.
 */
void
CacheInvalidateRelmap(Oid databaseId)
{
	SharedInvalidationMessage msg;

	msg.rm.id = SHAREDINVALRELMAP_ID;
	msg.rm.dbId = databaseId;
	SendSharedInvalidMessages(&msg, 1);
}


/*
 * CacheRegisterSyscacheCallback
 *		Register the specified function to be called for all future
 *		invalidation events in the specified cache.  The cache ID and the
 *		hash value of the tuple being invalidated will be passed to the
 *		function.
 *
 * NOTE: Hash value zero will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 * Yes, there's a possibility of a false match to zero, but it doesn't seem
 * worth troubling over, especially since most of the current callees just
 * flush all cached state anyway.
 */
void
CacheRegisterSyscacheCallback(int cacheid,
							  SyscacheCallbackFunction func,
							  Datum arg)
{
	if (syscache_callback_count >= MAX_SYSCACHE_CALLBACKS)
		elog(FATAL, "out of syscache_callback_list slots");

	syscache_callback_list[syscache_callback_count].id = cacheid;
	syscache_callback_list[syscache_callback_count].function = func;
	syscache_callback_list[syscache_callback_count].arg = arg;

	++syscache_callback_count;
}

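/*
 * Registration sketch (hypothetical module; PROCOID is a real cache ID, but
 * the callback name is invented for illustration).  Typically done once, at
 * module initialization:
 *
 *		static void
 *		my_proc_callback(Datum arg, int cacheid, uint32 hashvalue)
 *		{
 *			... flush whatever this module cached from pg_proc;
 *			hashvalue == 0 means a cache reset, so flush everything ...
 *		}
 *
 *		CacheRegisterSyscacheCallback(PROCOID, my_proc_callback, (Datum) 0);
 *
 * There is no unregister call; callbacks last for the backend's lifetime.
 * CacheRegisterRelcacheCallback (below) works the same way, except that the
 * callback receives the OID of the invalidated relation instead.
 */
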
/*
 * CacheRegisterRelcacheCallback
 *		Register the specified function to be called for all future
 *		relcache invalidation events.  The OID of the relation being
 *		invalidated will be passed to the function.
 *
 * NOTE: InvalidOid will be passed if a cache reset request is received.
 * In this case the called routines should flush all cached state.
 */
void
CacheRegisterRelcacheCallback(RelcacheCallbackFunction func,
							  Datum arg)
{
	if (relcache_callback_count >= MAX_RELCACHE_CALLBACKS)
		elog(FATAL, "out of relcache_callback_list slots");

	relcache_callback_list[relcache_callback_count].function = func;
	relcache_callback_list[relcache_callback_count].arg = arg;

	++relcache_callback_count;
}

/*
 * CallSyscacheCallbacks
 *
 * This is exported so that CatalogCacheFlushCatalog can call it, saving
 * this module from knowing which catcache IDs correspond to which catalogs.
 */
void
CallSyscacheCallbacks(int cacheid, uint32 hashvalue)
{
	int			i;

	for (i = 0; i < syscache_callback_count; i++)
	{
		struct SYSCACHECALLBACK *ccitem = syscache_callback_list + i;

		if (ccitem->id == cacheid)
			(*ccitem->function) (ccitem->arg, cacheid, hashvalue);
	}
}