#include "postgres.h"
#include <math.h>
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymap.h"
#include "catalog/storage.h"
#include "commands/dbcommands.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"
#define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
Definition at line 90 of file vacuumlazy.c.
Referenced by lazy_space_alloc().
#define REL_TRUNCATE_FRACTION 16
Definition at line 72 of file vacuumlazy.c.
Referenced by lazy_vacuum_rel().
#define REL_TRUNCATE_MINIMUM 1000
Definition at line 71 of file vacuumlazy.c.
Referenced by lazy_vacuum_rel().
#define SKIP_PAGES_THRESHOLD ((BlockNumber) 32)
Definition at line 96 of file vacuumlazy.c.
Referenced by lazy_scan_heap().
#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20
Definition at line 81 of file vacuumlazy.c.
Referenced by count_nondeletable_pages().
#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000
Definition at line 83 of file vacuumlazy.c.
Referenced by lazy_truncate_heap().
#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50
Definition at line 82 of file vacuumlazy.c.
Referenced by lazy_truncate_heap().
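These constants work together in lazy_vacuum_rel() and lazy_truncate_heap(): truncation is attempted only when at least REL_TRUNCATE_MINIMUM pages, or 1/REL_TRUNCATE_FRACTION of the table, could be released, and the exclusive-lock retry loop gives up after VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL failed attempts. The standalone sketch below reproduces that arithmetic outside the backend; the constants are copied from the definitions above, while would_try_truncate() and the page counts in main() are hypothetical illustrations, not part of vacuumlazy.c.

    #include <stdbool.h>
    #include <stdio.h>

    /* Values copied from the definitions above */
    #define REL_TRUNCATE_MINIMUM                1000
    #define REL_TRUNCATE_FRACTION               16
    #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL  50      /* ms */
    #define VACUUM_TRUNCATE_LOCK_TIMEOUT        5000    /* ms */

    /* Hypothetical helper mirroring the threshold test in lazy_vacuum_rel() */
    static bool
    would_try_truncate(unsigned rel_pages, unsigned nonempty_pages)
    {
        unsigned    possibly_freeable = rel_pages - nonempty_pages;

        return possibly_freeable > 0 &&
            (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
             possibly_freeable >= rel_pages / REL_TRUNCATE_FRACTION);
    }

    int
    main(void)
    {
        /* 10000-page table, 400 trailing empty pages: 400 < 1000 and < 625 */
        printf("10000/9600 -> %d\n", would_try_truncate(10000, 9600));
        /* same table with 1200 empty pages: 1200 >= 1000, so truncate */
        printf("10000/8800 -> %d\n", would_try_truncate(10000, 8800));
        /* the lock retry loop gives up after this many failed attempts */
        printf("max lock retries: %d\n",
               VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL);
        return 0;
    }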
typedef struct LVRelStats LVRelStats
static BlockNumber count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats) [static]
Definition at line 1444 of file vacuumlazy.c.
References AccessExclusiveLock, buf, BUFFER_LOCK_SHARE, BufferGetPage, CHECK_FOR_INTERRUPTS, elevel, ereport, errmsg(), FirstOffsetNumber, INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, ItemIdIsUsed, LVRelStats::lock_waiter_detected, LockBuffer(), LockHasWaitersRelation(), MAIN_FORKNUM, LVRelStats::nonempty_pages, OffsetNumberNext, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, PageIsNew, RBM_NORMAL, ReadBufferExtended(), LVRelStats::rel_pages, RelationGetRelationName, UnlockReleaseBuffer(), and VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL.
Referenced by lazy_truncate_heap().
{ BlockNumber blkno; instr_time starttime; /* Initialize the starttime if we check for conflicting lock requests */ INSTR_TIME_SET_CURRENT(starttime); /* Strange coding of loop control is needed because blkno is unsigned */ blkno = vacrelstats->rel_pages; while (blkno > vacrelstats->nonempty_pages) { Buffer buf; Page page; OffsetNumber offnum, maxoff; bool hastup; /* * Check if another process requests a lock on our relation. We are * holding an AccessExclusiveLock here, so they will be waiting. We * only do this once per VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL, and we * only check if that interval has elapsed once every 32 blocks to * keep the number of system calls and actual shared lock table * lookups to a minimum. */ if ((blkno % 32) == 0) { instr_time currenttime; instr_time elapsed; INSTR_TIME_SET_CURRENT(currenttime); elapsed = currenttime; INSTR_TIME_SUBTRACT(elapsed, starttime); if ((INSTR_TIME_GET_MICROSEC(elapsed) / 1000) >= VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL) { if (LockHasWaitersRelation(onerel, AccessExclusiveLock)) { ereport(elevel, (errmsg("\"%s\": suspending truncate due to conflicting lock request", RelationGetRelationName(onerel)))); vacrelstats->lock_waiter_detected = true; return blkno; } starttime = currenttime; } } /* * We don't insert a vacuum delay point here, because we have an * exclusive lock on the table which we want to hold for as short a * time as possible. We still need to check for interrupts however. */ CHECK_FOR_INTERRUPTS(); blkno--; buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL, vac_strategy); /* In this phase we only need shared access to the buffer */ LockBuffer(buf, BUFFER_LOCK_SHARE); page = BufferGetPage(buf); if (PageIsNew(page) || PageIsEmpty(page)) { /* PageIsNew probably shouldn't happen... */ UnlockReleaseBuffer(buf); continue; } hastup = false; maxoff = PageGetMaxOffsetNumber(page); for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { ItemId itemid; itemid = PageGetItemId(page, offnum); /* * Note: any non-unused item should be taken as a reason to keep * this page. We formerly thought that DEAD tuples could be * thrown away, but that's not so, because we'd not have cleaned * out their index entries. */ if (ItemIdIsUsed(itemid)) { hastup = true; break; /* can stop scanning */ } } /* scan along page */ UnlockReleaseBuffer(buf); /* Done scanning if we found a tuple here */ if (hastup) return blkno + 1; } /* * If we fall out of the loop, all the previously-thought-to-be-empty * pages still are; we need not bother to look at the last known-nonempty * page. */ return vacrelstats->nonempty_pages; }
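The lock-waiter test above is throttled twice over: the clock is read only on every 32nd block, and LockHasWaitersRelation() is consulted only once the elapsed time exceeds VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL (20 ms). A minimal standalone sketch of the same pattern, using gettimeofday() in place of the backend's instr_time macros and a dummy has_waiters() stand-in for the shared lock table probe:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/time.h>

    #define CHECK_INTERVAL_MS 20    /* mirrors VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL */

    /* hypothetical stand-in for LockHasWaitersRelation() */
    static bool has_waiters(void) { return false; }

    static long
    elapsed_ms(const struct timeval *start)
    {
        struct timeval now;

        gettimeofday(&now, NULL);
        return (now.tv_sec - start->tv_sec) * 1000L +
               (now.tv_usec - start->tv_usec) / 1000L;
    }

    int
    main(void)
    {
        struct timeval starttime;
        unsigned    blkno;

        gettimeofday(&starttime, NULL);

        /* walk backwards, as count_nondeletable_pages() does */
        for (blkno = 100000; blkno > 0; blkno--)
        {
            /* cheap modulus test limits how often we even look at the clock */
            if ((blkno % 32) == 0 && elapsed_ms(&starttime) >= CHECK_INTERVAL_MS)
            {
                if (has_waiters())
                {
                    printf("suspending at block %u\n", blkno);
                    break;
                }
                gettimeofday(&starttime, NULL);     /* restart the interval */
            }
            /* ... per-block work would go here ... */
        }
        return 0;
    }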
static bool heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid) [static]
Definition at line 1667 of file vacuumlazy.c.
References Assert, BufferGetBlockNumber(), BufferGetPage, elog, ERROR, FirstOffsetNumber, HEAP_XMIN_COMMITTED, HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetXmin, HeapTupleSatisfiesVacuum(), ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, OffsetNumberNext, OldestXmin, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_self, TransactionIdFollows(), and TransactionIdPrecedes().
Referenced by lazy_vacuum_page().
{ Page page = BufferGetPage(buf); OffsetNumber offnum, maxoff; bool all_visible = true; *visibility_cutoff_xid = InvalidTransactionId; /* * This is a stripped down version of the line pointer scan in * lazy_scan_heap(). So if you change anything here, also check that * code. */ maxoff = PageGetMaxOffsetNumber(page); for (offnum = FirstOffsetNumber; offnum <= maxoff && all_visible; offnum = OffsetNumberNext(offnum)) { ItemId itemid; HeapTupleData tuple; itemid = PageGetItemId(page, offnum); /* Unused or redirect line pointers are of no interest */ if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid)) continue; ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum); /* * Dead line pointers can have index pointers pointing to them. So they * can't be treated as visible */ if (ItemIdIsDead(itemid)) { all_visible = false; break; } Assert(ItemIdIsNormal(itemid)); tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf)) { case HEAPTUPLE_LIVE: { TransactionId xmin; /* Check comments in lazy_scan_heap. */ if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)) { all_visible = false; break; } /* * The inserter definitely committed. But is it old * enough that everyone sees it as committed? */ xmin = HeapTupleHeaderGetXmin(tuple.t_data); if (!TransactionIdPrecedes(xmin, OldestXmin)) { all_visible = false; break; } /* Track newest xmin on page. */ if (TransactionIdFollows(xmin, *visibility_cutoff_xid)) *visibility_cutoff_xid = xmin; } break; case HEAPTUPLE_DEAD: case HEAPTUPLE_RECENTLY_DEAD: case HEAPTUPLE_INSERT_IN_PROGRESS: case HEAPTUPLE_DELETE_IN_PROGRESS: all_visible = false; break; default: elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); break; } } /* scan along page */ return all_visible; }
static bool lazy_check_needs_freeze(Buffer buf) [static]
Definition at line 1191 of file vacuumlazy.c.
References BufferGetPage, FirstOffsetNumber, FreezeLimit, heap_tuple_needs_freeze(), ItemIdIsNormal, MultiXactFrzLimit, OffsetNumberNext, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageIsEmpty, and PageIsNew.
Referenced by lazy_scan_heap().
{
    Page        page;
    OffsetNumber offnum,
                maxoff;
    HeapTupleHeader tupleheader;

    page = BufferGetPage(buf);

    if (PageIsNew(page) || PageIsEmpty(page))
    {
        /* PageIsNew probably shouldn't happen... */
        return false;
    }

    maxoff = PageGetMaxOffsetNumber(page);
    for (offnum = FirstOffsetNumber;
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      itemid;

        itemid = PageGetItemId(page, offnum);

        if (!ItemIdIsNormal(itemid))
            continue;

        tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);

        if (heap_tuple_needs_freeze(tupleheader, FreezeLimit,
                                    MultiXactFrzLimit, buf))
            return true;
    }                           /* scan along page */

    return false;
}
static void lazy_cleanup_index(Relation indrel, IndexBulkDeleteResult *stats, LVRelStats *vacrelstats) [static]
Definition at line 1267 of file vacuumlazy.c.
References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexBulkDeleteResult::estimated_count, IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, index_vacuum_cleanup(), InvalidMultiXactId, InvalidTransactionId, IndexVacuumInfo::message_level, LVRelStats::new_rel_tuples, IndexVacuumInfo::num_heap_tuples, IndexBulkDeleteResult::num_index_tuples, IndexBulkDeleteResult::num_pages, IndexBulkDeleteResult::pages_deleted, IndexBulkDeleteResult::pages_free, pfree(), pg_rusage_init(), pg_rusage_show(), LVRelStats::rel_pages, RelationGetRelationName, LVRelStats::scanned_pages, IndexVacuumInfo::strategy, IndexBulkDeleteResult::tuples_removed, and vac_update_relstats().
Referenced by lazy_scan_heap().
{
    IndexVacuumInfo ivinfo;
    PGRUsage    ru0;

    pg_rusage_init(&ru0);

    ivinfo.index = indrel;
    ivinfo.analyze_only = false;
    ivinfo.estimated_count = (vacrelstats->scanned_pages < vacrelstats->rel_pages);
    ivinfo.message_level = elevel;
    ivinfo.num_heap_tuples = vacrelstats->new_rel_tuples;
    ivinfo.strategy = vac_strategy;

    stats = index_vacuum_cleanup(&ivinfo, stats);

    if (!stats)
        return;

    /*
     * Now update statistics in pg_class, but only if the index says the
     * count is accurate.
     */
    if (!stats->estimated_count)
        vac_update_relstats(indrel,
                            stats->num_pages,
                            stats->num_index_tuples,
                            0,
                            false,
                            InvalidTransactionId,
                            InvalidMultiXactId);

    ereport(elevel,
            (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
                    RelationGetRelationName(indrel),
                    stats->num_index_tuples,
                    stats->num_pages),
             errdetail("%.0f index row versions were removed.\n"
                       "%u index pages have been deleted, %u are currently reusable.\n"
                       "%s.",
                       stats->tuples_removed,
                       stats->pages_deleted, stats->pages_free,
                       pg_rusage_show(&ru0))));

    pfree(stats);
}
static void lazy_record_dead_tuple(LVRelStats *vacrelstats, ItemPointer itemptr) [static]
Definition at line 1594 of file vacuumlazy.c.
References LVRelStats::dead_tuples, LVRelStats::max_dead_tuples, and LVRelStats::num_dead_tuples.
Referenced by lazy_scan_heap().
{
    /*
     * The array shouldn't overflow under normal behavior, but perhaps it
     * could if we are given a really small maintenance_work_mem. In that
     * case, just forget the last few tuples (we'll get 'em next time).
     */
    if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples)
    {
        vacrelstats->dead_tuples[vacrelstats->num_dead_tuples] = *itemptr;
        vacrelstats->num_dead_tuples++;
    }
}
static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, Relation *Irel, int nindexes, bool scan_all) [static]
Definition at line 393 of file vacuumlazy.c.
References Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferGetPageSize, BufferIsValid, ConditionalLockBufferForCleanup(), elevel, elog, ereport, errdetail(), errmsg(), ERROR, ExclusiveLock, FirstOffsetNumber, FreezeLimit, get_namespace_name(), heap_freeze_tuple(), heap_page_prune(), HEAP_XMIN_COMMITTED, HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleGetOid, HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleSatisfiesVacuum(), i, InvalidTransactionId, InvalidXLogRecPtr, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet, LVRelStats::latestRemovedXid, lazy_check_needs_freeze(), lazy_cleanup_index(), lazy_record_dead_tuple(), lazy_space_alloc(), lazy_vacuum_heap(), lazy_vacuum_index(), lazy_vacuum_page(), LockBuffer(), LockBufferForCleanup(), LockRelationForExtension(), log_heap_freeze(), MAIN_FORKNUM, MarkBufferDirty(), LVRelStats::max_dead_tuples, MaxHeapTuplesPerPage, MultiXactFrzLimit, LVRelStats::new_rel_tuples, LVRelStats::nonempty_pages, LVRelStats::num_dead_tuples, LVRelStats::num_index_scans, OffsetNumberNext, OidIsValid, OldestXmin, PageClearAllVisible, PageGetHeapFreeSpace(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageInit(), PageIsAllVisible, PageIsEmpty, PageIsNew, PageSetAllVisible, PageSetLSN, palloc0(), pg_rusage_init(), pg_rusage_show(), RBM_NORMAL, RelationData::rd_rel, ReadBufferExtended(), RecordPageWithFreeSpace(), LVRelStats::rel_pages, RelationGetNamespace, RelationGetNumberOfBlocks, RelationGetRelationName, RelationNeedsWAL, ReleaseBuffer(), LVRelStats::scanned_pages, LVRelStats::scanned_tuples, SKIP_PAGES_THRESHOLD, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, TransactionIdFollows(), TransactionIdPrecedes(), LVRelStats::tuples_deleted, UnlockRelationForExtension(), UnlockReleaseBuffer(), vac_estimate_reltuples(), vacuum_delay_point(), vacuum_log_cleanup_info(), visibilitymap_clear(), visibilitymap_pin(), visibilitymap_set(), visibilitymap_test(), and WARNING.
Referenced by lazy_vacuum_rel().
{ BlockNumber nblocks, blkno; HeapTupleData tuple; char *relname; BlockNumber empty_pages, vacuumed_pages; double num_tuples, tups_vacuumed, nkeep, nunused; IndexBulkDeleteResult **indstats; int i; PGRUsage ru0; Buffer vmbuffer = InvalidBuffer; BlockNumber next_not_all_visible_block; bool skipping_all_visible_blocks; pg_rusage_init(&ru0); relname = RelationGetRelationName(onerel); ereport(elevel, (errmsg("vacuuming \"%s.%s\"", get_namespace_name(RelationGetNamespace(onerel)), relname))); empty_pages = vacuumed_pages = 0; num_tuples = tups_vacuumed = nkeep = nunused = 0; indstats = (IndexBulkDeleteResult **) palloc0(nindexes * sizeof(IndexBulkDeleteResult *)); nblocks = RelationGetNumberOfBlocks(onerel); vacrelstats->rel_pages = nblocks; vacrelstats->scanned_pages = 0; vacrelstats->nonempty_pages = 0; vacrelstats->latestRemovedXid = InvalidTransactionId; lazy_space_alloc(vacrelstats, nblocks); /* * We want to skip pages that don't require vacuuming according to the * visibility map, but only when we can skip at least SKIP_PAGES_THRESHOLD * consecutive pages. Since we're reading sequentially, the OS should be * doing readahead for us, so there's no gain in skipping a page now and * then; that's likely to disable readahead and so be counterproductive. * Also, skipping even a single page means that we can't update * relfrozenxid, so we only want to do it if we can skip a goodly number * of pages. * * Before entering the main loop, establish the invariant that * next_not_all_visible_block is the next block number >= blkno that's not * all-visible according to the visibility map, or nblocks if there's no * such block. Also, we set up the skipping_all_visible_blocks flag, * which is needed because we need hysteresis in the decision: once we've * started skipping blocks, we may as well skip everything up to the next * not-all-visible block. * * Note: if scan_all is true, we won't actually skip any pages; but we * maintain next_not_all_visible_block anyway, so as to set up the * all_visible_according_to_vm flag correctly for each page. * * Note: The value returned by visibilitymap_test could be slightly * out-of-date, since we make this test before reading the corresponding * heap page or locking the buffer. This is OK. If we mistakenly think * that the page is all-visible when in fact the flag's just been cleared, * we might fail to vacuum the page. But it's OK to skip pages when * scan_all is not set, so no great harm done; the next vacuum will find * them. If we make the reverse mistake and vacuum a page unnecessarily, * it'll just be a no-op. 
*/ for (next_not_all_visible_block = 0; next_not_all_visible_block < nblocks; next_not_all_visible_block++) { if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer)) break; vacuum_delay_point(); } if (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD) skipping_all_visible_blocks = true; else skipping_all_visible_blocks = false; for (blkno = 0; blkno < nblocks; blkno++) { Buffer buf; Page page; OffsetNumber offnum, maxoff; bool tupgone, hastup; int prev_dead_count; OffsetNumber frozen[MaxOffsetNumber]; int nfrozen; Size freespace; bool all_visible_according_to_vm; bool all_visible; bool has_dead_tuples; TransactionId visibility_cutoff_xid = InvalidTransactionId; if (blkno == next_not_all_visible_block) { /* Time to advance next_not_all_visible_block */ for (next_not_all_visible_block++; next_not_all_visible_block < nblocks; next_not_all_visible_block++) { if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer)) break; vacuum_delay_point(); } /* * We know we can't skip the current block. But set up * skipping_all_visible_blocks to do the right thing at the * following blocks. */ if (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD) skipping_all_visible_blocks = true; else skipping_all_visible_blocks = false; all_visible_according_to_vm = false; } else { /* Current block is all-visible */ if (skipping_all_visible_blocks && !scan_all) continue; all_visible_according_to_vm = true; } vacuum_delay_point(); /* * If we are close to overrunning the available space for dead-tuple * TIDs, pause and do a cycle of vacuuming before we tackle this page. */ if ((vacrelstats->max_dead_tuples - vacrelstats->num_dead_tuples) < MaxHeapTuplesPerPage && vacrelstats->num_dead_tuples > 0) { /* * Before beginning index vacuuming, we release any pin we may * hold on the visibility map page. This isn't necessary for * correctness, but we do it anyway to avoid holding the pin * across a lengthy, unrelated operation. */ if (BufferIsValid(vmbuffer)) { ReleaseBuffer(vmbuffer); vmbuffer = InvalidBuffer; } /* Log cleanup info before we touch indexes */ vacuum_log_cleanup_info(onerel, vacrelstats); /* Remove index entries */ for (i = 0; i < nindexes; i++) lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats); /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); /* * Forget the now-vacuumed tuples, and press on, but be careful * not to reset latestRemovedXid since we want that value to be * valid. */ vacrelstats->num_dead_tuples = 0; vacrelstats->num_index_scans++; } /* * Pin the visibility map page in case we need to mark the page * all-visible. In most cases this will be very cheap, because we'll * already have the correct page pinned anyway. However, it's * possible that (a) next_not_all_visible_block is covered by a * different VM page than the current block or (b) we released our pin * and did a cycle of index vacuuming. */ visibilitymap_pin(onerel, blkno, &vmbuffer); buf = ReadBufferExtended(onerel, MAIN_FORKNUM, blkno, RBM_NORMAL, vac_strategy); /* We need buffer cleanup lock so that we can prune HOT chains. */ if (!ConditionalLockBufferForCleanup(buf)) { /* * If we're not scanning the whole relation to guard against XID * wraparound, it's OK to skip vacuuming a page. The next vacuum * will clean it up. */ if (!scan_all) { ReleaseBuffer(buf); continue; } /* * If this is a wraparound checking vacuum, then we read the page * with share lock to see if any xids need to be frozen. If the * page doesn't need attention we just skip and continue. 
If it * does, we wait for cleanup lock. * * We could defer the lock request further by remembering the page * and coming back to it later, or we could even register * ourselves for multiple buffers and then service whichever one * is received first. For now, this seems good enough. */ LockBuffer(buf, BUFFER_LOCK_SHARE); if (!lazy_check_needs_freeze(buf)) { UnlockReleaseBuffer(buf); continue; } LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockBufferForCleanup(buf); /* drop through to normal processing */ } vacrelstats->scanned_pages++; page = BufferGetPage(buf); if (PageIsNew(page)) { /* * An all-zeroes page could be left over if a backend extends the * relation but crashes before initializing the page. Reclaim such * pages for use. * * We have to be careful here because we could be looking at a * page that someone has just added to the relation and not yet * been able to initialize (see RelationGetBufferForTuple). To * protect against that, release the buffer lock, grab the * relation extension lock momentarily, and re-lock the buffer. If * the page is still uninitialized by then, it must be left over * from a crashed backend, and we can initialize it. * * We don't really need the relation lock when this is a new or * temp relation, but it's probably not worth the code space to * check that, since this surely isn't a critical path. * * Note: the comparable code in vacuum.c need not worry because * it's got exclusive lock on the whole relation. */ LockBuffer(buf, BUFFER_LOCK_UNLOCK); LockRelationForExtension(onerel, ExclusiveLock); UnlockRelationForExtension(onerel, ExclusiveLock); LockBufferForCleanup(buf); if (PageIsNew(page)) { ereport(WARNING, (errmsg("relation \"%s\" page %u is uninitialized --- fixing", relname, blkno))); PageInit(page, BufferGetPageSize(buf), 0); empty_pages++; } freespace = PageGetHeapFreeSpace(page); MarkBufferDirty(buf); UnlockReleaseBuffer(buf); RecordPageWithFreeSpace(onerel, blkno, freespace); continue; } if (PageIsEmpty(page)) { empty_pages++; freespace = PageGetHeapFreeSpace(page); /* empty pages are always all-visible */ if (!PageIsAllVisible(page)) { PageSetAllVisible(page); MarkBufferDirty(buf); visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, vmbuffer, InvalidTransactionId); } UnlockReleaseBuffer(buf); RecordPageWithFreeSpace(onerel, blkno, freespace); continue; } /* * Prune all HOT-update chains in this page. * * We count tuples removed by the pruning step as removed by VACUUM. */ tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false, &vacrelstats->latestRemovedXid); /* * Now scan the page to collect vacuumable items and check for tuples * requiring freezing. */ all_visible = true; has_dead_tuples = false; nfrozen = 0; hastup = false; prev_dead_count = vacrelstats->num_dead_tuples; maxoff = PageGetMaxOffsetNumber(page); /* * Note: If you change anything in the loop below, also look at * heap_page_is_all_visible to see if that needs to be changed. 
*/ for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { ItemId itemid; itemid = PageGetItemId(page, offnum); /* Unused items require no processing, but we count 'em */ if (!ItemIdIsUsed(itemid)) { nunused += 1; continue; } /* Redirect items mustn't be touched */ if (ItemIdIsRedirected(itemid)) { hastup = true; /* this page won't be truncatable */ continue; } ItemPointerSet(&(tuple.t_self), blkno, offnum); /* * DEAD item pointers are to be vacuumed normally; but we don't * count them in tups_vacuumed, else we'd be double-counting (at * least in the common case where heap_page_prune() just freed up * a non-HOT tuple). */ if (ItemIdIsDead(itemid)) { lazy_record_dead_tuple(vacrelstats, &(tuple.t_self)); all_visible = false; continue; } Assert(ItemIdIsNormal(itemid)); tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid); tuple.t_len = ItemIdGetLength(itemid); tupgone = false; switch (HeapTupleSatisfiesVacuum(tuple.t_data, OldestXmin, buf)) { case HEAPTUPLE_DEAD: /* * Ordinarily, DEAD tuples would have been removed by * heap_page_prune(), but it's possible that the tuple * state changed since heap_page_prune() looked. In * particular an INSERT_IN_PROGRESS tuple could have * changed to DEAD if the inserter aborted. So this * cannot be considered an error condition. * * If the tuple is HOT-updated then it must only be * removed by a prune operation; so we keep it just as if * it were RECENTLY_DEAD. Also, if it's a heap-only * tuple, we choose to keep it, because it'll be a lot * cheaper to get rid of it in the next pruning pass than * to treat it like an indexed tuple. */ if (HeapTupleIsHotUpdated(&tuple) || HeapTupleIsHeapOnly(&tuple)) nkeep += 1; else tupgone = true; /* we can delete the tuple */ all_visible = false; break; case HEAPTUPLE_LIVE: /* Tuple is good --- but let's do some validity checks */ if (onerel->rd_rel->relhasoids && !OidIsValid(HeapTupleGetOid(&tuple))) elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid", relname, blkno, offnum); /* * Is the tuple definitely visible to all transactions? * * NB: Like with per-tuple hint bits, we can't set the * PD_ALL_VISIBLE flag if the inserter committed * asynchronously. See SetHintBits for more info. Check * that the HEAP_XMIN_COMMITTED hint bit is set because of * that. */ if (all_visible) { TransactionId xmin; if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)) { all_visible = false; break; } /* * The inserter definitely committed. But is it old * enough that everyone sees it as committed? */ xmin = HeapTupleHeaderGetXmin(tuple.t_data); if (!TransactionIdPrecedes(xmin, OldestXmin)) { all_visible = false; break; } /* Track newest xmin on page. */ if (TransactionIdFollows(xmin, visibility_cutoff_xid)) visibility_cutoff_xid = xmin; } break; case HEAPTUPLE_RECENTLY_DEAD: /* * If tuple is recently deleted then we must not remove it * from relation. 
*/ nkeep += 1; all_visible = false; break; case HEAPTUPLE_INSERT_IN_PROGRESS: /* This is an expected case during concurrent vacuum */ all_visible = false; break; case HEAPTUPLE_DELETE_IN_PROGRESS: /* This is an expected case during concurrent vacuum */ all_visible = false; break; default: elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result"); break; } if (tupgone) { lazy_record_dead_tuple(vacrelstats, &(tuple.t_self)); HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data, &vacrelstats->latestRemovedXid); tups_vacuumed += 1; has_dead_tuples = true; } else { num_tuples += 1; hastup = true; /* * Each non-removable tuple must be checked to see if it needs * freezing. Note we already have exclusive buffer lock. */ if (heap_freeze_tuple(tuple.t_data, FreezeLimit, MultiXactFrzLimit)) frozen[nfrozen++] = offnum; } } /* scan along page */ /* * If we froze any tuples, mark the buffer dirty, and write a WAL * record recording the changes. We must log the changes to be * crash-safe against future truncation of CLOG. */ if (nfrozen > 0) { MarkBufferDirty(buf); if (RelationNeedsWAL(onerel)) { XLogRecPtr recptr; recptr = log_heap_freeze(onerel, buf, FreezeLimit, MultiXactFrzLimit, frozen, nfrozen); PageSetLSN(page, recptr); } } /* * If there are no indexes then we can vacuum the page right now * instead of doing a second scan. */ if (nindexes == 0 && vacrelstats->num_dead_tuples > 0) { /* Remove tuples from heap */ lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer); /* * Forget the now-vacuumed tuples, and press on, but be careful * not to reset latestRemovedXid since we want that value to be * valid. */ vacrelstats->num_dead_tuples = 0; vacuumed_pages++; } freespace = PageGetHeapFreeSpace(page); /* mark page all-visible, if appropriate */ if (all_visible && !all_visible_according_to_vm) { /* * It should never be the case that the visibility map page is set * while the page-level bit is clear, but the reverse is allowed * (if checksums are not enabled). Regardless, set the both bits * so that we get back in sync. * * NB: If the heap page is all-visible but the VM bit is not set, * we don't need to dirty the heap page. However, if checksums are * enabled, we do need to make sure that the heap page is dirtied * before passing it to visibilitymap_set(), because it may be * logged. Given that this situation should only happen in rare * cases after a crash, it is not worth optimizing. */ PageSetAllVisible(page); MarkBufferDirty(buf); visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr, vmbuffer, visibility_cutoff_xid); } /* * As of PostgreSQL 9.2, the visibility map bit should never be set if * the page-level bit is clear. However, it's possible that the bit * got cleared after we checked it and before we took the buffer * content lock, so we must recheck before jumping to the conclusion * that something bad has happened. */ else if (all_visible_according_to_vm && !PageIsAllVisible(page) && visibilitymap_test(onerel, blkno, &vmbuffer)) { elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u", relname, blkno); visibilitymap_clear(onerel, blkno, vmbuffer); } /* * It's possible for the value returned by GetOldestXmin() to move * backwards, so it's not wrong for us to see tuples that appear to * not be visible to everyone yet, while PD_ALL_VISIBLE is already * set. 
The real safe xmin value never moves backwards, but * GetOldestXmin() is conservative and sometimes returns a value * that's unnecessarily small, so if we see that contradiction it just * means that the tuples that we think are not visible to everyone yet * actually are, and the PD_ALL_VISIBLE flag is correct. * * There should never be dead tuples on a page with PD_ALL_VISIBLE * set, however. */ else if (PageIsAllVisible(page) && has_dead_tuples) { elog(WARNING, "page containing dead tuples is marked as all-visible in relation \"%s\" page %u", relname, blkno); PageClearAllVisible(page); MarkBufferDirty(buf); visibilitymap_clear(onerel, blkno, vmbuffer); } UnlockReleaseBuffer(buf); /* Remember the location of the last page with nonremovable tuples */ if (hastup) vacrelstats->nonempty_pages = blkno + 1; /* * If we remembered any tuples for deletion, then the page will be * visited again by lazy_vacuum_heap, which will compute and record * its post-compaction free space. If not, then we're done with this * page, so remember its free space as-is. (This path will always be * taken if there are no indexes.) */ if (vacrelstats->num_dead_tuples == prev_dead_count) RecordPageWithFreeSpace(onerel, blkno, freespace); } /* save stats for use later */ vacrelstats->scanned_tuples = num_tuples; vacrelstats->tuples_deleted = tups_vacuumed; /* now we can compute the new value for pg_class.reltuples */ vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false, nblocks, vacrelstats->scanned_pages, num_tuples); /* * Release any remaining pin on visibility map page. */ if (BufferIsValid(vmbuffer)) { ReleaseBuffer(vmbuffer); vmbuffer = InvalidBuffer; } /* If any tuples need to be deleted, perform final vacuum cycle */ /* XXX put a threshold on min number of tuples here? */ if (vacrelstats->num_dead_tuples > 0) { /* Log cleanup info before we touch indexes */ vacuum_log_cleanup_info(onerel, vacrelstats); /* Remove index entries */ for (i = 0; i < nindexes; i++) lazy_vacuum_index(Irel[i], &indstats[i], vacrelstats); /* Remove tuples from heap */ lazy_vacuum_heap(onerel, vacrelstats); vacrelstats->num_index_scans++; } /* Do post-vacuum cleanup and statistics update for each index */ for (i = 0; i < nindexes; i++) lazy_cleanup_index(Irel[i], indstats[i], vacrelstats); /* If no indexes, make log report that lazy_vacuum_heap would've made */ if (vacuumed_pages) ereport(elevel, (errmsg("\"%s\": removed %.0f row versions in %u pages", RelationGetRelationName(onerel), tups_vacuumed, vacuumed_pages))); ereport(elevel, (errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages", RelationGetRelationName(onerel), tups_vacuumed, num_tuples, vacrelstats->scanned_pages, nblocks), errdetail("%.0f dead row versions cannot be removed yet.\n" "There were %.0f unused item pointers.\n" "%u pages are entirely empty.\n" "%s.", nkeep, nunused, empty_pages, pg_rusage_show(&ru0)))); }
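The visibility-map skipping logic at the top of this function is easiest to see in isolation: the scan only starts skipping once at least SKIP_PAGES_THRESHOLD consecutive all-visible blocks lie ahead, and once it starts, it keeps skipping until the next not-all-visible block. The standalone sketch below reproduces just that decision, with scan_all taken to be false and all_visible() as a hypothetical stand-in for visibilitymap_test(); the block layout in it is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define SKIP_PAGES_THRESHOLD 32
    #define NBLOCKS 100

    /* hypothetical stand-in for visibilitymap_test(): blocks 10..79 all-visible */
    static bool
    all_visible(unsigned blkno)
    {
        return blkno >= 10 && blkno < 80;
    }

    int
    main(void)
    {
        unsigned    next_not_all_visible_block;
        unsigned    blkno;
        bool        skipping;
        unsigned    scanned = 0;

        /* establish the invariant before the main loop, as lazy_scan_heap does */
        for (next_not_all_visible_block = 0;
             next_not_all_visible_block < NBLOCKS;
             next_not_all_visible_block++)
            if (!all_visible(next_not_all_visible_block))
                break;
        skipping = (next_not_all_visible_block >= SKIP_PAGES_THRESHOLD);

        for (blkno = 0; blkno < NBLOCKS; blkno++)
        {
            if (blkno == next_not_all_visible_block)
            {
                for (next_not_all_visible_block++;
                     next_not_all_visible_block < NBLOCKS;
                     next_not_all_visible_block++)
                    if (!all_visible(next_not_all_visible_block))
                        break;
                /* hysteresis: only start skipping for a long all-visible run */
                skipping = (next_not_all_visible_block - blkno > SKIP_PAGES_THRESHOLD);
            }
            else if (skipping)
                continue;           /* all-visible and inside a long run: skip */
            scanned++;
        }

        /* blocks 0..9 and 80..99 are scanned; the 70-block run 10..79 is skipped */
        printf("scanned %u of %u blocks\n", scanned, NBLOCKS);
        return 0;
    }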
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks) [static]
Definition at line 1562 of file vacuumlazy.c.
References LVRelStats::dead_tuples, LVRelStats::hasindex, LAZY_ALLOC_TUPLES, maintenance_work_mem, Max, LVRelStats::max_dead_tuples, MaxAllocSize, MaxHeapTuplesPerPage, Min, LVRelStats::num_dead_tuples, and palloc().
Referenced by lazy_scan_heap().
{
    long        maxtuples;

    if (vacrelstats->hasindex)
    {
        maxtuples = (maintenance_work_mem * 1024L) / sizeof(ItemPointerData);
        maxtuples = Min(maxtuples, INT_MAX);
        maxtuples = Min(maxtuples, MaxAllocSize / sizeof(ItemPointerData));

        /* curious coding here to ensure the multiplication can't overflow */
        if ((BlockNumber) (maxtuples / LAZY_ALLOC_TUPLES) > relblocks)
            maxtuples = relblocks * LAZY_ALLOC_TUPLES;

        /* stay sane if small maintenance_work_mem */
        maxtuples = Max(maxtuples, MaxHeapTuplesPerPage);
    }
    else
    {
        maxtuples = MaxHeapTuplesPerPage;
    }

    vacrelstats->num_dead_tuples = 0;
    vacrelstats->max_dead_tuples = (int) maxtuples;
    vacrelstats->dead_tuples = (ItemPointer)
        palloc(maxtuples * sizeof(ItemPointerData));
}
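For a sense of scale: each dead-tuple slot is one ItemPointerData (6 bytes), so a maintenance_work_mem of 64 MB allows roughly 11 million TIDs before the INT_MAX, MaxAllocSize, and per-page clamps apply. The standalone sketch below reproduces the same arithmetic outside the backend; it assumes sizeof(ItemPointerData) == 6, MaxHeapTuplesPerPage == 291 (the value for the default 8 kB block size), and MaxAllocSize == 0x3fffffff, and its inputs in main() are hypothetical.

    #include <limits.h>
    #include <stdio.h>

    /* Assumed values: 8 kB pages, 6-byte ItemPointerData, ~1 GB palloc limit */
    #define ITEM_POINTER_SIZE           6
    #define MAX_HEAP_TUPLES_PER_PAGE    291
    #define MAX_ALLOC_SIZE              0x3fffffffL

    static long
    max_dead_tuples(long maintenance_work_mem_kb, long relblocks, int hasindex)
    {
        long        maxtuples;

        if (!hasindex)
            return MAX_HEAP_TUPLES_PER_PAGE;    /* one page's worth at a time */

        maxtuples = (maintenance_work_mem_kb * 1024L) / ITEM_POINTER_SIZE;
        if (maxtuples > INT_MAX)
            maxtuples = INT_MAX;
        if (maxtuples > MAX_ALLOC_SIZE / ITEM_POINTER_SIZE)
            maxtuples = MAX_ALLOC_SIZE / ITEM_POINTER_SIZE;

        /* no point reserving more slots than the table could possibly need */
        if (maxtuples / MAX_HEAP_TUPLES_PER_PAGE > relblocks)
            maxtuples = relblocks * MAX_HEAP_TUPLES_PER_PAGE;

        if (maxtuples < MAX_HEAP_TUPLES_PER_PAGE)
            maxtuples = MAX_HEAP_TUPLES_PER_PAGE;
        return maxtuples;
    }

    int
    main(void)
    {
        /* 64 MB work mem, 1-million-block table: 64*1024*1024/6 = 11184810 */
        printf("%ld\n", max_dead_tuples(64 * 1024L, 1000000L, 1));
        /* tiny table: clamped to relblocks * tuples-per-page = 10 * 291 */
        printf("%ld\n", max_dead_tuples(64 * 1024L, 10L, 1));
        return 0;
    }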
static bool lazy_tid_reaped(ItemPointer itemptr, void *state) [static]
Definition at line 1617 of file vacuumlazy.c.
References LVRelStats::dead_tuples, NULL, and LVRelStats::num_dead_tuples.
Referenced by lazy_vacuum_index().
{
    LVRelStats *vacrelstats = (LVRelStats *) state;
    ItemPointer res;

    res = (ItemPointer) bsearch((void *) itemptr,
                                (void *) vacrelstats->dead_tuples,
                                vacrelstats->num_dead_tuples,
                                sizeof(ItemPointerData),
                                vac_cmp_itemptr);

    return (res != NULL);
}
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats) [static]
Definition at line 1320 of file vacuumlazy.c.
References AccessExclusiveLock, CHECK_FOR_INTERRUPTS, ConditionalLockRelation(), count_nondeletable_pages(), elevel, ereport, errdetail(), errmsg(), LVRelStats::lock_waiter_detected, LVRelStats::nonempty_pages, LVRelStats::pages_removed, pg_rusage_init(), pg_rusage_show(), pg_usleep(), LVRelStats::rel_pages, RelationGetNumberOfBlocks, RelationGetRelationName, RelationTruncate(), UnlockRelation(), VACUUM_TRUNCATE_LOCK_TIMEOUT, and VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL.
Referenced by lazy_vacuum_rel().
{ BlockNumber old_rel_pages = vacrelstats->rel_pages; BlockNumber new_rel_pages; PGRUsage ru0; int lock_retry; pg_rusage_init(&ru0); /* * Loop until no more truncating can be done. */ do { /* * We need full exclusive lock on the relation in order to do * truncation. If we can't get it, give up rather than waiting --- we * don't want to block other backends, and we don't want to deadlock * (which is quite possible considering we already hold a lower-grade * lock). */ vacrelstats->lock_waiter_detected = false; lock_retry = 0; while (true) { if (ConditionalLockRelation(onerel, AccessExclusiveLock)) break; /* * Check for interrupts while trying to (re-)acquire the exclusive * lock. */ CHECK_FOR_INTERRUPTS(); if (++lock_retry > (VACUUM_TRUNCATE_LOCK_TIMEOUT / VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL)) { /* * We failed to establish the lock in the specified number of * retries. This means we give up truncating. */ vacrelstats->lock_waiter_detected = true; ereport(elevel, (errmsg("\"%s\": stopping truncate due to conflicting lock request", RelationGetRelationName(onerel)))); return; } pg_usleep(VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL); } /* * Now that we have exclusive lock, look to see if the rel has grown * whilst we were vacuuming with non-exclusive lock. If so, give up; * the newly added pages presumably contain non-deletable tuples. */ new_rel_pages = RelationGetNumberOfBlocks(onerel); if (new_rel_pages != old_rel_pages) { /* * Note: we intentionally don't update vacrelstats->rel_pages with * the new rel size here. If we did, it would amount to assuming * that the new pages are empty, which is unlikely. Leaving the * numbers alone amounts to assuming that the new pages have the * same tuple density as existing ones, which is less unlikely. */ UnlockRelation(onerel, AccessExclusiveLock); return; } /* * Scan backwards from the end to verify that the end pages actually * contain no tuples. This is *necessary*, not optional, because * other backends could have added tuples to these pages whilst we * were vacuuming. */ new_rel_pages = count_nondeletable_pages(onerel, vacrelstats); if (new_rel_pages >= old_rel_pages) { /* can't do anything after all */ UnlockRelation(onerel, AccessExclusiveLock); return; } /* * Okay to truncate. */ RelationTruncate(onerel, new_rel_pages); /* * We can release the exclusive lock as soon as we have truncated. * Other backends can't safely access the relation until they have * processed the smgr invalidation that smgrtruncate sent out ... but * that should happen as part of standard invalidation processing once * they acquire lock on the relation. */ UnlockRelation(onerel, AccessExclusiveLock); /* * Update statistics. Here, it *is* correct to adjust rel_pages * without also touching reltuples, since the tuple count wasn't * changed by the truncation. */ vacrelstats->pages_removed += old_rel_pages - new_rel_pages; vacrelstats->rel_pages = new_rel_pages; ereport(elevel, (errmsg("\"%s\": truncated %u to %u pages", RelationGetRelationName(onerel), old_rel_pages, new_rel_pages), errdetail("%s.", pg_rusage_show(&ru0)))); old_rel_pages = new_rel_pages; } while (new_rel_pages > vacrelstats->nonempty_pages && vacrelstats->lock_waiter_detected); }
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats) [static]
Definition at line 1053 of file vacuumlazy.c.
References buf, BufferGetPage, BufferIsValid, ConditionalLockBufferForCleanup(), LVRelStats::dead_tuples, elevel, ereport, errdetail(), errmsg(), ItemPointerGetBlockNumber, lazy_vacuum_page(), MAIN_FORKNUM, PageGetHeapFreeSpace(), pg_rusage_init(), pg_rusage_show(), RBM_NORMAL, ReadBufferExtended(), RecordPageWithFreeSpace(), RelationGetRelationName, ReleaseBuffer(), UnlockReleaseBuffer(), and vacuum_delay_point().
Referenced by lazy_scan_heap().
{
    int         tupindex;
    int         npages;
    PGRUsage    ru0;
    Buffer      vmbuffer = InvalidBuffer;

    pg_rusage_init(&ru0);
    npages = 0;

    tupindex = 0;
    while (tupindex < vacrelstats->num_dead_tuples)
    {
        BlockNumber tblk;
        Buffer      buf;
        Page        page;
        Size        freespace;

        vacuum_delay_point();

        tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
        buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
                                 vac_strategy);
        if (!ConditionalLockBufferForCleanup(buf))
        {
            ReleaseBuffer(buf);
            ++tupindex;
            continue;
        }
        tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
                                    &vmbuffer);

        /* Now that we've compacted the page, record its available space */
        page = BufferGetPage(buf);
        freespace = PageGetHeapFreeSpace(page);

        UnlockReleaseBuffer(buf);
        RecordPageWithFreeSpace(onerel, tblk, freespace);
        npages++;
    }

    if (BufferIsValid(vmbuffer))
    {
        ReleaseBuffer(vmbuffer);
        vmbuffer = InvalidBuffer;
    }

    ereport(elevel,
            (errmsg("\"%s\": removed %d row versions in %d pages",
                    RelationGetRelationName(onerel),
                    tupindex, npages),
             errdetail("%s.",
                       pg_rusage_show(&ru0))));
}
static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats, LVRelStats *vacrelstats) [static]
Definition at line 1236 of file vacuumlazy.c.
References IndexVacuumInfo::analyze_only, elevel, ereport, errdetail(), errmsg(), IndexVacuumInfo::estimated_count, IndexVacuumInfo::index, index_bulk_delete(), lazy_tid_reaped(), IndexVacuumInfo::message_level, IndexVacuumInfo::num_heap_tuples, LVRelStats::old_rel_tuples, pg_rusage_init(), pg_rusage_show(), RelationGetRelationName, and IndexVacuumInfo::strategy.
Referenced by lazy_scan_heap().
{
    IndexVacuumInfo ivinfo;
    PGRUsage    ru0;

    pg_rusage_init(&ru0);

    ivinfo.index = indrel;
    ivinfo.analyze_only = false;
    ivinfo.estimated_count = true;
    ivinfo.message_level = elevel;
    ivinfo.num_heap_tuples = vacrelstats->old_rel_tuples;
    ivinfo.strategy = vac_strategy;

    /* Do bulk deletion */
    *stats = index_bulk_delete(&ivinfo, *stats,
                               lazy_tid_reaped, (void *) vacrelstats);

    ereport(elevel,
            (errmsg("scanned index \"%s\" to remove %d row versions",
                    RelationGetRelationName(indrel),
                    vacrelstats->num_dead_tuples),
             errdetail("%s.", pg_rusage_show(&ru0))));
}
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer, int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer) [static]
Definition at line 1119 of file vacuumlazy.c.
References Assert, BufferGetPage, BufferIsValid, LVRelStats::dead_tuples, END_CRIT_SECTION, heap_page_is_all_visible(), InvalidXLogRecPtr, ItemIdSetUnused, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LVRelStats::latestRemovedXid, log_heap_clean(), MarkBufferDirty(), NULL, LVRelStats::num_dead_tuples, PageGetItemId, PageRepairFragmentation(), PageSetAllVisible, PageSetLSN, RelationNeedsWAL, START_CRIT_SECTION, visibilitymap_set(), and visibilitymap_test().
Referenced by lazy_scan_heap(), and lazy_vacuum_heap().
{ Page page = BufferGetPage(buffer); OffsetNumber unused[MaxOffsetNumber]; int uncnt = 0; TransactionId visibility_cutoff_xid; START_CRIT_SECTION(); for (; tupindex < vacrelstats->num_dead_tuples; tupindex++) { BlockNumber tblk; OffsetNumber toff; ItemId itemid; tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]); if (tblk != blkno) break; /* past end of tuples for this block */ toff = ItemPointerGetOffsetNumber(&vacrelstats->dead_tuples[tupindex]); itemid = PageGetItemId(page, toff); ItemIdSetUnused(itemid); unused[uncnt++] = toff; } PageRepairFragmentation(page); /* * Mark buffer dirty before we write WAL. * * If checksums are enabled, visibilitymap_set() may log the heap page, so * we must mark heap buffer dirty before calling visibilitymap_set(). */ MarkBufferDirty(buffer); /* * Now that we have removed the dead tuples from the page, once again check * if the page has become all-visible. */ if (!visibilitymap_test(onerel, blkno, vmbuffer) && heap_page_is_all_visible(buffer, &visibility_cutoff_xid)) { Assert(BufferIsValid(*vmbuffer)); PageSetAllVisible(page); visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer, visibility_cutoff_xid); } /* XLOG stuff */ if (RelationNeedsWAL(onerel)) { XLogRecPtr recptr; recptr = log_heap_clean(onerel, buffer, NULL, 0, NULL, 0, unused, uncnt, vacrelstats->latestRemovedXid); PageSetLSN(page, recptr); } END_CRIT_SECTION(); return tupindex; }
void lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
Definition at line 168 of file vacuumlazy.c.
References elevel, ereport, errmsg(), FreeSpaceMapVacuum(), VacuumStmt::freeze_min_age, VacuumStmt::freeze_table_age, FreezeLimit, get_database_name(), get_namespace_name(), GetCurrentTimestamp(), LVRelStats::hasindex, IsAutoVacuumWorkerProcess(), lazy_scan_heap(), lazy_truncate_heap(), LVRelStats::lock_waiter_detected, LOG, Log_autovacuum_min_duration, MultiXactFrzLimit, MyDatabaseId, LVRelStats::new_rel_tuples, NoLock, LVRelStats::nonempty_pages, LVRelStats::num_index_scans, LVRelStats::old_rel_pages, LVRelStats::old_rel_tuples, OldestXmin, VacuumStmt::options, LVRelStats::pages_removed, palloc0(), pg_rusage_init(), pg_rusage_show(), pgstat_report_vacuum(), RelationData::rd_rel, LVRelStats::rel_pages, REL_TRUNCATE_FRACTION, REL_TRUNCATE_MINIMUM, RelationGetNamespace, RelationGetRelationName, RelationGetRelid, RELKIND_MATVIEW, RowExclusiveLock, LVRelStats::scanned_pages, TimestampDifference(), TimestampDifferenceExceeds(), TransactionIdPrecedesOrEquals(), LVRelStats::tuples_deleted, vac_close_indexes(), vac_open_indexes(), vac_update_relstats(), VACOPT_VERBOSE, vacuum_set_xid_limits(), VacuumPageDirty, VacuumPageHit, VacuumPageMiss, and visibilitymap_count().
Referenced by vacuum_rel().
{ LVRelStats *vacrelstats; Relation *Irel; int nindexes; BlockNumber possibly_freeable; PGRUsage ru0; TimestampTz starttime = 0; long secs; int usecs; double read_rate, write_rate; bool scan_all; TransactionId freezeTableLimit; BlockNumber new_rel_pages; double new_rel_tuples; BlockNumber new_rel_allvisible; TransactionId new_frozen_xid; MultiXactId new_min_multi; /* measure elapsed time iff autovacuum logging requires it */ if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0) { pg_rusage_init(&ru0); starttime = GetCurrentTimestamp(); } if (vacstmt->options & VACOPT_VERBOSE) elevel = INFO; else elevel = DEBUG2; vac_strategy = bstrategy; vacuum_set_xid_limits(vacstmt->freeze_min_age, vacstmt->freeze_table_age, onerel->rd_rel->relisshared, &OldestXmin, &FreezeLimit, &freezeTableLimit, &MultiXactFrzLimit); scan_all = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid, freezeTableLimit); vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats)); vacrelstats->old_rel_pages = onerel->rd_rel->relpages; vacrelstats->old_rel_tuples = onerel->rd_rel->reltuples; vacrelstats->num_index_scans = 0; vacrelstats->pages_removed = 0; vacrelstats->lock_waiter_detected = false; /* Open all indexes of the relation */ vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel); vacrelstats->hasindex = (nindexes > 0); /* Do the vacuuming */ lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, scan_all); /* Done with indexes */ vac_close_indexes(nindexes, Irel, NoLock); /* * Optionally truncate the relation. * * Don't even think about it unless we have a shot at releasing a goodly * number of pages. Otherwise, the time taken isn't worth it. * * Leave a populated materialized view with at least one page. */ if (onerel->rd_rel->relkind == RELKIND_MATVIEW && vacrelstats->nonempty_pages == 0) vacrelstats->nonempty_pages = 1; possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages; if (possibly_freeable > 0 && (possibly_freeable >= REL_TRUNCATE_MINIMUM || possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)) lazy_truncate_heap(onerel, vacrelstats); /* Vacuum the Free Space Map */ FreeSpaceMapVacuum(onerel); /* * Update statistics in pg_class. * * A corner case here is that if we scanned no pages at all because every * page is all-visible, we should not update relpages/reltuples, because * we have no new information to contribute. In particular this keeps us * from replacing relpages=reltuples=0 (which means "unknown tuple * density") with nonzero relpages and reltuples=0 (which means "zero * tuple density") unless there's some actual evidence for the latter. * * We do update relallvisible even in the corner case, since if the table * is all-visible we'd definitely like to know that. But clamp the value * to be not more than what we're setting relpages to. * * Also, don't change relfrozenxid if we skipped any pages, since then we * don't know for certain that all tuples have a newer xmin. 
*/ new_rel_pages = vacrelstats->rel_pages; new_rel_tuples = vacrelstats->new_rel_tuples; if (vacrelstats->scanned_pages == 0 && new_rel_pages > 0) { new_rel_pages = vacrelstats->old_rel_pages; new_rel_tuples = vacrelstats->old_rel_tuples; } new_rel_allvisible = visibilitymap_count(onerel); if (new_rel_allvisible > new_rel_pages) new_rel_allvisible = new_rel_pages; new_frozen_xid = FreezeLimit; if (vacrelstats->scanned_pages < vacrelstats->rel_pages) new_frozen_xid = InvalidTransactionId; new_min_multi = MultiXactFrzLimit; if (vacrelstats->scanned_pages < vacrelstats->rel_pages) new_min_multi = InvalidMultiXactId; vac_update_relstats(onerel, new_rel_pages, new_rel_tuples, new_rel_allvisible, vacrelstats->hasindex, new_frozen_xid, new_min_multi); /* report results to the stats collector, too */ pgstat_report_vacuum(RelationGetRelid(onerel), onerel->rd_rel->relisshared, new_rel_tuples); /* and log the action if appropriate */ if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0) { TimestampTz endtime = GetCurrentTimestamp(); if (Log_autovacuum_min_duration == 0 || TimestampDifferenceExceeds(starttime, endtime, Log_autovacuum_min_duration)) { TimestampDifference(starttime, endtime, &secs, &usecs); read_rate = 0; write_rate = 0; if ((secs > 0) || (usecs > 0)) { read_rate = (double) BLCKSZ *VacuumPageMiss / (1024 * 1024) / (secs + usecs / 1000000.0); write_rate = (double) BLCKSZ *VacuumPageDirty / (1024 * 1024) / (secs + usecs / 1000000.0); } ereport(LOG, (errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n" "pages: %d removed, %d remain\n" "tuples: %.0f removed, %.0f remain\n" "buffer usage: %d hits, %d misses, %d dirtied\n" "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n" "system usage: %s", get_database_name(MyDatabaseId), get_namespace_name(RelationGetNamespace(onerel)), RelationGetRelationName(onerel), vacrelstats->num_index_scans, vacrelstats->pages_removed, vacrelstats->rel_pages, vacrelstats->tuples_deleted, vacrelstats->new_rel_tuples, VacuumPageHit, VacuumPageMiss, VacuumPageDirty, read_rate, write_rate, pg_rusage_show(&ru0)))); } } }
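The read_rate/write_rate figures in the autovacuum log line above are simply page counts converted to megabytes and divided by the elapsed wall-clock time. A small worked sketch of that formula, assuming the default BLCKSZ of 8192 and hypothetical counter values:

    #include <stdio.h>

    #define BLCKSZ 8192             /* assumed default page size */

    int
    main(void)
    {
        long        vacuum_page_miss = 1200;    /* hypothetical counters */
        long        vacuum_page_dirty = 300;
        long        secs = 2;
        long        usecs = 500000;
        double      elapsed = secs + usecs / 1000000.0;

        /* same shape as the computation in lazy_vacuum_rel() */
        double      read_rate = (double) BLCKSZ * vacuum_page_miss /
                                (1024 * 1024) / elapsed;
        double      write_rate = (double) BLCKSZ * vacuum_page_dirty /
                                 (1024 * 1024) / elapsed;

        /* 1200 pages = 9.375 MB over 2.5 s -> 3.750 MB/s read */
        printf("avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n",
               read_rate, write_rate);
        return 0;
    }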
static int vac_cmp_itemptr(const void *left, const void *right) [static]
Definition at line 1635 of file vacuumlazy.c.
References ItemPointerGetBlockNumber, and ItemPointerGetOffsetNumber.
{
    BlockNumber lblk,
                rblk;
    OffsetNumber loff,
                roff;

    lblk = ItemPointerGetBlockNumber((ItemPointer) left);
    rblk = ItemPointerGetBlockNumber((ItemPointer) right);

    if (lblk < rblk)
        return -1;
    if (lblk > rblk)
        return 1;

    loff = ItemPointerGetOffsetNumber((ItemPointer) left);
    roff = ItemPointerGetOffsetNumber((ItemPointer) right);

    if (loff < roff)
        return -1;
    if (loff > roff)
        return 1;

    return 0;
}
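To see the ordering this comparator imposes, here is a standalone illustration that sorts and searches a small array of simplified (block, offset) pairs with the same comparison logic; SimpleTid and tid_cmp are stand-ins for ItemPointerData and vac_cmp_itemptr, not backend code. In vacuumlazy.c itself no sort is needed, because lazy_scan_heap() records dead TIDs in physical page order, which is already the order defined here.

    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for ItemPointerData: ordered by block first, then offset */
    typedef struct
    {
        unsigned    blkno;
        unsigned    offnum;
    } SimpleTid;

    static int
    tid_cmp(const void *left, const void *right)
    {
        const SimpleTid *l = (const SimpleTid *) left;
        const SimpleTid *r = (const SimpleTid *) right;

        if (l->blkno < r->blkno)
            return -1;
        if (l->blkno > r->blkno)
            return 1;
        if (l->offnum < r->offnum)
            return -1;
        if (l->offnum > r->offnum)
            return 1;
        return 0;
    }

    int
    main(void)
    {
        SimpleTid   dead[] = {{7, 3}, {2, 12}, {7, 1}, {2, 4}};
        SimpleTid   probe = {7, 1};

        qsort(dead, 4, sizeof(SimpleTid), tid_cmp);
        /* after sorting: (2,4) (2,12) (7,1) (7,3) -- block-major order */
        printf("found: %s\n",
               bsearch(&probe, dead, 4, sizeof(SimpleTid), tid_cmp)
               ? "yes" : "no");
        return 0;
    }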
static void vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats) [static]
Definition at line 360 of file vacuumlazy.c.
References LVRelStats::latestRemovedXid, log_heap_cleanup_info(), RelationData::rd_node, RelationNeedsWAL, TransactionIdIsValid, and XLogIsNeeded.
Referenced by lazy_scan_heap().
{
    /*
     * Skip this for relations for which no WAL is to be written, or if we're
     * not trying to support archive recovery.
     */
    if (!RelationNeedsWAL(rel) || !XLogIsNeeded())
        return;

    /*
     * No need to write the record at all unless it contains a valid value
     */
    if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
        (void) log_heap_cleanup_info(rel->rd_node,
                                     vacrelstats->latestRemovedXid);
}
int elevel = -1 [static]
Definition at line 124 of file vacuumlazy.c.
Referenced by analyze_rel(), copy_heap_data(), count_nondeletable_pages(), errfinish(), lazy_cleanup_index(), lazy_scan_heap(), lazy_truncate_heap(), lazy_vacuum_heap(), lazy_vacuum_index(), lazy_vacuum_rel(), load_libraries(), RestoreArchivedFile(), and set_config_sourcefile().
TransactionId FreezeLimit [static]
Definition at line 127 of file vacuumlazy.c.
Referenced by lazy_check_needs_freeze(), lazy_scan_heap(), and lazy_vacuum_rel().
MultiXactId MultiXactFrzLimit [static]
Definition at line 128 of file vacuumlazy.c.
Referenced by copy_heap_data(), lazy_check_needs_freeze(), lazy_scan_heap(), and lazy_vacuum_rel().
TransactionId OldestXmin [static]
Definition at line 126 of file vacuumlazy.c.
Referenced by acquire_sample_rows(), copy_heap_data(), heap_page_is_all_visible(), IndexBuildHeapScan(), lazy_scan_heap(), and lazy_vacuum_rel().
BufferAccessStrategy vac_strategy [static]
Definition at line 130 of file vacuumlazy.c.