#include "postgres.h"
#include <limits.h>
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "catalog/index.h"
#include "commands/tablespace.h"
#include "executor/executor.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "utils/datum.h"
#include "utils/logtape.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/rel.h"
#include "utils/sortsupport.h"
#include "utils/tuplesort.h"
#include "qsort_tuple.c"
Go to the source code of this file.
/* Sort-kind identifier (see tuplesort_begin_cluster()) */
#define CLUSTER_SORT	3
Definition at line 124 of file tuplesort.c.
Referenced by tuplesort_begin_cluster().
Definition at line 394 of file tuplesort.c.
Referenced by make_bounded_heap(), and puttuple_common().
Definition at line 395 of file tuplesort.c.
Referenced by tuplesort_putheaptuple(), tuplesort_putindextuple(), and tuplesort_puttupleslot().
/* Sort-kind identifier (see tuplesort_begin_datum()) */
#define DATUM_SORT		2
Definition at line 123 of file tuplesort.c.
Referenced by tuplesort_begin_datum().
Definition at line 401 of file tuplesort.c.
Referenced by free_sort_tuple(), grow_memtuples(), writetup_cluster(), writetup_datum(), writetup_heap(), and writetup_index().
/* Sort-kind identifier (see tuplesort_begin_heap()) */
#define HEAP_SORT		0
Definition at line 121 of file tuplesort.c.
Referenced by tuplesort_begin_heap().
/*
 * Compare two SortTuples in the sort heap.  When checkIndex is true (run
 * building), tuples are ordered first by run number (tupindex); tuples of
 * the same run fall through to COMPARETUP.
 *
 * NOTE: macro evaluates its arguments more than once — avoid side effects.
 * Relies on local variables "checkIndex" and "state" being in scope at the
 * call site (see tuplesort_heap_insert / tuplesort_heap_siftup callers).
 */
#define HEAPCOMPARE(tup1,tup2) \
    (checkIndex && ((tup1)->tupindex != (tup2)->tupindex) ? \
     ((tup1)->tupindex) - ((tup2)->tupindex) : \
     COMPARETUP(state, tup1, tup2))
Definition at line 2490 of file tuplesort.c.
Referenced by tuplesort_heap_insert(), and tuplesort_heap_siftup().
/* Sort-kind identifier (see tuplesort_begin_index_btree()) */
#define INDEX_SORT		1
Definition at line 122 of file tuplesort.c.
Referenced by tuplesort_begin_index_btree().
Definition at line 399 of file tuplesort.c.
Referenced by dumptuples(), grow_memtuples(), mergeprereadone(), puttuple_common(), and tuplesort_begin_common().
/*
 * Read exactly "len" bytes from the given tape; any short read raises an
 * error.  Multi-statement body wrapped in do/while(0) so the macro is safe
 * inside unbraced if/else.
 */
#define LogicalTapeReadExact(tapeset, tapenum, ptr, len) \
    do { \
        if (LogicalTapeRead(tapeset, tapenum, ptr, len) != (size_t) (len)) \
            elog(ERROR, "unexpected end of data"); \
    } while(0)
Definition at line 445 of file tuplesort.c.
Referenced by readtup_cluster(), readtup_datum(), readtup_heap(), and readtup_index().
/* Assumed size of merge input buffers; used by tuplesort_merge_order() */
#define MERGE_BUFFER_SIZE		(BLCKSZ * 32)
Definition at line 197 of file tuplesort.c.
Referenced by tuplesort_merge_order().
/* Minimum merge order; used by tuplesort_merge_order() */
#define MINORDER		6
Definition at line 195 of file tuplesort.c.
Referenced by tuplesort_merge_order().
Definition at line 397 of file tuplesort.c.
Referenced by mergeprereadone(), and tuplesort_gettuple_common().
Definition at line 398 of file tuplesort.c.
Referenced by make_bounded_heap(), and sort_bounded_heap().
/* Per-tape buffer overhead charged against availMem; see tuplesort_merge_order() */
#define TAPE_BUFFER_OVERHEAD	(BLCKSZ * 3)
Definition at line 196 of file tuplesort.c.
Referenced by tuplesort_merge_order().
Definition at line 400 of file tuplesort.c.
Referenced by copytup_cluster(), copytup_heap(), copytup_index(), grow_memtuples(), inittapes(), readtup_cluster(), readtup_datum(), readtup_heap(), readtup_index(), tuplesort_begin_common(), and tuplesort_putdatum().
Definition at line 396 of file tuplesort.c.
Referenced by dumptuples(), and mergeonerun().
/* Signature of a comparator over two SortTuples under a given sort state */
typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
                                    Tuplesortstate *state);
Definition at line 199 of file tuplesort.c.
enum TupSortStatus |
Definition at line 174 of file tuplesort.c.
/*
 * Possible states of a Tuplesortstate object.
 */
typedef enum
{
    TSS_INITIAL,                /* Loading tuples; still within memory limit */
    TSS_BOUNDED,                /* Loading tuples into bounded-size heap */
    TSS_BUILDRUNS,              /* Loading tuples; writing to tape */
    TSS_SORTEDINMEM,            /* Sort completed entirely in memory */
    TSS_SORTEDONTAPE,           /* Sort completed, final run is on tape */
    TSS_FINALMERGE              /* Performing final merge on-the-fly */
} TupSortStatus;
static void beginmerge | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2097 of file tuplesort.c.
References Tuplesortstate::activeTapes, Assert, Tuplesortstate::availMem, Tuplesortstate::maxTapes, Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, Tuplesortstate::mergeactive, Tuplesortstate::mergeavailmem, Tuplesortstate::mergeavailslots, Tuplesortstate::mergefirstfree, Tuplesortstate::mergefreelist, Tuplesortstate::mergelast, Tuplesortstate::mergenext, mergepreread(), Tuplesortstate::tapeRange, Tuplesortstate::tp_dummy, Tuplesortstate::tp_runs, Tuplesortstate::tp_tapenum, SortTuple::tupindex, and tuplesort_heap_insert().
Referenced by mergeonerun(), and mergeruns().
{ int activeTapes; int tapenum; int srcTape; int slotsPerTape; long spacePerTape; /* Heap should be empty here */ Assert(state->memtupcount == 0); /* Adjust run counts and mark the active tapes */ memset(state->mergeactive, 0, state->maxTapes * sizeof(*state->mergeactive)); activeTapes = 0; for (tapenum = 0; tapenum < state->tapeRange; tapenum++) { if (state->tp_dummy[tapenum] > 0) state->tp_dummy[tapenum]--; else { Assert(state->tp_runs[tapenum] > 0); state->tp_runs[tapenum]--; srcTape = state->tp_tapenum[tapenum]; state->mergeactive[srcTape] = true; activeTapes++; } } state->activeTapes = activeTapes; /* Clear merge-pass state variables */ memset(state->mergenext, 0, state->maxTapes * sizeof(*state->mergenext)); memset(state->mergelast, 0, state->maxTapes * sizeof(*state->mergelast)); state->mergefreelist = 0; /* nothing in the freelist */ state->mergefirstfree = activeTapes; /* 1st slot avail for preread */ /* * Initialize space allocation to let each active input tape have an equal * share of preread space. 
*/ Assert(activeTapes > 0); slotsPerTape = (state->memtupsize - state->mergefirstfree) / activeTapes; Assert(slotsPerTape > 0); spacePerTape = state->availMem / activeTapes; for (srcTape = 0; srcTape < state->maxTapes; srcTape++) { if (state->mergeactive[srcTape]) { state->mergeavailslots[srcTape] = slotsPerTape; state->mergeavailmem[srcTape] = spacePerTape; } } /* * Preread as many tuples as possible (and at least one) from each active * tape */ mergepreread(state); /* Load the merge heap with the first tuple from each input tape */ for (srcTape = 0; srcTape < state->maxTapes; srcTape++) { int tupIndex = state->mergenext[srcTape]; SortTuple *tup; if (tupIndex) { tup = &state->memtuples[tupIndex]; state->mergenext[srcTape] = tup->tupindex; if (state->mergenext[srcTape] == 0) state->mergelast[srcTape] = 0; tuplesort_heap_insert(state, tup, srcTape, false); /* put the now-unused memtuples entry on the freelist */ tup->tupindex = state->mergefreelist; state->mergefreelist = tupIndex; state->mergeavailslots[srcTape]++; } } }
static int comparetup_cluster | ( | const SortTuple * | a, | |
const SortTuple * | b, | |||
Tuplesortstate * | state | |||
) | [static] |
Definition at line 2912 of file tuplesort.c.
References SortTuple::datum1, Tuplesortstate::estate, ExecStoreTuple(), FormIndexDatum(), GetPerTupleExprContext, heap_getattr, IndexInfo::ii_Expressions, IndexInfo::ii_KeyAttrNumbers, Tuplesortstate::indexInfo, Tuplesortstate::indexScanKey, inlineApplySortFunction(), InvalidBuffer, SortTuple::isnull1, Tuplesortstate::nKeys, NULL, ResetPerTupleExprContext, ScanKeyData::sk_collation, ScanKeyData::sk_flags, ScanKeyData::sk_func, Tuplesortstate::tupDesc, and SortTuple::tuple.
{ ScanKey scanKey = state->indexScanKey; HeapTuple ltup; HeapTuple rtup; TupleDesc tupDesc; int nkey; int32 compare; /* Compare the leading sort key, if it's simple */ if (state->indexInfo->ii_KeyAttrNumbers[0] != 0) { compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, scanKey->sk_collation, a->datum1, a->isnull1, b->datum1, b->isnull1); if (compare != 0 || state->nKeys == 1) return compare; /* Compare additional columns the hard way */ scanKey++; nkey = 1; } else { /* Must compare all keys the hard way */ nkey = 0; } /* Compare additional sort keys */ ltup = (HeapTuple) a->tuple; rtup = (HeapTuple) b->tuple; if (state->indexInfo->ii_Expressions == NULL) { /* If not expression index, just compare the proper heap attrs */ tupDesc = state->tupDesc; for (; nkey < state->nKeys; nkey++, scanKey++) { AttrNumber attno = state->indexInfo->ii_KeyAttrNumbers[nkey]; Datum datum1, datum2; bool isnull1, isnull2; datum1 = heap_getattr(ltup, attno, tupDesc, &isnull1); datum2 = heap_getattr(rtup, attno, tupDesc, &isnull2); compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, scanKey->sk_collation, datum1, isnull1, datum2, isnull2); if (compare != 0) return compare; } } else { /* * In the expression index case, compute the whole index tuple and * then compare values. It would perhaps be faster to compute only as * many columns as we need to compare, but that would require * duplicating all the logic in FormIndexDatum. 
*/ Datum l_index_values[INDEX_MAX_KEYS]; bool l_index_isnull[INDEX_MAX_KEYS]; Datum r_index_values[INDEX_MAX_KEYS]; bool r_index_isnull[INDEX_MAX_KEYS]; TupleTableSlot *ecxt_scantuple; /* Reset context each time to prevent memory leakage */ ResetPerTupleExprContext(state->estate); ecxt_scantuple = GetPerTupleExprContext(state->estate)->ecxt_scantuple; ExecStoreTuple(ltup, ecxt_scantuple, InvalidBuffer, false); FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate, l_index_values, l_index_isnull); ExecStoreTuple(rtup, ecxt_scantuple, InvalidBuffer, false); FormIndexDatum(state->indexInfo, ecxt_scantuple, state->estate, r_index_values, r_index_isnull); for (; nkey < state->nKeys; nkey++, scanKey++) { compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, scanKey->sk_collation, l_index_values[nkey], l_index_isnull[nkey], r_index_values[nkey], r_index_isnull[nkey]); if (compare != 0) return compare; } } return 0; }
static int comparetup_datum | ( | const SortTuple * | a, | |
const SortTuple * | b, | |||
Tuplesortstate * | state | |||
) | [static] |
Definition at line 3340 of file tuplesort.c.
References ApplySortComparator(), SortTuple::datum1, SortTuple::isnull1, and Tuplesortstate::onlyKey.
static int comparetup_heap | ( | const SortTuple * | a, | |
const SortTuple * | b, | |||
Tuplesortstate * | state | |||
) | [static] |
Definition at line 2772 of file tuplesort.c.
References ApplySortComparator(), SortTuple::datum1, heap_getattr, SortTuple::isnull1, Tuplesortstate::nKeys, Tuplesortstate::sortKeys, SortSupportData::ssup_attno, HeapTupleData::t_data, HeapTupleData::t_len, Tuplesortstate::tupDesc, and SortTuple::tuple.
{ SortSupport sortKey = state->sortKeys; HeapTupleData ltup; HeapTupleData rtup; TupleDesc tupDesc; int nkey; int32 compare; /* Compare the leading sort key */ compare = ApplySortComparator(a->datum1, a->isnull1, b->datum1, b->isnull1, sortKey); if (compare != 0) return compare; /* Compare additional sort keys */ ltup.t_len = ((MinimalTuple) a->tuple)->t_len + MINIMAL_TUPLE_OFFSET; ltup.t_data = (HeapTupleHeader) ((char *) a->tuple - MINIMAL_TUPLE_OFFSET); rtup.t_len = ((MinimalTuple) b->tuple)->t_len + MINIMAL_TUPLE_OFFSET; rtup.t_data = (HeapTupleHeader) ((char *) b->tuple - MINIMAL_TUPLE_OFFSET); tupDesc = state->tupDesc; sortKey++; for (nkey = 1; nkey < state->nKeys; nkey++, sortKey++) { AttrNumber attno = sortKey->ssup_attno; Datum datum1, datum2; bool isnull1, isnull2; datum1 = heap_getattr(<up, attno, tupDesc, &isnull1); datum2 = heap_getattr(&rtup, attno, tupDesc, &isnull2); compare = ApplySortComparator(datum1, isnull1, datum2, isnull2, sortKey); if (compare != 0) return compare; } return 0; }
static int comparetup_index_btree | ( | const SortTuple * | a, | |
const SortTuple * | b, | |||
Tuplesortstate * | state | |||
) | [static] |
Definition at line 3092 of file tuplesort.c.
References Assert, BuildIndexValueDescription(), SortTuple::datum1, Tuplesortstate::enforceUnique, ereport, errcode(), errdetail(), errmsg(), ERROR, errtableconstraint(), Tuplesortstate::heapRel, index_deform_tuple(), index_getattr, Tuplesortstate::indexRel, Tuplesortstate::indexScanKey, inlineApplySortFunction(), SortTuple::isnull1, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, Tuplesortstate::nKeys, RelationGetDescr, RelationGetRelationName, ScanKeyData::sk_collation, ScanKeyData::sk_flags, ScanKeyData::sk_func, IndexTupleData::t_tid, SortTuple::tuple, and values.
{ /* * This is similar to _bt_tuplecompare(), but we have already done the * index_getattr calls for the first column, and we need to keep track of * whether any null fields are present. Also see the special treatment * for equal keys at the end. */ ScanKey scanKey = state->indexScanKey; IndexTuple tuple1; IndexTuple tuple2; int keysz; TupleDesc tupDes; bool equal_hasnull = false; int nkey; int32 compare; /* Compare the leading sort key */ compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, scanKey->sk_collation, a->datum1, a->isnull1, b->datum1, b->isnull1); if (compare != 0) return compare; /* they are equal, so we only need to examine one null flag */ if (a->isnull1) equal_hasnull = true; /* Compare additional sort keys */ tuple1 = (IndexTuple) a->tuple; tuple2 = (IndexTuple) b->tuple; keysz = state->nKeys; tupDes = RelationGetDescr(state->indexRel); scanKey++; for (nkey = 2; nkey <= keysz; nkey++, scanKey++) { Datum datum1, datum2; bool isnull1, isnull2; datum1 = index_getattr(tuple1, nkey, tupDes, &isnull1); datum2 = index_getattr(tuple2, nkey, tupDes, &isnull2); compare = inlineApplySortFunction(&scanKey->sk_func, scanKey->sk_flags, scanKey->sk_collation, datum1, isnull1, datum2, isnull2); if (compare != 0) return compare; /* done when we find unequal attributes */ /* they are equal, so we only need to examine one null flag */ if (isnull1) equal_hasnull = true; } /* * If btree has asked us to enforce uniqueness, complain if two equal * tuples are detected (unless there was at least one NULL field). * * It is sufficient to make the test here, because if two tuples are equal * they *must* get compared at some stage of the sort --- otherwise the * sort algorithm wouldn't have checked whether one must appear before the * other. 
*/ if (state->enforceUnique && !equal_hasnull) { Datum values[INDEX_MAX_KEYS]; bool isnull[INDEX_MAX_KEYS]; /* * Some rather brain-dead implementations of qsort (such as the one in * QNX 4) will sometimes call the comparison routine to compare a * value to itself, but we always use our own implementation, which * does not. */ Assert(tuple1 != tuple2); index_deform_tuple(tuple1, tupDes, values, isnull); ereport(ERROR, (errcode(ERRCODE_UNIQUE_VIOLATION), errmsg("could not create unique index \"%s\"", RelationGetRelationName(state->indexRel)), errdetail("Key %s is duplicated.", BuildIndexValueDescription(state->indexRel, values, isnull)), errtableconstraint(state->heapRel, RelationGetRelationName(state->indexRel)))); } /* * If key values are equal, we sort on ItemPointer. This does not affect * validity of the finished index, but it may be useful to have index * scans in physical order. */ { BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid); BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid); if (blk1 != blk2) return (blk1 < blk2) ? -1 : 1; } { OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid); OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid); if (pos1 != pos2) return (pos1 < pos2) ? -1 : 1; } return 0; }
static int comparetup_index_hash | ( | const SortTuple * | a, | |
const SortTuple * | b, | |||
Tuplesortstate * | state | |||
) | [static] |
Definition at line 3208 of file tuplesort.c.
References Assert, SortTuple::datum1, DatumGetUInt32, Tuplesortstate::hash_mask, SortTuple::isnull1, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, IndexTupleData::t_tid, and SortTuple::tuple.
{ uint32 hash1; uint32 hash2; IndexTuple tuple1; IndexTuple tuple2; /* * Fetch hash keys and mask off bits we don't want to sort by. We know * that the first column of the index tuple is the hash key. */ Assert(!a->isnull1); hash1 = DatumGetUInt32(a->datum1) & state->hash_mask; Assert(!b->isnull1); hash2 = DatumGetUInt32(b->datum1) & state->hash_mask; if (hash1 > hash2) return 1; else if (hash1 < hash2) return -1; /* * If hash values are equal, we sort on ItemPointer. This does not affect * validity of the finished index, but it may be useful to have index * scans in physical order. */ tuple1 = (IndexTuple) a->tuple; tuple2 = (IndexTuple) b->tuple; { BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid); BlockNumber blk2 = ItemPointerGetBlockNumber(&tuple2->t_tid); if (blk1 != blk2) return (blk1 < blk2) ? -1 : 1; } { OffsetNumber pos1 = ItemPointerGetOffsetNumber(&tuple1->t_tid); OffsetNumber pos2 = ItemPointerGetOffsetNumber(&tuple2->t_tid); if (pos1 != pos2) return (pos1 < pos2) ? -1 : 1; } return 0; }
static void copytup_cluster | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
void * | tup | |||
) | [static] |
Definition at line 3015 of file tuplesort.c.
References SortTuple::datum1, GetMemoryChunkSpace(), heap_copytuple(), heap_getattr, IndexInfo::ii_KeyAttrNumbers, Tuplesortstate::indexInfo, SortTuple::isnull1, Tuplesortstate::tupDesc, SortTuple::tuple, and USEMEM.
{ HeapTuple tuple = (HeapTuple) tup; /* copy the tuple into sort storage */ tuple = heap_copytuple(tuple); stup->tuple = (void *) tuple; USEMEM(state, GetMemoryChunkSpace(tuple)); /* set up first-column key value, if it's a simple column */ if (state->indexInfo->ii_KeyAttrNumbers[0] != 0) stup->datum1 = heap_getattr(tuple, state->indexInfo->ii_KeyAttrNumbers[0], state->tupDesc, &stup->isnull1); }
static void copytup_datum | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
void * | tup | |||
) | [static] |
static void copytup_heap | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
void * | tup | |||
) | [static] |
Definition at line 2817 of file tuplesort.c.
References SortTuple::datum1, ExecCopySlotMinimalTuple(), GetMemoryChunkSpace(), heap_getattr, SortTuple::isnull1, Tuplesortstate::sortKeys, SortSupportData::ssup_attno, HeapTupleData::t_data, MinimalTupleData::t_len, HeapTupleData::t_len, Tuplesortstate::tupDesc, SortTuple::tuple, and USEMEM.
{ /* * We expect the passed "tup" to be a TupleTableSlot, and form a * MinimalTuple using the exported interface for that. */ TupleTableSlot *slot = (TupleTableSlot *) tup; MinimalTuple tuple; HeapTupleData htup; /* copy the tuple into sort storage */ tuple = ExecCopySlotMinimalTuple(slot); stup->tuple = (void *) tuple; USEMEM(state, GetMemoryChunkSpace(tuple)); /* set up first-column key value */ htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET; htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET); stup->datum1 = heap_getattr(&htup, state->sortKeys[0].ssup_attno, state->tupDesc, &stup->isnull1); }
static void copytup_index | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
void * | tup | |||
) | [static] |
Definition at line 3257 of file tuplesort.c.
References SortTuple::datum1, GetMemoryChunkSpace(), index_getattr, Tuplesortstate::indexRel, IndexTupleSize, SortTuple::isnull1, palloc(), RelationGetDescr, SortTuple::tuple, and USEMEM.
{ IndexTuple tuple = (IndexTuple) tup; unsigned int tuplen = IndexTupleSize(tuple); IndexTuple newtuple; /* copy the tuple into sort storage */ newtuple = (IndexTuple) palloc(tuplen); memcpy(newtuple, tuple, tuplen); USEMEM(state, GetMemoryChunkSpace(newtuple)); stup->tuple = (void *) newtuple; /* set up first-column key value */ stup->datum1 = index_getattr(newtuple, 1, RelationGetDescr(state->indexRel), &stup->isnull1); }
static void dumptuples | ( | Tuplesortstate * | state, | |
bool | alltuples | |||
) | [static] |
Definition at line 2284 of file tuplesort.c.
References Assert, Tuplesortstate::currentRun, Tuplesortstate::destTape, elog, LACKMEM, LOG, markrunend(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, pg_rusage_show(), selectnewtape(), Tuplesortstate::tp_dummy, Tuplesortstate::tp_runs, Tuplesortstate::tp_tapenum, SortTuple::tupindex, tuplesort_heap_siftup(), and WRITETUP.
Referenced by puttuple_common(), and tuplesort_performsort().
{ while (alltuples || (LACKMEM(state) && state->memtupcount > 1) || state->memtupcount >= state->memtupsize) { /* * Dump the heap's frontmost entry, and sift up to remove it from the * heap. */ Assert(state->memtupcount > 0); WRITETUP(state, state->tp_tapenum[state->destTape], &state->memtuples[0]); tuplesort_heap_siftup(state, true); /* * If the heap is empty *or* top run number has changed, we've * finished the current run. */ if (state->memtupcount == 0 || state->currentRun != state->memtuples[0].tupindex) { markrunend(state, state->tp_tapenum[state->destTape]); state->currentRun++; state->tp_runs[state->destTape]++; state->tp_dummy[state->destTape]--; /* per Alg D step D2 */ #ifdef TRACE_SORT if (trace_sort) elog(LOG, "finished writing%s run %d to tape %d: %s", (state->memtupcount == 0) ? " final" : "", state->currentRun, state->destTape, pg_rusage_show(&state->ru_start)); #endif /* * Done if heap is empty, else prepare for new run. */ if (state->memtupcount == 0) break; Assert(state->currentRun == state->memtuples[0].tupindex); selectnewtape(state); } } }
static void free_sort_tuple | ( | Tuplesortstate * | state, | |
SortTuple * | stup | |||
) | [static] |
Definition at line 3444 of file tuplesort.c.
References FREEMEM, GetMemoryChunkSpace(), pfree(), and SortTuple::tuple.
Referenced by make_bounded_heap(), and puttuple_common().
{ FREEMEM(state, GetMemoryChunkSpace(stup->tuple)); pfree(stup->tuple); }
static unsigned int getlen | ( | Tuplesortstate * | state, | |
int | tapenum, | |||
bool | eofOK | |||
) | [static] |
Definition at line 2679 of file tuplesort.c.
References elog, ERROR, LogicalTapeRead(), and Tuplesortstate::tapeset.
Referenced by mergeprereadone(), and tuplesort_gettuple_common().
static bool grow_memtuples | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 978 of file tuplesort.c.
References Tuplesortstate::allowedMem, Tuplesortstate::availMem, elog, ERROR, FREEMEM, GetMemoryChunkSpace(), Tuplesortstate::growmemtuples, LACKMEM, MaxAllocSize, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, repalloc(), and USEMEM.
Referenced by puttuple_common().
{ int newmemtupsize; int memtupsize = state->memtupsize; long memNowUsed = state->allowedMem - state->availMem; /* Forget it if we've already maxed out memtuples, per comment above */ if (!state->growmemtuples) return false; /* Select new value of memtupsize */ if (memNowUsed <= state->availMem) { /* * It is surely safe to double memtupsize if we've used no more than * half of allowedMem. * * Note: it might seem that we need to worry about memtupsize * 2 * overflowing an int, but the MaxAllocSize clamp applied below * ensures the existing memtupsize can't be large enough for that. */ newmemtupsize = memtupsize * 2; } else { /* * This will be the last increment of memtupsize. Abandon doubling * strategy and instead increase as much as we safely can. * * To stay within allowedMem, we can't increase memtupsize by more * than availMem / sizeof(SortTuple) elements. In practice, we want * to increase it by considerably less, because we need to leave some * space for the tuples to which the new array slots will refer. We * assume the new tuples will be about the same size as the tuples * we've already seen, and thus we can extrapolate from the space * consumption so far to estimate an appropriate new size for the * memtuples array. The optimal value might be higher or lower than * this estimate, but it's hard to know that in advance. * * This calculation is safe against enlarging the array so much that * LACKMEM becomes true, because the memory currently used includes * the present array; thus, there would be enough allowedMem for the * new array elements even if no other memory were currently used. * * We do the arithmetic in float8, because otherwise the product of * memtupsize and allowedMem could overflow. (A little algebra shows * that grow_ratio must be less than 2 here, so we are not risking * integer overflow this way.) 
Any inaccuracy in the result should be * insignificant; but even if we computed a completely insane result, * the checks below will prevent anything really bad from happening. */ double grow_ratio; grow_ratio = (double) state->allowedMem / (double) memNowUsed; newmemtupsize = (int) (memtupsize * grow_ratio); /* We won't make any further enlargement attempts */ state->growmemtuples = false; } /* Must enlarge array by at least one element, else report failure */ if (newmemtupsize <= memtupsize) goto noalloc; /* * On a 64-bit machine, allowedMem could be more than MaxAllocSize. Clamp * to ensure our request won't be rejected by palloc. */ if ((Size) newmemtupsize >= MaxAllocSize / sizeof(SortTuple)) { newmemtupsize = (int) (MaxAllocSize / sizeof(SortTuple)); state->growmemtuples = false; /* can't grow any more */ } /* * We need to be sure that we do not cause LACKMEM to become true, else * the space management algorithm will go nuts. The code above should * never generate a dangerous request, but to be safe, check explicitly * that the array growth fits within availMem. (We could still cause * LACKMEM if the memory chunk overhead associated with the memtuples * array were to increase. That shouldn't happen with any sane value of * allowedMem, because at any array size large enough to risk LACKMEM, * palloc would be treating both old and new arrays as separate chunks. * But we'll check LACKMEM explicitly below just in case.) 
*/ if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(SortTuple))) goto noalloc; /* OK, do it */ FREEMEM(state, GetMemoryChunkSpace(state->memtuples)); state->memtupsize = newmemtupsize; state->memtuples = (SortTuple *) repalloc(state->memtuples, state->memtupsize * sizeof(SortTuple)); USEMEM(state, GetMemoryChunkSpace(state->memtuples)); if (LACKMEM(state)) elog(ERROR, "unexpected out-of-memory situation during sort"); return true; noalloc: /* If for any reason we didn't realloc, shut off future attempts */ state->growmemtuples = false; return false; }
static void inittapes | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 1747 of file tuplesort.c.
References Tuplesortstate::allowedMem, Assert, Tuplesortstate::currentRun, Tuplesortstate::destTape, elog, GetMemoryChunkSpace(), Tuplesortstate::Level, LOG, LogicalTapeSetCreate(), Tuplesortstate::maxTapes, Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, Tuplesortstate::mergeactive, Tuplesortstate::mergeavailmem, Tuplesortstate::mergeavailslots, Tuplesortstate::mergelast, Tuplesortstate::mergenext, Min, palloc0(), pg_rusage_show(), PrepareTempTablespaces(), Tuplesortstate::status, Tuplesortstate::tapeRange, Tuplesortstate::tapeset, Tuplesortstate::tp_dummy, Tuplesortstate::tp_fib, Tuplesortstate::tp_runs, Tuplesortstate::tp_tapenum, tuplesort_heap_insert(), tuplesort_merge_order(), and USEMEM.
Referenced by puttuple_common().
{ int maxTapes, ntuples, j; long tapeSpace; /* Compute number of tapes to use: merge order plus 1 */ maxTapes = tuplesort_merge_order(state->allowedMem) + 1; /* * We must have at least 2*maxTapes slots in the memtuples[] array, else * we'd not have room for merge heap plus preread. It seems unlikely that * this case would ever occur, but be safe. */ maxTapes = Min(maxTapes, state->memtupsize / 2); state->maxTapes = maxTapes; state->tapeRange = maxTapes - 1; #ifdef TRACE_SORT if (trace_sort) elog(LOG, "switching to external sort with %d tapes: %s", maxTapes, pg_rusage_show(&state->ru_start)); #endif /* * Decrease availMem to reflect the space needed for tape buffers; but * don't decrease it to the point that we have no room for tuples. (That * case is only likely to occur if sorting pass-by-value Datums; in all * other scenarios the memtuples[] array is unlikely to occupy more than * half of allowedMem. In the pass-by-value case it's not important to * account for tuple space, so we don't care if LACKMEM becomes * inaccurate.) */ tapeSpace = maxTapes * TAPE_BUFFER_OVERHEAD; if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem) USEMEM(state, tapeSpace); /* * Make sure that the temp file(s) underlying the tape set are created in * suitable temp tablespaces. */ PrepareTempTablespaces(); /* * Create the tape set and allocate the per-tape data arrays. 
*/ state->tapeset = LogicalTapeSetCreate(maxTapes); state->mergeactive = (bool *) palloc0(maxTapes * sizeof(bool)); state->mergenext = (int *) palloc0(maxTapes * sizeof(int)); state->mergelast = (int *) palloc0(maxTapes * sizeof(int)); state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int)); state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long)); state->tp_fib = (int *) palloc0(maxTapes * sizeof(int)); state->tp_runs = (int *) palloc0(maxTapes * sizeof(int)); state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int)); state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int)); /* * Convert the unsorted contents of memtuples[] into a heap. Each tuple is * marked as belonging to run number zero. * * NOTE: we pass false for checkIndex since there's no point in comparing * indexes in this step, even though we do intend the indexes to be part * of the sort key... */ ntuples = state->memtupcount; state->memtupcount = 0; /* make the heap empty */ for (j = 0; j < ntuples; j++) { /* Must copy source tuple to avoid possible overwrite */ SortTuple stup = state->memtuples[j]; tuplesort_heap_insert(state, &stup, 0, false); } Assert(state->memtupcount == ntuples); state->currentRun = 0; /* * Initialize variables of Algorithm D (step D1). */ for (j = 0; j < maxTapes; j++) { state->tp_fib[j] = 1; state->tp_runs[j] = 0; state->tp_dummy[j] = 1; state->tp_tapenum[j] = j; } state->tp_fib[state->tapeRange] = 0; state->tp_dummy[state->tapeRange] = 0; state->Level = 1; state->destTape = 0; state->status = TSS_BUILDRUNS; }
static int32 inlineApplySortFunction | ( | FmgrInfo * | sortFunction, | |
int | sk_flags, | |||
Oid | collation, | |||
Datum | datum1, | |||
bool | isNull1, | |||
Datum | datum2, | |||
bool | isNull2 | |||
) | [inline, static] |
Definition at line 2732 of file tuplesort.c.
References DatumGetInt32, myFunctionCall2Coll(), SK_BT_DESC, and SK_BT_NULLS_FIRST.
Referenced by comparetup_cluster(), and comparetup_index_btree().
{ int32 compare; if (isNull1) { if (isNull2) compare = 0; /* NULL "=" NULL */ else if (sk_flags & SK_BT_NULLS_FIRST) compare = -1; /* NULL "<" NOT_NULL */ else compare = 1; /* NULL ">" NOT_NULL */ } else if (isNull2) { if (sk_flags & SK_BT_NULLS_FIRST) compare = 1; /* NOT_NULL ">" NULL */ else compare = -1; /* NOT_NULL "<" NULL */ } else { compare = DatumGetInt32(myFunctionCall2Coll(sortFunction, collation, datum1, datum2)); if (sk_flags & SK_BT_DESC) compare = -compare; } return compare; }
static void make_bounded_heap | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2509 of file tuplesort.c.
References Assert, Tuplesortstate::bound, Tuplesortstate::bounded, CHECK_FOR_INTERRUPTS, COMPARETUP, free_sort_tuple(), i, Tuplesortstate::memtupcount, Tuplesortstate::memtuples, REVERSEDIRECTION, Tuplesortstate::status, TSS_INITIAL, tuplesort_heap_insert(), and tuplesort_heap_siftup().
Referenced by puttuple_common().
{ int tupcount = state->memtupcount; int i; Assert(state->status == TSS_INITIAL); Assert(state->bounded); Assert(tupcount >= state->bound); /* Reverse sort direction so largest entry will be at root */ REVERSEDIRECTION(state); state->memtupcount = 0; /* make the heap empty */ for (i = 0; i < tupcount; i++) { if (state->memtupcount >= state->bound && COMPARETUP(state, &state->memtuples[i], &state->memtuples[0]) <= 0) { /* New tuple would just get thrown out, so skip it */ free_sort_tuple(state, &state->memtuples[i]); CHECK_FOR_INTERRUPTS(); } else { /* Insert next tuple into heap */ /* Must copy source tuple to avoid possible overwrite */ SortTuple stup = state->memtuples[i]; tuplesort_heap_insert(state, &stup, 0, false); /* If heap too full, discard largest entry */ if (state->memtupcount > state->bound) { free_sort_tuple(state, &state->memtuples[0]); tuplesort_heap_siftup(state, false); } } } Assert(state->memtupcount == state->bound); state->status = TSS_BOUNDED; }
static void markrunend | ( | Tuplesortstate * | state, | |
int | tapenum | |||
) | [static] |
Definition at line 2692 of file tuplesort.c.
References LogicalTapeWrite(), and Tuplesortstate::tapeset.
Referenced by dumptuples(), and mergeonerun().
{
    /*
     * An end-of-run marker is simply a zero-valued length word; readers
     * (see getlen callers) treat a zero length as "run exhausted".
     */
    unsigned int endmarker = 0;

    LogicalTapeWrite(state->tapeset, tapenum,
                     (void *) &endmarker, sizeof(endmarker));
}
static void mergeonerun | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2023 of file tuplesort.c.
References Tuplesortstate::activeTapes, Tuplesortstate::availMem, beginmerge(), elog, LOG, markrunend(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::mergeavailmem, Tuplesortstate::mergeavailslots, Tuplesortstate::mergefreelist, Tuplesortstate::mergelast, Tuplesortstate::mergenext, mergepreread(), pg_rusage_show(), Tuplesortstate::tapeRange, Tuplesortstate::tp_runs, Tuplesortstate::tp_tapenum, SortTuple::tupindex, tuplesort_heap_insert(), tuplesort_heap_siftup(), and WRITETUP.
Referenced by mergeruns().
/*
 * Merge one run from each active input tape onto the destination tape
 * (tp_tapenum[tapeRange]).  A heap keyed by tape index holds the current
 * head tuple of each source tape; we repeatedly pop the lowest tuple,
 * write it out, and refill from the same tape's preread list.
 */
{
    int         destTape = state->tp_tapenum[state->tapeRange];
    int         srcTape;
    int         tupIndex;
    SortTuple  *tup;
    long        priorAvail,
                spaceFreed;

    /*
     * Start the merge by loading one tuple from each active source tape into
     * the heap.  We can also decrease the input run/dummy run counts.
     */
    beginmerge(state);

    /*
     * Execute merge by repeatedly extracting lowest tuple in heap, writing it
     * out, and replacing it with next tuple from same tape (if there is
     * another one).
     */
    while (state->memtupcount > 0)
    {
        /* write the tuple to destTape */
        priorAvail = state->availMem;
        srcTape = state->memtuples[0].tupindex;
        WRITETUP(state, destTape, &state->memtuples[0]);
        /* writetup adjusted total free space, now fix per-tape space */
        spaceFreed = state->availMem - priorAvail;
        state->mergeavailmem[srcTape] += spaceFreed;
        /* compact the heap */
        tuplesort_heap_siftup(state, false);
        if ((tupIndex = state->mergenext[srcTape]) == 0)
        {
            /* out of preloaded data on this tape, try to read more */
            mergepreread(state);
            /* if still no data, we've reached end of run on this tape */
            if ((tupIndex = state->mergenext[srcTape]) == 0)
                continue;
        }
        /* pull next preread tuple from list, insert in heap */
        tup = &state->memtuples[tupIndex];
        state->mergenext[srcTape] = tup->tupindex;
        if (state->mergenext[srcTape] == 0)
            state->mergelast[srcTape] = 0;
        tuplesort_heap_insert(state, tup, srcTape, false);
        /* put the now-unused memtuples entry on the freelist */
        tup->tupindex = state->mergefreelist;
        state->mergefreelist = tupIndex;
        state->mergeavailslots[srcTape]++;
    }

    /*
     * When the heap empties, we're done.  Write an end-of-run marker on the
     * output tape, and increment its count of real runs.
     */
    markrunend(state, destTape);
    state->tp_runs[state->tapeRange]++;

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG, "finished %d-way merge step: %s", state->activeTapes,
             pg_rusage_show(&state->ru_start));
#endif
}
static void mergepreread | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2203 of file tuplesort.c.
References Tuplesortstate::maxTapes, and mergeprereadone().
Referenced by beginmerge(), and mergeonerun().
{
    /* Try to refill the preread buffers of every tape in turn. */
    int         tapenum;

    for (tapenum = 0; tapenum < state->maxTapes; tapenum++)
        mergeprereadone(state, tapenum);
}
static void mergeprereadone | ( | Tuplesortstate * | state, | |
int | srcTape | |||
) | [static] |
Definition at line 2219 of file tuplesort.c.
References Assert, Tuplesortstate::availMem, getlen(), LACKMEM, Tuplesortstate::memtuples, Tuplesortstate::mergeactive, Tuplesortstate::mergeavailmem, Tuplesortstate::mergeavailslots, Tuplesortstate::mergefirstfree, Tuplesortstate::mergefreelist, Tuplesortstate::mergelast, Tuplesortstate::mergenext, READTUP, and SortTuple::tupindex.
Referenced by mergepreread(), and tuplesort_gettuple_common().
/*
 * Preload tuples from one source tape into its in-memory list, stopping
 * when the tape's run ends or its memory/slot quota is used up.  availMem
 * is temporarily swapped to this tape's private quota so that READTUP's
 * memory accounting charges the right budget.
 */
{
    unsigned int tuplen;
    SortTuple   stup;
    int         tupIndex;
    long        priorAvail,
                spaceUsed;

    if (!state->mergeactive[srcTape])
        return;                 /* tape's run is already exhausted */
    priorAvail = state->availMem;
    state->availMem = state->mergeavailmem[srcTape];
    while ((state->mergeavailslots[srcTape] > 0 && !LACKMEM(state)) ||
           state->mergenext[srcTape] == 0)
    {
        /* read next tuple, if any */
        if ((tuplen = getlen(state, srcTape, true)) == 0)
        {
            state->mergeactive[srcTape] = false;
            break;
        }
        READTUP(state, &stup, srcTape, tuplen);
        /* find a free slot in memtuples[] for it */
        tupIndex = state->mergefreelist;
        if (tupIndex)
            state->mergefreelist = state->memtuples[tupIndex].tupindex;
        else
        {
            tupIndex = state->mergefirstfree++;
            Assert(tupIndex < state->memtupsize);
        }
        state->mergeavailslots[srcTape]--;
        /* store tuple, append to list for its tape */
        stup.tupindex = 0;
        state->memtuples[tupIndex] = stup;
        if (state->mergelast[srcTape])
            state->memtuples[state->mergelast[srcTape]].tupindex = tupIndex;
        else
            state->mergenext[srcTape] = tupIndex;
        state->mergelast[srcTape] = tupIndex;
    }
    /* update per-tape and global availmem counts */
    spaceUsed = state->mergeavailmem[srcTape] - state->availMem;
    state->mergeavailmem[srcTape] = state->availMem;
    state->availMem = priorAvail - spaceUsed;
}
static void mergeruns | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 1889 of file tuplesort.c.
References Assert, beginmerge(), Tuplesortstate::currentRun, Tuplesortstate::destTape, Tuplesortstate::Level, LogicalTapeFreeze(), LogicalTapeRewind(), LogicalTapeSetForgetFreeSpace(), Tuplesortstate::memtupcount, mergeonerun(), Tuplesortstate::randomAccess, Tuplesortstate::result_tape, Tuplesortstate::status, Tuplesortstate::tapeRange, Tuplesortstate::tapeset, Tuplesortstate::tp_dummy, Tuplesortstate::tp_runs, Tuplesortstate::tp_tapenum, and TSS_BUILDRUNS.
Referenced by tuplesort_performsort().
/*
 * Merge all the initial runs down to a single sorted result, following
 * Knuth's polyphase merge, Algorithm 5.4.2D.  On exit the status is either
 * TSS_SORTEDONTAPE (result frozen on result_tape) or TSS_FINALMERGE (final
 * merge left to be done on-the-fly at fetch time).
 */
{
    int         tapenum,
                svTape,
                svRuns,
                svDummy;

    Assert(state->status == TSS_BUILDRUNS);
    Assert(state->memtupcount == 0);

    /*
     * If we produced only one initial run (quite likely if the total data
     * volume is between 1X and 2X workMem), we can just use that tape as the
     * finished output, rather than doing a useless merge.  (This obvious
     * optimization is not in Knuth's algorithm.)
     */
    if (state->currentRun == 1)
    {
        state->result_tape = state->tp_tapenum[state->destTape];
        /* must freeze and rewind the finished output tape */
        LogicalTapeFreeze(state->tapeset, state->result_tape);
        state->status = TSS_SORTEDONTAPE;
        return;
    }

    /* End of step D2: rewind all output tapes to prepare for merging */
    for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
        LogicalTapeRewind(state->tapeset, tapenum, false);

    for (;;)
    {
        /*
         * At this point we know that tape[T] is empty.  If there's just one
         * (real or dummy) run left on each input tape, then only one merge
         * pass remains.  If we don't have to produce a materialized sorted
         * tape, we can stop at this point and do the final merge on-the-fly.
         */
        if (!state->randomAccess)
        {
            bool        allOneRun = true;

            Assert(state->tp_runs[state->tapeRange] == 0);
            for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
            {
                if (state->tp_runs[tapenum] + state->tp_dummy[tapenum] != 1)
                {
                    allOneRun = false;
                    break;
                }
            }
            if (allOneRun)
            {
                /* Tell logtape.c we won't be writing anymore */
                LogicalTapeSetForgetFreeSpace(state->tapeset);
                /* Initialize for the final merge pass */
                beginmerge(state);
                state->status = TSS_FINALMERGE;
                return;
            }
        }

        /* Step D5: merge runs onto tape[T] until tape[P] is empty */
        while (state->tp_runs[state->tapeRange - 1] ||
               state->tp_dummy[state->tapeRange - 1])
        {
            bool        allDummy = true;

            for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
            {
                if (state->tp_dummy[tapenum] == 0)
                {
                    allDummy = false;
                    break;
                }
            }
            if (allDummy)
            {
                state->tp_dummy[state->tapeRange]++;
                for (tapenum = 0; tapenum < state->tapeRange; tapenum++)
                    state->tp_dummy[tapenum]--;
            }
            else
                mergeonerun(state);
        }

        /* Step D6: decrease level */
        if (--state->Level == 0)
            break;
        /* rewind output tape T to use as new input */
        LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange],
                          false);
        /* rewind used-up input tape P, and prepare it for write pass */
        LogicalTapeRewind(state->tapeset, state->tp_tapenum[state->tapeRange - 1],
                          true);
        state->tp_runs[state->tapeRange - 1] = 0;

        /*
         * reassign tape units per step D6; note we no longer care about A[]
         */
        svTape = state->tp_tapenum[state->tapeRange];
        svDummy = state->tp_dummy[state->tapeRange];
        svRuns = state->tp_runs[state->tapeRange];
        for (tapenum = state->tapeRange; tapenum > 0; tapenum--)
        {
            state->tp_tapenum[tapenum] = state->tp_tapenum[tapenum - 1];
            state->tp_dummy[tapenum] = state->tp_dummy[tapenum - 1];
            state->tp_runs[tapenum] = state->tp_runs[tapenum - 1];
        }
        state->tp_tapenum[0] = svTape;
        state->tp_dummy[0] = svDummy;
        state->tp_runs[0] = svRuns;
    }

    /*
     * Done.  Knuth says that the result is on TAPE[1], but since we exited
     * the loop without performing the last iteration of step D6, we have not
     * rearranged the tape unit assignment, and therefore the result is on
     * TAPE[T].  We need to do it this way so that we can freeze the final
     * output tape while rewinding it.  The last iteration of step D6 would be
     * a waste of cycles anyway...
     */
    state->result_tape = state->tp_tapenum[state->tapeRange];
    LogicalTapeFreeze(state->tapeset, state->result_tape);
    state->status = TSS_SORTEDONTAPE;
}
static Datum myFunctionCall2Coll | ( | FmgrInfo * | flinfo, | |
Oid | collation, | |||
Datum | arg1, | |||
Datum | arg2 | |||
) | [inline, static] |
Definition at line 2704 of file tuplesort.c.
References FunctionCallInfoData::arg, FunctionCallInfoData::argnull, elog, ERROR, FunctionCallInfoData::flinfo, FmgrInfo::fn_oid, FunctionCallInvoke, InitFunctionCallInfoData, FunctionCallInfoData::isnull, and NULL.
Referenced by inlineApplySortFunction().
{
    FunctionCallInfoData fcinfo;
    Datum       retval;

    /* Set up a two-argument call, with both arguments marked non-null */
    InitFunctionCallInfoData(fcinfo, flinfo, 2, collation, NULL, NULL);

    fcinfo.arg[0] = arg1;
    fcinfo.argnull[0] = false;
    fcinfo.arg[1] = arg2;
    fcinfo.argnull[1] = false;

    retval = FunctionCallInvoke(&fcinfo);

    /* Check for null result, since caller is clearly not expecting one */
    if (fcinfo.isnull)
        elog(ERROR, "function %u returned NULL", fcinfo.flinfo->fn_oid);

    return retval;
}
static void puttuple_common | ( | Tuplesortstate * | state, | |
SortTuple * | tuple | |||
) | [static] |
Definition at line 1187 of file tuplesort.c.
References Assert, Tuplesortstate::bound, Tuplesortstate::bounded, CHECK_FOR_INTERRUPTS, COMPARETUP, Tuplesortstate::currentRun, dumptuples(), elog, ERROR, free_sort_tuple(), grow_memtuples(), inittapes(), LACKMEM, LOG, make_bounded_heap(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, pg_rusage_show(), Tuplesortstate::status, TSS_BOUNDED, TSS_BUILDRUNS, TSS_INITIAL, tuplesort_heap_insert(), and tuplesort_heap_siftup().
Referenced by tuplesort_putdatum(), tuplesort_putheaptuple(), tuplesort_putindextuple(), and tuplesort_puttupleslot().
/*
 * Shared guts of tuplesort_put*: accept one tuple (already copied into
 * *tuple by the caller) according to the current sort state - accumulate
 * in memory, maintain the bounded heap, or feed the run-building heap.
 */
{
    switch (state->status)
    {
        case TSS_INITIAL:

            /*
             * Save the tuple into the unsorted array.  First, grow the array
             * as needed.  Note that we try to grow the array when there is
             * still one free slot remaining --- if we fail, there'll still be
             * room to store the incoming tuple, and then we'll switch to
             * tape-based operation.
             */
            if (state->memtupcount >= state->memtupsize - 1)
            {
                (void) grow_memtuples(state);
                Assert(state->memtupcount < state->memtupsize);
            }
            state->memtuples[state->memtupcount++] = *tuple;

            /*
             * Check if it's time to switch over to a bounded heapsort. We do
             * so if the input tuple count exceeds twice the desired tuple
             * count (this is a heuristic for where heapsort becomes cheaper
             * than a quicksort), or if we've just filled workMem and have
             * enough tuples to meet the bound.
             *
             * Note that once we enter TSS_BOUNDED state we will always try to
             * complete the sort that way.  In the worst case, if later input
             * tuples are larger than earlier ones, this might cause us to
             * exceed workMem significantly.
             */
            if (state->bounded &&
                (state->memtupcount > state->bound * 2 ||
                 (state->memtupcount > state->bound && LACKMEM(state))))
            {
#ifdef TRACE_SORT
                if (trace_sort)
                    elog(LOG, "switching to bounded heapsort at %d tuples: %s",
                         state->memtupcount,
                         pg_rusage_show(&state->ru_start));
#endif
                make_bounded_heap(state);
                return;
            }

            /*
             * Done if we still fit in available memory and have array slots.
             */
            if (state->memtupcount < state->memtupsize && !LACKMEM(state))
                return;

            /*
             * Nope; time to switch to tape-based operation.
             */
            inittapes(state);

            /*
             * Dump tuples until we are back under the limit.
             */
            dumptuples(state, false);
            break;

        case TSS_BOUNDED:

            /*
             * We don't want to grow the array here, so check whether the new
             * tuple can be discarded before putting it in.  This should be a
             * good speed optimization, too, since when there are many more
             * input tuples than the bound, most input tuples can be discarded
             * with just this one comparison.  Note that because we currently
             * have the sort direction reversed, we must check for <= not >=.
             */
            if (COMPARETUP(state, tuple, &state->memtuples[0]) <= 0)
            {
                /* new tuple <= top of the heap, so we can discard it */
                free_sort_tuple(state, tuple);
                CHECK_FOR_INTERRUPTS();
            }
            else
            {
                /* discard top of heap, sift up, insert new tuple */
                free_sort_tuple(state, &state->memtuples[0]);
                tuplesort_heap_siftup(state, false);
                tuplesort_heap_insert(state, tuple, 0, false);
            }
            break;

        case TSS_BUILDRUNS:

            /*
             * Insert the tuple into the heap, with run number currentRun if
             * it can go into the current run, else run number currentRun+1.
             * The tuple can go into the current run if it is >= the first
             * not-yet-output tuple.  (Actually, it could go into the current
             * run if it is >= the most recently output tuple ... but that
             * would require keeping around the tuple we last output, and it's
             * simplest to let writetup free each tuple as soon as it's
             * written.)
             *
             * Note there will always be at least one tuple in the heap at
             * this point; see dumptuples.
             */
            Assert(state->memtupcount > 0);
            if (COMPARETUP(state, tuple, &state->memtuples[0]) >= 0)
                tuplesort_heap_insert(state, tuple, state->currentRun, true);
            else
                tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);

            /*
             * If we are over the memory limit, dump tuples till we're under.
             */
            dumptuples(state, false);
            break;

        default:
            elog(ERROR, "invalid tuplesort state");
            break;
    }
}
static void readtup_cluster | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
int | tapenum, | |||
unsigned int | tuplen
) | [static] |
Definition at line 3053 of file tuplesort.c.
References SortTuple::datum1, GetMemoryChunkSpace(), heap_getattr, HEAPTUPLESIZE, IndexInfo::ii_KeyAttrNumbers, Tuplesortstate::indexInfo, SortTuple::isnull1, LogicalTapeReadExact, palloc(), Tuplesortstate::randomAccess, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, Tuplesortstate::tapeset, Tuplesortstate::tupDesc, SortTuple::tuple, and USEMEM.
/*
 * Read back a CLUSTER-sort tuple written by writetup_cluster: the on-tape
 * layout is (length word already consumed) + t_self + tuple body, with an
 * optional trailing copy of the length word for random access.
 */
{
    /* tuplen on tape counts the length word, item pointer, and body */
    unsigned int t_len = tuplen - sizeof(ItemPointerData) - sizeof(int);
    HeapTuple   tuple = (HeapTuple) palloc(t_len + HEAPTUPLESIZE);

    USEMEM(state, GetMemoryChunkSpace(tuple));
    /* Reconstruct the HeapTupleData header */
    tuple->t_data = (HeapTupleHeader) ((char *) tuple + HEAPTUPLESIZE);
    tuple->t_len = t_len;
    LogicalTapeReadExact(state->tapeset, tapenum,
                         &tuple->t_self, sizeof(ItemPointerData));
    /* We don't currently bother to reconstruct t_tableOid */
    tuple->t_tableOid = InvalidOid;
    /* Read in the tuple body */
    LogicalTapeReadExact(state->tapeset, tapenum,
                         tuple->t_data, tuple->t_len);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeReadExact(state->tapeset, tapenum,
                             &tuplen, sizeof(tuplen));
    stup->tuple = (void *) tuple;
    /* set up first-column key value, if it's a simple column */
    if (state->indexInfo->ii_KeyAttrNumbers[0] != 0)
        stup->datum1 = heap_getattr(tuple,
                                    state->indexInfo->ii_KeyAttrNumbers[0],
                                    state->tupDesc,
                                    &stup->isnull1);
}
static void readtup_datum | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
int | tapenum, | |||
unsigned int | len | |||
) | [static] |
Definition at line 3396 of file tuplesort.c.
References Assert, SortTuple::datum1, Tuplesortstate::datumTypeByVal, GetMemoryChunkSpace(), SortTuple::isnull1, LogicalTapeReadExact, palloc(), PointerGetDatum, Tuplesortstate::randomAccess, Tuplesortstate::tapeset, SortTuple::tuple, and USEMEM.
{
    /* Payload length excludes the leading length word already consumed. */
    unsigned int datalen = len - sizeof(unsigned int);

    if (datalen == 0)
    {
        /* A zero-length payload represents a NULL datum. */
        stup->datum1 = (Datum) 0;
        stup->isnull1 = true;
        stup->tuple = NULL;
    }
    else if (state->datumTypeByVal)
    {
        /* Pass-by-value: the Datum itself was written to the tape. */
        Assert(datalen == sizeof(Datum));
        LogicalTapeReadExact(state->tapeset, tapenum,
                             &stup->datum1, datalen);
        stup->isnull1 = false;
        stup->tuple = NULL;
    }
    else
    {
        /* Pass-by-reference: read the datum body into freshly palloc'd memory. */
        void       *buf = palloc(datalen);

        LogicalTapeReadExact(state->tapeset, tapenum, buf, datalen);
        stup->datum1 = PointerGetDatum(buf);
        stup->isnull1 = false;
        stup->tuple = buf;
        USEMEM(state, GetMemoryChunkSpace(buf));
    }

    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeReadExact(state->tapeset, tapenum,
                             &datalen, sizeof(datalen));
}
static void readtup_heap | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
int | tapenum, | |||
unsigned int | len | |||
) | [static] |
Definition at line 2865 of file tuplesort.c.
References SortTuple::datum1, GetMemoryChunkSpace(), heap_getattr, SortTuple::isnull1, LogicalTapeReadExact, palloc(), Tuplesortstate::randomAccess, Tuplesortstate::sortKeys, SortSupportData::ssup_attno, HeapTupleData::t_data, HeapTupleData::t_len, MinimalTupleData::t_len, Tuplesortstate::tapeset, Tuplesortstate::tupDesc, SortTuple::tuple, and USEMEM.
/*
 * Read back a heap-sort MinimalTuple written by writetup_heap, and rebuild
 * a HeapTupleData shell around it so heap_getattr can extract the leading
 * sort column into stup->datum1.
 */
{
    unsigned int tupbodylen = len - sizeof(int);
    unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
    MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
    char       *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
    HeapTupleData htup;

    USEMEM(state, GetMemoryChunkSpace(tuple));
    /* read in the tuple proper */
    tuple->t_len = tuplen;
    LogicalTapeReadExact(state->tapeset, tapenum,
                         tupbody, tupbodylen);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeReadExact(state->tapeset, tapenum,
                             &tuplen, sizeof(tuplen));
    stup->tuple = (void *) tuple;
    /* set up first-column key value */
    htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
    htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
    stup->datum1 = heap_getattr(&htup,
                                state->sortKeys[0].ssup_attno,
                                state->tupDesc,
                                &stup->isnull1);
}
static void readtup_index | ( | Tuplesortstate * | state, | |
SortTuple * | stup, | |||
int | tapenum, | |||
unsigned int | len | |||
) | [static] |
Definition at line 3295 of file tuplesort.c.
References SortTuple::datum1, GetMemoryChunkSpace(), index_getattr, Tuplesortstate::indexRel, SortTuple::isnull1, LogicalTapeReadExact, palloc(), Tuplesortstate::randomAccess, RelationGetDescr, Tuplesortstate::tapeset, SortTuple::tuple, and USEMEM.
{
    /* The length word on tape counts itself; strip it to get the tuple size. */
    unsigned int itemlen = len - sizeof(unsigned int);
    IndexTuple  itup = (IndexTuple) palloc(itemlen);

    USEMEM(state, GetMemoryChunkSpace(itup));
    LogicalTapeReadExact(state->tapeset, tapenum, itup, itemlen);
    if (state->randomAccess)    /* need trailing length word? */
        LogicalTapeReadExact(state->tapeset, tapenum,
                             &itemlen, sizeof(itemlen));
    stup->tuple = (void *) itup;
    /* set up first-column key value */
    stup->datum1 = index_getattr(itup,
                                 1,
                                 RelationGetDescr(state->indexRel),
                                 &stup->isnull1);
}
static void reversedirection_datum | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 3434 of file tuplesort.c.
References Tuplesortstate::onlyKey, SortSupportData::ssup_nulls_first, and SortSupportData::ssup_reverse.
{
    /* Flip both the sort direction and the nulls ordering of the sole key. */
    SortSupport key = state->onlyKey;

    key->ssup_reverse = !key->ssup_reverse;
    key->ssup_nulls_first = !key->ssup_nulls_first;
}
static void reversedirection_heap | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2893 of file tuplesort.c.
References Tuplesortstate::nKeys, Tuplesortstate::sortKeys, SortSupportData::ssup_nulls_first, and SortSupportData::ssup_reverse.
{
    int         keyno;

    /* Invert the direction and nulls-first flag of every sort key. */
    for (keyno = 0; keyno < state->nKeys; keyno++)
    {
        SortSupport key = &state->sortKeys[keyno];

        key->ssup_reverse = !key->ssup_reverse;
        key->ssup_nulls_first = !key->ssup_nulls_first;
    }
}
static void reversedirection_index_btree | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 3316 of file tuplesort.c.
References Tuplesortstate::indexScanKey, Tuplesortstate::nKeys, SK_BT_DESC, and ScanKeyData::sk_flags.
{
    int         keyno;

    /* Toggling the DESC and NULLS FIRST bits inverts the comparison order. */
    for (keyno = 0; keyno < state->nKeys; keyno++)
        state->indexScanKey[keyno].sk_flags ^= (SK_BT_DESC | SK_BT_NULLS_FIRST);
}
static void reversedirection_index_hash | ( | Tuplesortstate * | state | ) | [static] |
static void selectnewtape | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 1854 of file tuplesort.c.
References Tuplesortstate::destTape, Tuplesortstate::Level, Tuplesortstate::tapeRange, Tuplesortstate::tp_dummy, and Tuplesortstate::tp_fib.
Referenced by dumptuples().
/*
 * Select the tape that the next initial run should go to, per steps D3 and
 * D4 of Knuth's Algorithm 5.4.2D (polyphase merge): prefer the tape with
 * the fewest dummy runs, bumping the merge level and recomputing the
 * Fibonacci run-distribution targets when all dummies are used up.
 */
{
    int         j;
    int         a;

    /* Step D3: advance j (destTape) */
    if (state->tp_dummy[state->destTape] < state->tp_dummy[state->destTape + 1])
    {
        state->destTape++;
        return;
    }
    if (state->tp_dummy[state->destTape] != 0)
    {
        state->destTape = 0;
        return;
    }

    /* Step D4: increase level */
    state->Level++;
    a = state->tp_fib[0];
    for (j = 0; j < state->tapeRange; j++)
    {
        state->tp_dummy[j] = a + state->tp_fib[j + 1] - state->tp_fib[j];
        state->tp_fib[j] = a + state->tp_fib[j + 1];
    }
    state->destTape = 0;
}
static void sort_bounded_heap | ( | Tuplesortstate * | state | ) | [static] |
Definition at line 2556 of file tuplesort.c.
References Assert, Tuplesortstate::bound, Tuplesortstate::bounded, Tuplesortstate::boundUsed, Tuplesortstate::memtupcount, Tuplesortstate::memtuples, REVERSEDIRECTION, Tuplesortstate::status, TSS_BOUNDED, and tuplesort_heap_siftup().
Referenced by tuplesort_performsort().
/*
 * Convert the bounded heap (built with reversed sort direction, so the
 * largest entry is at the root) into a fully sorted memtuples[] array,
 * then restore the original sort direction.
 */
{
    int         tupcount = state->memtupcount;

    Assert(state->status == TSS_BOUNDED);
    Assert(state->bounded);
    Assert(tupcount == state->bound);

    /*
     * We can unheapify in place because each sift-up will remove the largest
     * entry, which we can promptly store in the newly freed slot at the end.
     * Once we're down to a single-entry heap, we're done.
     */
    while (state->memtupcount > 1)
    {
        SortTuple   stup = state->memtuples[0];

        /* this sifts-up the next-largest entry and decreases memtupcount */
        tuplesort_heap_siftup(state, false);
        state->memtuples[state->memtupcount] = stup;
    }
    state->memtupcount = tupcount;

    /*
     * Reverse sort direction back to the original state.  This is not
     * actually necessary but seems like a good idea for tidiness.
     */
    REVERSEDIRECTION(state);

    state->status = TSS_SORTEDINMEM;
    state->boundUsed = true;
}
Tuplesortstate* tuplesort_begin_cluster | ( | TupleDesc | tupDesc, | |
Relation | indexRel, | |||
int | workMem, | |||
bool | randomAccess | |||
) |
Definition at line 663 of file tuplesort.c.
References _bt_mkscankey_nodata(), Assert, BTREE_AM_OID, BuildIndexInfo(), CLUSTER_SORT, Tuplesortstate::comparetup, Tuplesortstate::copytup, CreateExecutorState(), ExprContext::ecxt_scantuple, elog, Tuplesortstate::estate, GetPerTupleExprContext, IndexInfo::ii_Expressions, Tuplesortstate::indexInfo, Tuplesortstate::indexScanKey, LOG, MakeSingleTupleTableSlot(), MemoryContextSwitchTo(), Tuplesortstate::nKeys, NULL, RelationData::rd_rel, Tuplesortstate::readtup, RelationGetNumberOfAttributes, Tuplesortstate::reversedirection, Tuplesortstate::sortcontext, Tuplesortstate::tupDesc, tuplesort_begin_common(), and Tuplesortstate::writetup.
Referenced by copy_heap_data().
/*
 * Initialize a tuplesort for CLUSTER: full heap tuples ordered per the
 * given btree index.  Sets up the cluster-specific tuple routines, and an
 * EState/slot pair when the index has expression columns (needed to call
 * FormIndexDatum at comparison time).
 */
{
    Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
    MemoryContext oldcontext;

    /* Only btree-ordered clustering is supported */
    Assert(indexRel->rd_rel->relam == BTREE_AM_OID);

    oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG,
             "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
             RelationGetNumberOfAttributes(indexRel),
             workMem, randomAccess ? 't' : 'f');
#endif

    state->nKeys = RelationGetNumberOfAttributes(indexRel);

    TRACE_POSTGRESQL_SORT_START(CLUSTER_SORT,
                                false,  /* no unique check */
                                state->nKeys,
                                workMem,
                                randomAccess);

    state->comparetup = comparetup_cluster;
    state->copytup = copytup_cluster;
    state->writetup = writetup_cluster;
    state->readtup = readtup_cluster;
    state->reversedirection = reversedirection_index_btree;

    state->indexInfo = BuildIndexInfo(indexRel);
    state->indexScanKey = _bt_mkscankey_nodata(indexRel);

    state->tupDesc = tupDesc;   /* assume we need not copy tupDesc */

    if (state->indexInfo->ii_Expressions != NULL)
    {
        TupleTableSlot *slot;
        ExprContext *econtext;

        /*
         * We will need to use FormIndexDatum to evaluate the index
         * expressions.  To do that, we need an EState, as well as a
         * TupleTableSlot to put the table tuples into.  The econtext's
         * scantuple has to point to that slot, too.
         */
        state->estate = CreateExecutorState();
        slot = MakeSingleTupleTableSlot(tupDesc);
        econtext = GetPerTupleExprContext(state->estate);
        econtext->ecxt_scantuple = slot;
    }

    MemoryContextSwitchTo(oldcontext);

    return state;
}
static Tuplesortstate * tuplesort_begin_common | ( | int | workMem, | |
bool | randomAccess | |||
) | [static] |
Definition at line 535 of file tuplesort.c.
References ALLOCSET_DEFAULT_INITSIZE, ALLOCSET_DEFAULT_MAXSIZE, ALLOCSET_DEFAULT_MINSIZE, AllocSetContextCreate(), Tuplesortstate::allowedMem, Tuplesortstate::availMem, Tuplesortstate::bounded, Tuplesortstate::boundUsed, CurrentMemoryContext, Tuplesortstate::currentRun, elog, ERROR, GetMemoryChunkSpace(), Tuplesortstate::growmemtuples, LACKMEM, MemoryContextSwitchTo(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, palloc(), palloc0(), pg_rusage_init(), Tuplesortstate::randomAccess, Tuplesortstate::result_tape, Tuplesortstate::sortcontext, Tuplesortstate::status, Tuplesortstate::tapeset, and USEMEM.
Referenced by tuplesort_begin_cluster(), tuplesort_begin_datum(), tuplesort_begin_heap(), tuplesort_begin_index_btree(), and tuplesort_begin_index_hash().
/*
 * Common initialization for all tuplesort variants: create the per-sort
 * memory context, allocate the Tuplesortstate inside it, and set up the
 * memory accounting and initial memtuples array.  Type-specific fields are
 * filled in by the tuplesort_begin_xxx caller.
 */
{
    Tuplesortstate *state;
    MemoryContext sortcontext;
    MemoryContext oldcontext;

    /*
     * Create a working memory context for this sort operation. All data
     * needed by the sort will live inside this context.
     */
    sortcontext = AllocSetContextCreate(CurrentMemoryContext,
                                        "TupleSort",
                                        ALLOCSET_DEFAULT_MINSIZE,
                                        ALLOCSET_DEFAULT_INITSIZE,
                                        ALLOCSET_DEFAULT_MAXSIZE);

    /*
     * Make the Tuplesortstate within the per-sort context.  This way, we
     * don't need a separate pfree() operation for it at shutdown.
     */
    oldcontext = MemoryContextSwitchTo(sortcontext);

    state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

#ifdef TRACE_SORT
    if (trace_sort)
        pg_rusage_init(&state->ru_start);
#endif

    state->status = TSS_INITIAL;
    state->randomAccess = randomAccess;
    state->bounded = false;
    state->boundUsed = false;
    state->allowedMem = workMem * 1024L;    /* workMem is in kilobytes */
    state->availMem = state->allowedMem;
    state->sortcontext = sortcontext;
    state->tapeset = NULL;

    state->memtupcount = 0;
    state->memtupsize = 1024;   /* initial guess */
    state->growmemtuples = true;
    state->memtuples = (SortTuple *) palloc(state->memtupsize * sizeof(SortTuple));

    USEMEM(state, GetMemoryChunkSpace(state->memtuples));

    /* workMem must be large enough for the minimal memtuples array */
    if (LACKMEM(state))
        elog(ERROR, "insufficient memory allowed for sort");

    state->currentRun = 0;

    /*
     * maxTapes, tapeRange, and Algorithm D variables will be initialized by
     * inittapes(), if needed
     */

    state->result_tape = -1;    /* flag that result tape has not been formed */

    MemoryContextSwitchTo(oldcontext);

    return state;
}
Tuplesortstate* tuplesort_begin_datum | ( | Oid | datumType, | |
Oid | sortOperator, | |||
Oid | sortCollation, | |||
bool | nullsFirstFlag, | |||
int | workMem, | |||
bool | randomAccess | |||
) |
Definition at line 804 of file tuplesort.c.
References Tuplesortstate::comparetup, Tuplesortstate::copytup, CurrentMemoryContext, DATUM_SORT, Tuplesortstate::datumType, Tuplesortstate::datumTypeByVal, Tuplesortstate::datumTypeLen, elog, get_typlenbyval(), LOG, MemoryContextSwitchTo(), Tuplesortstate::nKeys, Tuplesortstate::onlyKey, palloc0(), PrepareSortSupportFromOrderingOp(), Tuplesortstate::readtup, Tuplesortstate::reversedirection, Tuplesortstate::sortcontext, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, tuplesort_begin_common(), and Tuplesortstate::writetup.
Referenced by initialize_aggregates(), and validate_index().
/*
 * Initialize a tuplesort over single Datum values of the given type,
 * ordered by sortOperator/sortCollation.  Caches the type's length and
 * by-value property for later copy/write/read operations.
 */
{
    Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
    MemoryContext oldcontext;
    int16       typlen;
    bool        typbyval;

    oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG,
             "begin datum sort: workMem = %d, randomAccess = %c",
             workMem, randomAccess ? 't' : 'f');
#endif

    state->nKeys = 1;           /* always a one-column sort */

    TRACE_POSTGRESQL_SORT_START(DATUM_SORT,
                                false,  /* no unique check */
                                1,
                                workMem,
                                randomAccess);

    state->comparetup = comparetup_datum;
    state->copytup = copytup_datum;
    state->writetup = writetup_datum;
    state->readtup = readtup_datum;
    state->reversedirection = reversedirection_datum;

    state->datumType = datumType;

    /* Prepare SortSupport data */
    state->onlyKey = (SortSupport) palloc0(sizeof(SortSupportData));

    state->onlyKey->ssup_cxt = CurrentMemoryContext;
    state->onlyKey->ssup_collation = sortCollation;
    state->onlyKey->ssup_nulls_first = nullsFirstFlag;

    PrepareSortSupportFromOrderingOp(sortOperator, state->onlyKey);

    /* lookup necessary attributes of the datum type */
    get_typlenbyval(datumType, &typlen, &typbyval);
    state->datumTypeLen = typlen;
    state->datumTypeByVal = typbyval;

    MemoryContextSwitchTo(oldcontext);

    return state;
}
Tuplesortstate* tuplesort_begin_heap | ( | TupleDesc | tupDesc, | |
int | nkeys, | |||
AttrNumber * | attNums, | |||
Oid * | sortOperators, | |||
Oid * | sortCollations, | |||
bool * | nullsFirstFlags, | |||
int | workMem, | |||
bool | randomAccess | |||
) |
Definition at line 599 of file tuplesort.c.
References AssertArg, Tuplesortstate::comparetup, Tuplesortstate::copytup, CurrentMemoryContext, elog, HEAP_SORT, i, LOG, MemoryContextSwitchTo(), Tuplesortstate::nKeys, Tuplesortstate::onlyKey, palloc0(), PrepareSortSupportFromOrderingOp(), Tuplesortstate::readtup, Tuplesortstate::reversedirection, Tuplesortstate::sortcontext, Tuplesortstate::sortKeys, SortSupportData::ssup_attno, SortSupportData::ssup_collation, SortSupportData::ssup_cxt, SortSupportData::ssup_nulls_first, Tuplesortstate::tupDesc, tuplesort_begin_common(), and Tuplesortstate::writetup.
Referenced by ExecSort(), and initialize_aggregates().
/*
 * Initialize a tuplesort over heap tuples (TupleTableSlots), ordered by
 * the given columns/operators/collations.  Builds one SortSupport entry
 * per sort key; with a single key, onlyKey is set to enable the
 * specialized one-key comparison fast path.
 */
{
    Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess);
    MemoryContext oldcontext;
    int         i;

    oldcontext = MemoryContextSwitchTo(state->sortcontext);

    AssertArg(nkeys > 0);

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG,
             "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
             nkeys, workMem, randomAccess ? 't' : 'f');
#endif

    state->nKeys = nkeys;

    TRACE_POSTGRESQL_SORT_START(HEAP_SORT,
                                false,  /* no unique check */
                                nkeys,
                                workMem,
                                randomAccess);

    state->comparetup = comparetup_heap;
    state->copytup = copytup_heap;
    state->writetup = writetup_heap;
    state->readtup = readtup_heap;
    state->reversedirection = reversedirection_heap;

    state->tupDesc = tupDesc;   /* assume we need not copy tupDesc */

    /* Prepare SortSupport data for each column */
    state->sortKeys = (SortSupport) palloc0(nkeys * sizeof(SortSupportData));

    for (i = 0; i < nkeys; i++)
    {
        SortSupport sortKey = state->sortKeys + i;

        AssertArg(attNums[i] != 0);
        AssertArg(sortOperators[i] != 0);

        sortKey->ssup_cxt = CurrentMemoryContext;
        sortKey->ssup_collation = sortCollations[i];
        sortKey->ssup_nulls_first = nullsFirstFlags[i];
        sortKey->ssup_attno = attNums[i];

        PrepareSortSupportFromOrderingOp(sortOperators[i], sortKey);
    }

    /* a one-key sort can use the specialized onlyKey comparison path */
    if (nkeys == 1)
        state->onlyKey = state->sortKeys;

    MemoryContextSwitchTo(oldcontext);

    return state;
}
Tuplesortstate* tuplesort_begin_index_btree | ( | Relation | heapRel, | |
Relation | indexRel, | |||
bool | enforceUnique, | |||
int | workMem, | |||
bool | randomAccess | |||
) |
Definition at line 724 of file tuplesort.c.
References _bt_mkscankey_nodata(), Tuplesortstate::comparetup, Tuplesortstate::copytup, elog, Tuplesortstate::enforceUnique, Tuplesortstate::heapRel, INDEX_SORT, Tuplesortstate::indexRel, Tuplesortstate::indexScanKey, LOG, MemoryContextSwitchTo(), Tuplesortstate::nKeys, Tuplesortstate::readtup, RelationGetNumberOfAttributes, Tuplesortstate::reversedirection, Tuplesortstate::sortcontext, tuplesort_begin_common(), and Tuplesortstate::writetup.
Referenced by _bt_spoolinit().
{ Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(state->sortcontext); #ifdef TRACE_SORT if (trace_sort) elog(LOG, "begin index sort: unique = %c, workMem = %d, randomAccess = %c", enforceUnique ? 't' : 'f', workMem, randomAccess ? 't' : 'f'); #endif state->nKeys = RelationGetNumberOfAttributes(indexRel); TRACE_POSTGRESQL_SORT_START(INDEX_SORT, enforceUnique, state->nKeys, workMem, randomAccess); state->comparetup = comparetup_index_btree; state->copytup = copytup_index; state->writetup = writetup_index; state->readtup = readtup_index; state->reversedirection = reversedirection_index_btree; state->heapRel = heapRel; state->indexRel = indexRel; state->indexScanKey = _bt_mkscankey_nodata(indexRel); state->enforceUnique = enforceUnique; MemoryContextSwitchTo(oldcontext); return state; }
Tuplesortstate* tuplesort_begin_index_hash | ( | Relation | heapRel, | |
Relation | indexRel, | |||
uint32 | hash_mask, | |||
int | workMem, | |||
bool | randomAccess | |||
) |
Definition at line 767 of file tuplesort.c.
References Tuplesortstate::comparetup, Tuplesortstate::copytup, elog, Tuplesortstate::hash_mask, Tuplesortstate::heapRel, Tuplesortstate::indexRel, LOG, MemoryContextSwitchTo(), Tuplesortstate::nKeys, Tuplesortstate::readtup, Tuplesortstate::reversedirection, Tuplesortstate::sortcontext, tuplesort_begin_common(), and Tuplesortstate::writetup.
Referenced by _h_spoolinit().
{ Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess); MemoryContext oldcontext; oldcontext = MemoryContextSwitchTo(state->sortcontext); #ifdef TRACE_SORT if (trace_sort) elog(LOG, "begin index sort: hash_mask = 0x%x, workMem = %d, randomAccess = %c", hash_mask, workMem, randomAccess ? 't' : 'f'); #endif state->nKeys = 1; /* Only one sort column, the hash code */ state->comparetup = comparetup_index_hash; state->copytup = copytup_index; state->writetup = writetup_index; state->readtup = readtup_index; state->reversedirection = reversedirection_index_hash; state->heapRel = heapRel; state->indexRel = indexRel; state->hash_mask = hash_mask; MemoryContextSwitchTo(oldcontext); return state; }
void tuplesort_end | ( | Tuplesortstate * | state | ) |
Definition at line 901 of file tuplesort.c.
References Tuplesortstate::allowedMem, Tuplesortstate::availMem, ExprContext::ecxt_scantuple, elog, Tuplesortstate::estate, ExecDropSingleTupleTableSlot(), FreeExecutorState(), GetPerTupleExprContext, LOG, LogicalTapeSetBlocks(), LogicalTapeSetClose(), MemoryContextDelete(), MemoryContextSwitchTo(), NULL, pg_rusage_show(), Tuplesortstate::sortcontext, and Tuplesortstate::tapeset.
Referenced by _bt_spooldestroy(), _h_spooldestroy(), copy_heap_data(), ExecEndAgg(), ExecEndSort(), ExecReScanAgg(), ExecReScanSort(), initialize_aggregates(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), and validate_index().
/*
 * Body of tuplesort_end(): release all resources held by a sort, including
 * any temporary tape files and the per-sort memory context (which also
 * frees the Tuplesortstate itself, so the pointer is dead afterwards).
 */
{
    /* context swap probably not needed, but let's be safe */
    MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
    long        spaceUsed;

    /* capture space usage before the tapes are deleted below */
    if (state->tapeset)
        spaceUsed = LogicalTapeSetBlocks(state->tapeset);
    else
        spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
#endif

    /*
     * Delete temporary "tape" files, if any.
     *
     * Note: want to include this in reported total cost of sort, hence need
     * for two #ifdef TRACE_SORT sections.
     */
    if (state->tapeset)
        LogicalTapeSetClose(state->tapeset);

#ifdef TRACE_SORT
    if (trace_sort)
    {
        if (state->tapeset)
            elog(LOG, "external sort ended, %ld disk blocks used: %s",
                 spaceUsed, pg_rusage_show(&state->ru_start));
        else
            elog(LOG, "internal sort ended, %ld KB used: %s",
                 spaceUsed, pg_rusage_show(&state->ru_start));
    }

    TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
#else

    /*
     * If you disabled TRACE_SORT, you can still probe sort__done, but you
     * ain't getting space-used stats.
     */
    TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif

    /* Free any execution state created for CLUSTER case */
    if (state->estate != NULL)
    {
        ExprContext *econtext = GetPerTupleExprContext(state->estate);

        ExecDropSingleTupleTableSlot(econtext->ecxt_scantuple);
        FreeExecutorState(state->estate);
    }

    /* must leave sortcontext before deleting it */
    MemoryContextSwitchTo(oldcontext);

    /*
     * Free the per-sort memory context, thereby releasing all working memory,
     * including the Tuplesortstate struct itself.
     */
    MemoryContextDelete(state->sortcontext);
}
void tuplesort_get_stats | ( | Tuplesortstate * | state, | |
const char ** | sortMethod, | |||
const char ** | spaceType, | |||
long * | spaceUsed | |||
) |
Definition at line 2437 of file tuplesort.c.
References Tuplesortstate::allowedMem, Tuplesortstate::availMem, Tuplesortstate::boundUsed, LogicalTapeSetBlocks(), Tuplesortstate::status, Tuplesortstate::tapeset, TSS_FINALMERGE, TSS_SORTEDINMEM, and TSS_SORTEDONTAPE.
Referenced by show_sort_info().
{ /* * Note: it might seem we should provide both memory and disk usage for a * disk-based sort. However, the current code doesn't track memory space * accurately once we have begun to return tuples to the caller (since we * don't account for pfree's the caller is expected to do), so we cannot * rely on availMem in a disk sort. This does not seem worth the overhead * to fix. Is it worth creating an API for the memory context code to * tell us how much is actually used in sortcontext? */ if (state->tapeset) { *spaceType = "Disk"; *spaceUsed = LogicalTapeSetBlocks(state->tapeset) * (BLCKSZ / 1024); } else { *spaceType = "Memory"; *spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024; } switch (state->status) { case TSS_SORTEDINMEM: if (state->boundUsed) *sortMethod = "top-N heapsort"; else *sortMethod = "quicksort"; break; case TSS_SORTEDONTAPE: *sortMethod = "external sort"; break; case TSS_FINALMERGE: *sortMethod = "external merge"; break; default: *sortMethod = "still in progress"; break; } }
bool tuplesort_getdatum | ( | Tuplesortstate * | state, | |
bool | forward, | |||
Datum * | val, | |||
bool * | isNull | |||
) |
Definition at line 1679 of file tuplesort.c.
References SortTuple::datum1, datumCopy(), Tuplesortstate::datumTypeByVal, Tuplesortstate::datumTypeLen, SortTuple::isnull1, MemoryContextSwitchTo(), Tuplesortstate::sortcontext, and tuplesort_gettuple_common().
Referenced by process_ordered_aggregate_single(), and validate_index_heapscan().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; bool should_free; if (!tuplesort_gettuple_common(state, forward, &stup, &should_free)) { MemoryContextSwitchTo(oldcontext); return false; } if (stup.isnull1 || state->datumTypeByVal) { *val = stup.datum1; *isNull = stup.isnull1; } else { if (should_free) *val = stup.datum1; else *val = datumCopy(stup.datum1, false, state->datumTypeLen); *isNull = false; } MemoryContextSwitchTo(oldcontext); return true; }
HeapTuple tuplesort_getheaptuple | ( | Tuplesortstate * | state, | |
bool | forward, | |||
bool * | should_free | |||
) |
Definition at line 1638 of file tuplesort.c.
References MemoryContextSwitchTo(), Tuplesortstate::sortcontext, SortTuple::tuple, and tuplesort_gettuple_common().
Referenced by copy_heap_data().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; if (!tuplesort_gettuple_common(state, forward, &stup, should_free)) stup.tuple = NULL; MemoryContextSwitchTo(oldcontext); return stup.tuple; }
IndexTuple tuplesort_getindextuple | ( | Tuplesortstate * | state, | |
bool | forward, | |||
bool * | should_free | |||
) |
Definition at line 1657 of file tuplesort.c.
References MemoryContextSwitchTo(), Tuplesortstate::sortcontext, SortTuple::tuple, and tuplesort_gettuple_common().
Referenced by _bt_load(), and _h_indexbuild().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; if (!tuplesort_gettuple_common(state, forward, &stup, should_free)) stup.tuple = NULL; MemoryContextSwitchTo(oldcontext); return (IndexTuple) stup.tuple; }
static bool tuplesort_gettuple_common | ( | Tuplesortstate * | state, | |
bool | forward, | |||
SortTuple * | stup, | |||
bool * | should_free | |||
) | [static] |
Definition at line 1407 of file tuplesort.c.
References Assert, Tuplesortstate::availMem, Tuplesortstate::bound, Tuplesortstate::bounded, Tuplesortstate::current, elog, Tuplesortstate::eof_reached, ERROR, getlen(), GetMemoryChunkSpace(), LogicalTapeBackspace(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::mergeavailmem, Tuplesortstate::mergeavailslots, Tuplesortstate::mergefreelist, Tuplesortstate::mergelast, Tuplesortstate::mergenext, mergeprereadone(), Tuplesortstate::randomAccess, READTUP, Tuplesortstate::result_tape, Tuplesortstate::status, Tuplesortstate::tapeset, TSS_FINALMERGE, TSS_SORTEDINMEM, TSS_SORTEDONTAPE, SortTuple::tupindex, SortTuple::tuple, tuplesort_heap_insert(), and tuplesort_heap_siftup().
Referenced by tuplesort_getdatum(), tuplesort_getheaptuple(), tuplesort_getindextuple(), and tuplesort_gettupleslot().
/*
 * Body of tuplesort_gettuple_common(): fetch the next tuple in forward or
 * backward direction into *stup.  Returns false if no more tuples.
 * On success, *should_free tells the caller whether it owns the memory.
 * Behavior depends on which terminal state the sort reached:
 * in-memory array, single sorted tape, or on-the-fly final merge.
 */
{
    unsigned int tuplen;

    switch (state->status)
    {
        case TSS_SORTEDINMEM:
            /* backward scans require randomAccess mode */
            Assert(forward || state->randomAccess);
            *should_free = false;
            if (forward)
            {
                if (state->current < state->memtupcount)
                {
                    *stup = state->memtuples[state->current++];
                    return true;
                }
                state->eof_reached = true;

                /*
                 * Complain if caller tries to retrieve more tuples than
                 * originally asked for in a bounded sort.  This is because
                 * returning EOF here might be the wrong thing.
                 */
                if (state->bounded && state->current >= state->bound)
                    elog(ERROR, "retrieved too many tuples in a bounded sort");

                return false;
            }
            else
            {
                if (state->current <= 0)
                    return false;

                /*
                 * if all tuples are fetched already then we return last
                 * tuple, else - tuple before last returned.
                 */
                if (state->eof_reached)
                    state->eof_reached = false;
                else
                {
                    state->current--;   /* last returned tuple */
                    if (state->current <= 0)
                        return false;
                }
                *stup = state->memtuples[state->current - 1];
                return true;
            }
            break;

        case TSS_SORTEDONTAPE:
            Assert(forward || state->randomAccess);
            /* tape tuples are freshly palloc'd; caller must free them */
            *should_free = true;
            if (forward)
            {
                if (state->eof_reached)
                    return false;
                /* a zero length word marks end of the run */
                if ((tuplen = getlen(state, state->result_tape, true)) != 0)
                {
                    READTUP(state, stup, state->result_tape, tuplen);
                    return true;
                }
                else
                {
                    state->eof_reached = true;
                    return false;
                }
            }

            /*
             * Backward.
             *
             * if all tuples are fetched already then we return last tuple,
             * else - tuple before last returned.
             */
            if (state->eof_reached)
            {
                /*
                 * Seek position is pointing just past the zero tuplen at the
                 * end of file; back up to fetch last tuple's ending length
                 * word.  If seek fails we must have a completely empty file.
                 */
                if (!LogicalTapeBackspace(state->tapeset,
                                          state->result_tape,
                                          2 * sizeof(unsigned int)))
                    return false;
                state->eof_reached = false;
            }
            else
            {
                /*
                 * Back up and fetch previously-returned tuple's ending length
                 * word.  If seek fails, assume we are at start of file.
                 */
                if (!LogicalTapeBackspace(state->tapeset,
                                          state->result_tape,
                                          sizeof(unsigned int)))
                    return false;
                tuplen = getlen(state, state->result_tape, false);

                /*
                 * Back up to get ending length word of tuple before it.
                 */
                if (!LogicalTapeBackspace(state->tapeset,
                                          state->result_tape,
                                          tuplen + 2 * sizeof(unsigned int)))
                {
                    /*
                     * If that fails, presumably the prev tuple is the first
                     * in the file.  Back up so that it becomes next to read
                     * in forward direction (not obviously right, but that is
                     * what in-memory case does).
                     */
                    if (!LogicalTapeBackspace(state->tapeset,
                                              state->result_tape,
                                              tuplen + sizeof(unsigned int)))
                        elog(ERROR, "bogus tuple length in backward scan");
                    return false;
                }
            }

            tuplen = getlen(state, state->result_tape, false);

            /*
             * Now we have the length of the prior tuple, back up and read it.
             * Note: READTUP expects we are positioned after the initial
             * length word of the tuple, so back up to that point.
             */
            if (!LogicalTapeBackspace(state->tapeset,
                                      state->result_tape,
                                      tuplen))
                elog(ERROR, "bogus tuple length in backward scan");
            READTUP(state, stup, state->result_tape, tuplen);
            return true;

        case TSS_FINALMERGE:
            /* final merge supports forward scans only */
            Assert(forward);
            *should_free = true;

            /*
             * This code should match the inner loop of mergeonerun().
             */
            if (state->memtupcount > 0)
            {
                /* heap root holds the smallest tuple; note its source tape */
                int         srcTape = state->memtuples[0].tupindex;
                Size        tuplen;
                int         tupIndex;
                SortTuple  *newtup;

                *stup = state->memtuples[0];
                /* returned tuple is no longer counted in our memory space */
                if (stup->tuple)
                {
                    tuplen = GetMemoryChunkSpace(stup->tuple);
                    state->availMem += tuplen;
                    state->mergeavailmem[srcTape] += tuplen;
                }
                tuplesort_heap_siftup(state, false);
                if ((tupIndex = state->mergenext[srcTape]) == 0)
                {
                    /*
                     * out of preloaded data on this tape, try to read more
                     *
                     * Unlike mergeonerun(), we only preload from the single
                     * tape that's run dry.  See mergepreread() comments.
                     */
                    mergeprereadone(state, srcTape);

                    /*
                     * if still no data, we've reached end of run on this tape
                     */
                    if ((tupIndex = state->mergenext[srcTape]) == 0)
                        return true;
                }
                /* pull next preread tuple from list, insert in heap */
                newtup = &state->memtuples[tupIndex];
                state->mergenext[srcTape] = newtup->tupindex;
                if (state->mergenext[srcTape] == 0)
                    state->mergelast[srcTape] = 0;
                tuplesort_heap_insert(state, newtup, srcTape, false);
                /* put the now-unused memtuples entry on the freelist */
                newtup->tupindex = state->mergefreelist;
                state->mergefreelist = tupIndex;
                state->mergeavailslots[srcTape]++;
                return true;
            }
            return false;

        default:
            elog(ERROR, "invalid tuplesort state");
            return false;       /* keep compiler quiet */
    }
}
bool tuplesort_gettupleslot | ( | Tuplesortstate * | state, | |
bool | forward, | |||
TupleTableSlot * | slot | |||
) |
Definition at line 1608 of file tuplesort.c.
References ExecClearTuple(), ExecStoreMinimalTuple(), MemoryContextSwitchTo(), Tuplesortstate::sortcontext, SortTuple::tuple, and tuplesort_gettuple_common().
Referenced by ExecSort(), and process_ordered_aggregate_multi().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; bool should_free; if (!tuplesort_gettuple_common(state, forward, &stup, &should_free)) stup.tuple = NULL; MemoryContextSwitchTo(oldcontext); if (stup.tuple) { ExecStoreMinimalTuple((MinimalTuple) stup.tuple, slot, should_free); return true; } else { ExecClearTuple(slot); return false; } }
static void tuplesort_heap_insert | ( | Tuplesortstate * | state, | |
SortTuple * | tuple, | |||
int | tupleindex, | |||
bool | checkIndex | |||
) | [static] |
Definition at line 2600 of file tuplesort.c.
References Assert, CHECK_FOR_INTERRUPTS, HEAPCOMPARE, i, Tuplesortstate::memtupcount, Tuplesortstate::memtuples, Tuplesortstate::memtupsize, and SortTuple::tupindex.
Referenced by beginmerge(), inittapes(), make_bounded_heap(), mergeonerun(), puttuple_common(), and tuplesort_gettuple_common().
/*
 * Body of tuplesort_heap_insert(): add *tuple to the bottom of the binary
 * heap in memtuples[] and sift it up to its proper position.  When
 * checkIndex is true, HEAPCOMPARE orders first by run (tupindex), which is
 * how the merge phases keep runs separated.
 */
{
    SortTuple  *memtuples;
    int         j;

    /*
     * Save the tupleindex --- see notes above about writing on *tuple. It's a
     * historical artifact that tupleindex is passed as a separate argument
     * and not in *tuple, but it's notationally convenient so let's leave it
     * that way.
     */
    tuple->tupindex = tupleindex;

    memtuples = state->memtuples;
    /* caller must have ensured there is room for one more entry */
    Assert(state->memtupcount < state->memtupsize);

    CHECK_FOR_INTERRUPTS();

    /*
     * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
     * using 1-based array indexes, not 0-based.
     */
    j = state->memtupcount++;
    while (j > 0)
    {
        int         i = (j - 1) >> 1;   /* parent of slot j */

        if (HEAPCOMPARE(tuple, &memtuples[i]) >= 0)
            break;
        /* parent is larger: move it down into the hole */
        memtuples[j] = memtuples[i];
        j = i;
    }
    memtuples[j] = *tuple;
}
static void tuplesort_heap_siftup | ( | Tuplesortstate * | state, | |
bool | checkIndex | |||
) | [static] |
Definition at line 2641 of file tuplesort.c.
References CHECK_FOR_INTERRUPTS, HEAPCOMPARE, i, Tuplesortstate::memtupcount, and Tuplesortstate::memtuples.
Referenced by dumptuples(), make_bounded_heap(), mergeonerun(), puttuple_common(), sort_bounded_heap(), and tuplesort_gettuple_common().
/*
 * Body of tuplesort_heap_siftup(): remove the root (smallest) entry of the
 * binary heap in memtuples[], then re-establish the heap property by
 * sifting the displaced last element down from the root.
 */
{
    SortTuple  *memtuples = state->memtuples;
    SortTuple  *tuple;
    int         i,
                n;

    /* removing the last element leaves nothing to fix up */
    if (--state->memtupcount <= 0)
        return;

    CHECK_FOR_INTERRUPTS();

    n = state->memtupcount;
    tuple = &memtuples[n];      /* tuple that must be reinserted */
    i = 0;                      /* i is where the "hole" is */
    for (;;)
    {
        int         j = 2 * i + 1;  /* left child of slot i */

        if (j >= n)
            break;
        /* pick the smaller of the two children */
        if (j + 1 < n &&
            HEAPCOMPARE(&memtuples[j], &memtuples[j + 1]) > 0)
            j++;
        if (HEAPCOMPARE(tuple, &memtuples[j]) <= 0)
            break;
        /* child is smaller: move it up into the hole */
        memtuples[i] = memtuples[j];
        i = j;
    }
    memtuples[i] = *tuple;
}
void tuplesort_markpos | ( | Tuplesortstate * | state | ) |
Definition at line 2369 of file tuplesort.c.
References Assert, Tuplesortstate::current, elog, Tuplesortstate::eof_reached, ERROR, LogicalTapeTell(), Tuplesortstate::markpos_block, Tuplesortstate::markpos_eof, Tuplesortstate::markpos_offset, MemoryContextSwitchTo(), Tuplesortstate::randomAccess, Tuplesortstate::result_tape, Tuplesortstate::sortcontext, Tuplesortstate::status, Tuplesortstate::tapeset, TSS_SORTEDINMEM, and TSS_SORTEDONTAPE.
Referenced by ExecSortMarkPos().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); Assert(state->randomAccess); switch (state->status) { case TSS_SORTEDINMEM: state->markpos_offset = state->current; state->markpos_eof = state->eof_reached; break; case TSS_SORTEDONTAPE: LogicalTapeTell(state->tapeset, state->result_tape, &state->markpos_block, &state->markpos_offset); state->markpos_eof = state->eof_reached; break; default: elog(ERROR, "invalid tuplesort state"); break; } MemoryContextSwitchTo(oldcontext); }
int tuplesort_merge_order | ( | long | allowedMem | ) |
Definition at line 1718 of file tuplesort.c.
References Max, MERGE_BUFFER_SIZE, MINORDER, and TAPE_BUFFER_OVERHEAD.
Referenced by cost_sort(), and inittapes().
{ int mOrder; /* * We need one tape for each merge input, plus another one for the output, * and each of these tapes needs buffer space. In addition we want * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't * count). * * Note: you might be thinking we need to account for the memtuples[] * array in this calculation, but we effectively treat that as part of the * MERGE_BUFFER_SIZE workspace. */ mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) / (MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD); /* Even in minimum memory, use at least a MINORDER merge */ mOrder = Max(mOrder, MINORDER); return mOrder; }
void tuplesort_performsort | ( | Tuplesortstate * | state | ) |
Definition at line 1312 of file tuplesort.c.
References Tuplesortstate::activeTapes, Tuplesortstate::comparetup, Tuplesortstate::current, dumptuples(), elog, Tuplesortstate::eof_reached, ERROR, LOG, Tuplesortstate::markpos_block, Tuplesortstate::markpos_eof, Tuplesortstate::markpos_offset, MemoryContextSwitchTo(), Tuplesortstate::memtupcount, Tuplesortstate::memtuples, mergeruns(), NULL, Tuplesortstate::onlyKey, pg_rusage_show(), sort_bounded_heap(), Tuplesortstate::sortcontext, Tuplesortstate::status, TSS_BOUNDED, TSS_BUILDRUNS, TSS_FINALMERGE, and TSS_INITIAL.
Referenced by _bt_leafbuild(), _h_indexbuild(), copy_heap_data(), ExecSort(), process_ordered_aggregate_multi(), process_ordered_aggregate_single(), and validate_index().
/*
 * Body of tuplesort_performsort(): complete the sort after all input has
 * been supplied, leaving the state ready for tuple retrieval.  The action
 * taken depends on how far the sort progressed during input.
 */
{
    MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext);

#ifdef TRACE_SORT
    if (trace_sort)
        elog(LOG, "performsort starting: %s",
             pg_rusage_show(&state->ru_start));
#endif

    switch (state->status)
    {
        case TSS_INITIAL:

            /*
             * We were able to accumulate all the tuples within the allowed
             * amount of memory.  Just qsort 'em and we're done.
             */
            if (state->memtupcount > 1)
            {
                /* Can we use the single-key sort function? */
                if (state->onlyKey != NULL)
                    qsort_ssup(state->memtuples, state->memtupcount,
                               state->onlyKey);
                else
                    qsort_tuple(state->memtuples,
                                state->memtupcount,
                                state->comparetup, state);
            }
            state->current = 0;
            state->eof_reached = false;
            state->markpos_offset = 0;
            state->markpos_eof = false;
            state->status = TSS_SORTEDINMEM;
            break;

        case TSS_BOUNDED:

            /*
             * We were able to accumulate all the tuples required for output
             * in memory, using a heap to eliminate excess tuples.  Now we
             * have to transform the heap to a properly-sorted array.
             */
            sort_bounded_heap(state);
            state->current = 0;
            state->eof_reached = false;
            state->markpos_offset = 0;
            state->markpos_eof = false;
            state->status = TSS_SORTEDINMEM;
            break;

        case TSS_BUILDRUNS:

            /*
             * Finish tape-based sort.  First, flush all tuples remaining in
             * memory out to tape; then merge until we have a single remaining
             * run (or, if !randomAccess, one run per tape).  Note that
             * mergeruns sets the correct state->status.
             */
            dumptuples(state, true);
            mergeruns(state);
            state->eof_reached = false;
            state->markpos_block = 0L;
            state->markpos_offset = 0;
            state->markpos_eof = false;
            break;

        default:
            elog(ERROR, "invalid tuplesort state");
            break;
    }

#ifdef TRACE_SORT
    if (trace_sort)
    {
        if (state->status == TSS_FINALMERGE)
            elog(LOG, "performsort done (except %d-way final merge): %s",
                 state->activeTapes,
                 pg_rusage_show(&state->ru_start));
        else
            elog(LOG, "performsort done: %s",
                 pg_rusage_show(&state->ru_start));
    }
#endif

    MemoryContextSwitchTo(oldcontext);
}
void tuplesort_putdatum | ( | Tuplesortstate * | state, | |
Datum | val, | |||
bool | isNull | |||
) |
Definition at line 1155 of file tuplesort.c.
References SortTuple::datum1, datumCopy(), DatumGetPointer, Tuplesortstate::datumTypeByVal, Tuplesortstate::datumTypeLen, GetMemoryChunkSpace(), SortTuple::isnull1, MemoryContextSwitchTo(), puttuple_common(), Tuplesortstate::sortcontext, SortTuple::tuple, and USEMEM.
Referenced by advance_aggregates(), and validate_index_callback().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; /* * If it's a pass-by-reference value, copy it into memory we control, and * decrease availMem. Then call the common code. */ if (isNull || state->datumTypeByVal) { stup.datum1 = val; stup.isnull1 = isNull; stup.tuple = NULL; /* no separate storage */ } else { stup.datum1 = datumCopy(val, false, state->datumTypeLen); stup.isnull1 = false; stup.tuple = DatumGetPointer(stup.datum1); USEMEM(state, GetMemoryChunkSpace(stup.tuple)); } puttuple_common(state, &stup); MemoryContextSwitchTo(oldcontext); }
void tuplesort_putheaptuple | ( | Tuplesortstate * | state, | |
HeapTuple | tup | |||
) |
Definition at line 1111 of file tuplesort.c.
References COPYTUP, MemoryContextSwitchTo(), puttuple_common(), and Tuplesortstate::sortcontext.
Referenced by copy_heap_data().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; /* * Copy the given tuple into memory we control, and decrease availMem. * Then call the common code. */ COPYTUP(state, &stup, (void *) tup); puttuple_common(state, &stup); MemoryContextSwitchTo(oldcontext); }
void tuplesort_putindextuple | ( | Tuplesortstate * | state, | |
IndexTuple | tuple | |||
) |
Definition at line 1133 of file tuplesort.c.
References COPYTUP, MemoryContextSwitchTo(), puttuple_common(), and Tuplesortstate::sortcontext.
Referenced by _bt_spool(), and _h_spool().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; /* * Copy the given tuple into memory we control, and decrease availMem. * Then call the common code. */ COPYTUP(state, &stup, (void *) tuple); puttuple_common(state, &stup); MemoryContextSwitchTo(oldcontext); }
void tuplesort_puttupleslot | ( | Tuplesortstate * | state, | |
TupleTableSlot * | slot | |||
) |
Definition at line 1089 of file tuplesort.c.
References COPYTUP, MemoryContextSwitchTo(), puttuple_common(), and Tuplesortstate::sortcontext.
Referenced by advance_aggregates(), and ExecSort().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); SortTuple stup; /* * Copy the given tuple into memory we control, and decrease availMem. * Then call the common code. */ COPYTUP(state, &stup, (void *) slot); puttuple_common(state, &stup); MemoryContextSwitchTo(oldcontext); }
void tuplesort_rescan | ( | Tuplesortstate * | state | ) |
Definition at line 2334 of file tuplesort.c.
References Assert, Tuplesortstate::current, elog, Tuplesortstate::eof_reached, ERROR, LogicalTapeRewind(), Tuplesortstate::markpos_block, Tuplesortstate::markpos_eof, Tuplesortstate::markpos_offset, MemoryContextSwitchTo(), Tuplesortstate::randomAccess, Tuplesortstate::result_tape, Tuplesortstate::sortcontext, Tuplesortstate::status, Tuplesortstate::tapeset, TSS_SORTEDINMEM, and TSS_SORTEDONTAPE.
Referenced by ExecReScanSort().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); Assert(state->randomAccess); switch (state->status) { case TSS_SORTEDINMEM: state->current = 0; state->eof_reached = false; state->markpos_offset = 0; state->markpos_eof = false; break; case TSS_SORTEDONTAPE: LogicalTapeRewind(state->tapeset, state->result_tape, false); state->eof_reached = false; state->markpos_block = 0L; state->markpos_offset = 0; state->markpos_eof = false; break; default: elog(ERROR, "invalid tuplesort state"); break; } MemoryContextSwitchTo(oldcontext); }
void tuplesort_restorepos | ( | Tuplesortstate * | state | ) |
Definition at line 2401 of file tuplesort.c.
References Assert, Tuplesortstate::current, elog, Tuplesortstate::eof_reached, ERROR, LogicalTapeSeek(), Tuplesortstate::markpos_block, Tuplesortstate::markpos_eof, Tuplesortstate::markpos_offset, MemoryContextSwitchTo(), Tuplesortstate::randomAccess, Tuplesortstate::result_tape, Tuplesortstate::sortcontext, Tuplesortstate::status, Tuplesortstate::tapeset, TSS_SORTEDINMEM, and TSS_SORTEDONTAPE.
Referenced by ExecSortRestrPos().
{ MemoryContext oldcontext = MemoryContextSwitchTo(state->sortcontext); Assert(state->randomAccess); switch (state->status) { case TSS_SORTEDINMEM: state->current = state->markpos_offset; state->eof_reached = state->markpos_eof; break; case TSS_SORTEDONTAPE: if (!LogicalTapeSeek(state->tapeset, state->result_tape, state->markpos_block, state->markpos_offset)) elog(ERROR, "tuplesort_restorepos failed"); state->eof_reached = state->markpos_eof; break; default: elog(ERROR, "invalid tuplesort state"); break; } MemoryContextSwitchTo(oldcontext); }
void tuplesort_set_bound | ( | Tuplesortstate * | state, | |
int64 | bound | |||
) |
Definition at line 870 of file tuplesort.c.
References Assert, Tuplesortstate::bound, Tuplesortstate::bounded, Tuplesortstate::memtupcount, Tuplesortstate::status, and TSS_INITIAL.
Referenced by ExecSort().
{ /* Assert we're called before loading any tuples */ Assert(state->status == TSS_INITIAL); Assert(state->memtupcount == 0); Assert(!state->bounded); #ifdef DEBUG_BOUNDED_SORT /* Honor GUC setting that disables the feature (for easy testing) */ if (!optimize_bounded_sort) return; #endif /* We want to be able to compute bound * 2, so limit the setting */ if (bound > (int64) (INT_MAX / 2)) return; state->bounded = true; state->bound = (int) bound; }
static void writetup_cluster | ( | Tuplesortstate * | state, | |
int | tapenum, | |||
SortTuple * | stup | |||
) | [static] |
Definition at line 3032 of file tuplesort.c.
References FREEMEM, GetMemoryChunkSpace(), heap_freetuple(), LogicalTapeWrite(), Tuplesortstate::randomAccess, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, Tuplesortstate::tapeset, and SortTuple::tuple.
{ HeapTuple tuple = (HeapTuple) stup->tuple; unsigned int tuplen = tuple->t_len + sizeof(ItemPointerData) + sizeof(int); /* We need to store t_self, but not other fields of HeapTupleData */ LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen)); LogicalTapeWrite(state->tapeset, tapenum, &tuple->t_self, sizeof(ItemPointerData)); LogicalTapeWrite(state->tapeset, tapenum, tuple->t_data, tuple->t_len); if (state->randomAccess) /* need trailing length word? */ LogicalTapeWrite(state->tapeset, tapenum, &tuplen, sizeof(tuplen)); FREEMEM(state, GetMemoryChunkSpace(tuple)); heap_freetuple(tuple); }
static void writetup_datum | ( | Tuplesortstate * | state, | |
int | tapenum, | |||
SortTuple * | stup | |||
) | [static] |
Definition at line 3355 of file tuplesort.c.
References Assert, SortTuple::datum1, DatumGetPointer, datumGetSize(), Tuplesortstate::datumTypeByVal, Tuplesortstate::datumTypeLen, FREEMEM, GetMemoryChunkSpace(), SortTuple::isnull1, LogicalTapeWrite(), pfree(), Tuplesortstate::randomAccess, Tuplesortstate::tapeset, and SortTuple::tuple.
{ void *waddr; unsigned int tuplen; unsigned int writtenlen; if (stup->isnull1) { waddr = NULL; tuplen = 0; } else if (state->datumTypeByVal) { waddr = &stup->datum1; tuplen = sizeof(Datum); } else { waddr = DatumGetPointer(stup->datum1); tuplen = datumGetSize(stup->datum1, false, state->datumTypeLen); Assert(tuplen != 0); } writtenlen = tuplen + sizeof(unsigned int); LogicalTapeWrite(state->tapeset, tapenum, (void *) &writtenlen, sizeof(writtenlen)); LogicalTapeWrite(state->tapeset, tapenum, waddr, tuplen); if (state->randomAccess) /* need trailing length word? */ LogicalTapeWrite(state->tapeset, tapenum, (void *) &writtenlen, sizeof(writtenlen)); if (stup->tuple) { FREEMEM(state, GetMemoryChunkSpace(stup->tuple)); pfree(stup->tuple); } }
static void writetup_heap | ( | Tuplesortstate * | state, | |
int | tapenum, | |||
SortTuple * | stup | |||
) | [static] |
Definition at line 2841 of file tuplesort.c.
References FREEMEM, GetMemoryChunkSpace(), heap_free_minimal_tuple(), LogicalTapeWrite(), Tuplesortstate::randomAccess, MinimalTupleData::t_len, Tuplesortstate::tapeset, and SortTuple::tuple.
{ MinimalTuple tuple = (MinimalTuple) stup->tuple; /* the part of the MinimalTuple we'll write: */ char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET; unsigned int tupbodylen = tuple->t_len - MINIMAL_TUPLE_DATA_OFFSET; /* total on-disk footprint: */ unsigned int tuplen = tupbodylen + sizeof(int); LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen)); LogicalTapeWrite(state->tapeset, tapenum, (void *) tupbody, tupbodylen); if (state->randomAccess) /* need trailing length word? */ LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen)); FREEMEM(state, GetMemoryChunkSpace(tuple)); heap_free_minimal_tuple(tuple); }
static void writetup_index | ( | Tuplesortstate * | state, | |
int | tapenum, | |||
SortTuple * | stup | |||
) | [static] |
Definition at line 3276 of file tuplesort.c.
References FREEMEM, GetMemoryChunkSpace(), IndexTupleSize, LogicalTapeWrite(), pfree(), Tuplesortstate::randomAccess, Tuplesortstate::tapeset, and SortTuple::tuple.
{ IndexTuple tuple = (IndexTuple) stup->tuple; unsigned int tuplen; tuplen = IndexTupleSize(tuple) + sizeof(tuplen); LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen)); LogicalTapeWrite(state->tapeset, tapenum, (void *) tuple, IndexTupleSize(tuple)); if (state->randomAccess) /* need trailing length word? */ LogicalTapeWrite(state->tapeset, tapenum, (void *) &tuplen, sizeof(tuplen)); FREEMEM(state, GetMemoryChunkSpace(tuple)); pfree(tuple); }