Mirror of https://git.postgresql.org/git/postgresql.git (synced 2025-01-06 15:24:56 +08:00)

Performance improvement for MultiRecordFreeSpace on large relations ---
avoid O(N^2) behavior.  Problem noted and fixed by Stephen Marshall
<smarshall@wsicorp.com>, with some help from Tom Lane.

This commit is contained in:
parent de96cd5e3a
commit b2735fcd52
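For orientation before the per-file hunks: the commit replaces the parallel
pages[]/spaceAvail[] arrays with a single array of PageFreeSpaceInfo structs,
drops the maxPage argument from MultiRecordFreeSpace, and requires callers to
pass entries sorted by block number. The sketch below is assembled from the
freespace.h and freespace.c hunks further down; it is a summary, not a
complete header.

typedef struct PageFreeSpaceInfo
{
	BlockNumber blkno;			/* which page in relation */
	Size		avail;			/* space available on this page */
} PageFreeSpaceInfo;

/*
 * New contract: entries for pages >= minPage are discarded first; then, if
 * nPages > 0, pageSpaces[] (sorted by blkno, with no entries before minPage)
 * is recorded.  The FSM may still discard some of the supplied information.
 */
extern void MultiRecordFreeSpace(RelFileNode *rel,
								 BlockNumber minPage,
								 int nPages,
								 PageFreeSpaceInfo *pageSpaces);

Callers that used to pass parallel pages[]/spaceAvail[] arrays
(vac_update_fsm, lazy_update_fsm, smgrtruncate) now build or sort a single
pageSpaces[] array instead, as the hunks below show.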
src/backend/commands/vacuum.c

@@ -13,7 +13,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.237 2002/09/04 20:31:16 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.238 2002/09/20 19:56:01 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -1321,9 +1321,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
 pfree(vtlinks);
 }

-elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
-Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \
-Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s",
+elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; "
+"Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, "
+"MaxLen %lu; Re-using: Free/Avail. Space %.0f/%.0f; "
+"EndEmpty/Avail. Pages %u/%u.\n\t%s",
 nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
 new_pages, num_tuples, tups_vacuumed,
 nkeep, vacrelstats->num_vtlinks,
@@ -2597,8 +2598,8 @@ scan_index(Relation indrel, double num_tuples)
 {
 if (stats->num_index_tuples > num_tuples ||
 !vac_is_partial_index(indrel))
-elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
+elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
+"\n\tRecreate the index.",
 RelationGetRelationName(indrel),
 stats->num_index_tuples, num_tuples);
 }
@@ -2651,8 +2652,8 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
 {
 if (stats->num_index_tuples > num_tuples + keep_tuples ||
 !vac_is_partial_index(indrel))
-elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f).\
-\n\tRecreate the index.",
+elog(WARNING, "Index %s: NUMBER OF INDEX' TUPLES (%.0f) IS NOT THE SAME AS HEAP' (%.0f)."
+"\n\tRecreate the index.",
 RelationGetRelationName(indrel),
 stats->num_index_tuples, num_tuples);
 }
@@ -2731,35 +2732,32 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
 {
 int nPages = fraged_pages->num_pages;
 int i;
-BlockNumber *pages;
-Size *spaceAvail;
+PageFreeSpaceInfo *pageSpaces;

 /* +1 to avoid palloc(0) */
-pages = (BlockNumber *) palloc((nPages + 1) * sizeof(BlockNumber));
-spaceAvail = (Size *) palloc((nPages + 1) * sizeof(Size));
+pageSpaces = (PageFreeSpaceInfo *)
+palloc((nPages + 1) * sizeof(PageFreeSpaceInfo));

 for (i = 0; i < nPages; i++)
 {
-pages[i] = fraged_pages->pagedesc[i]->blkno;
-spaceAvail[i] = fraged_pages->pagedesc[i]->free;
+pageSpaces[i].blkno = fraged_pages->pagedesc[i]->blkno;
+pageSpaces[i].avail = fraged_pages->pagedesc[i]->free;

 /*
 * fraged_pages may contain entries for pages that we later
 * decided to truncate from the relation; don't enter them into
-* the map!
+* the free space map!
 */
-if (pages[i] >= rel_pages)
+if (pageSpaces[i].blkno >= rel_pages)
 {
 nPages = i;
 break;
 }
 }

-MultiRecordFreeSpace(&onerel->rd_node,
-0, MaxBlockNumber,
-nPages, pages, spaceAvail);
-pfree(pages);
-pfree(spaceAvail);
+MultiRecordFreeSpace(&onerel->rd_node, 0, nPages, pageSpaces);
+
+pfree(pageSpaces);
 }

 /* Copy a VacPage structure */
src/backend/commands/vacuumlazy.c

@@ -31,7 +31,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.19 2002/09/04 20:31:17 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.20 2002/09/20 19:56:01 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -87,9 +87,8 @@ typedef struct LVRelStats
 /* We use a simple array until it fills up, then convert to heap */
 bool fs_is_heap; /* are we using heap organization? */
 int num_free_pages; /* current # of entries */
-int max_free_pages; /* # slots allocated in arrays */
-BlockNumber *free_pages; /* array or heap of block numbers */
-Size *free_spaceavail; /* array or heap of available space */
+int max_free_pages; /* # slots allocated in array */
+PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
 } LVRelStats;


@@ -119,6 +118,7 @@ static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
 static bool dummy_tid_reaped(ItemPointer itemptr, void *state);
 static void lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats);
 static int vac_cmp_itemptr(const void *left, const void *right);
+static int vac_cmp_page_spaces(const void *left, const void *right);


 /*
@@ -432,8 +432,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 lazy_scan_index(Irel[i], vacrelstats);
 }

-elog(elevel, "Pages %u: Changed %u, Empty %u; \
-Tup %.0f: Vac %.0f, Keep %.0f, UnUsed %.0f.\n\tTotal %s",
+elog(elevel, "Pages %u: Changed %u, Empty %u; Tup %.0f: Vac %.0f, Keep %.0f, UnUsed %.0f.\n\tTotal %s",
 nblocks, changed_pages, empty_pages,
 num_tuples, tups_vacuumed, nkeep, nunused,
 vac_show_rusage(&ru0));
@@ -662,8 +661,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 {
 BlockNumber old_rel_pages = vacrelstats->rel_pages;
 BlockNumber new_rel_pages;
-BlockNumber *pages;
-Size *spaceavail;
+PageFreeSpaceInfo *pageSpaces;
 int n;
 int i,
 j;
@@ -736,20 +734,20 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 * Drop free-space info for removed blocks; these must not get entered
 * into the FSM!
 */
-pages = vacrelstats->free_pages;
-spaceavail = vacrelstats->free_spaceavail;
+pageSpaces = vacrelstats->free_pages;
 n = vacrelstats->num_free_pages;
 j = 0;
 for (i = 0; i < n; i++)
 {
-if (pages[i] < new_rel_pages)
+if (pageSpaces[i].blkno < new_rel_pages)
 {
-pages[j] = pages[i];
-spaceavail[j] = spaceavail[i];
+pageSpaces[j] = pageSpaces[i];
 j++;
 }
 }
 vacrelstats->num_free_pages = j;
+/* We destroyed the heap ordering, so mark array unordered */
+vacrelstats->fs_is_heap = false;

 /*
 * We keep the exclusive lock until commit (perhaps not necessary)?
@@ -913,10 +911,8 @@ lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
 vacrelstats->fs_is_heap = false;
 vacrelstats->num_free_pages = 0;
 vacrelstats->max_free_pages = maxpages;
-vacrelstats->free_pages = (BlockNumber *)
-palloc(maxpages * sizeof(BlockNumber));
-vacrelstats->free_spaceavail = (Size *)
-palloc(maxpages * sizeof(Size));
+vacrelstats->free_pages = (PageFreeSpaceInfo *)
+palloc(maxpages * sizeof(PageFreeSpaceInfo));
 }

 /*
@@ -946,8 +942,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
 BlockNumber page,
 Size avail)
 {
-BlockNumber *pages;
-Size *spaceavail;
+PageFreeSpaceInfo *pageSpaces;
 int n;

 /* Ignore pages with little free space */
@@ -955,15 +950,14 @@ lazy_record_free_space(LVRelStats *vacrelstats,
 return;

 /* Copy pointers to local variables for notational simplicity */
-pages = vacrelstats->free_pages;
-spaceavail = vacrelstats->free_spaceavail;
+pageSpaces = vacrelstats->free_pages;
 n = vacrelstats->max_free_pages;

 /* If we haven't filled the array yet, just keep adding entries */
 if (vacrelstats->num_free_pages < n)
 {
-pages[vacrelstats->num_free_pages] = page;
-spaceavail[vacrelstats->num_free_pages] = avail;
+pageSpaces[vacrelstats->num_free_pages].blkno = page;
+pageSpaces[vacrelstats->num_free_pages].avail = avail;
 vacrelstats->num_free_pages++;
 return;
 }
@@ -971,7 +965,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
 /*----------
 * The rest of this routine works with "heap" organization of the
 * free space arrays, wherein we maintain the heap property
-* spaceavail[(j-1) div 2] <= spaceavail[j] for 0 < j < n.
+* avail[(j-1) div 2] <= avail[j] for 0 < j < n.
 * In particular, the zero'th element always has the smallest available
 * space and can be discarded to make room for a new page with more space.
 * See Knuth's discussion of heap-based priority queues, sec 5.2.3;
@@ -991,8 +985,8 @@ lazy_record_free_space(LVRelStats *vacrelstats,

 while (--l >= 0)
 {
-BlockNumber R = pages[l];
-Size K = spaceavail[l];
+BlockNumber R = pageSpaces[l].blkno;
+Size K = pageSpaces[l].avail;
 int i; /* i is where the "hole" is */

 i = l;
@@ -1002,23 +996,22 @@ lazy_record_free_space(LVRelStats *vacrelstats,

 if (j >= n)
 break;
-if (j + 1 < n && spaceavail[j] > spaceavail[j + 1])
+if (j + 1 < n && pageSpaces[j].avail > pageSpaces[j + 1].avail)
 j++;
-if (K <= spaceavail[j])
+if (K <= pageSpaces[j].avail)
 break;
-pages[i] = pages[j];
-spaceavail[i] = spaceavail[j];
+pageSpaces[i] = pageSpaces[j];
 i = j;
 }
-pages[i] = R;
-spaceavail[i] = K;
+pageSpaces[i].blkno = R;
+pageSpaces[i].avail = K;
 }

 vacrelstats->fs_is_heap = true;
 }

 /* If new page has more than zero'th entry, insert it into heap */
-if (avail > spaceavail[0])
+if (avail > pageSpaces[0].avail)
 {
 /*
 * Notionally, we replace the zero'th entry with the new data, and
@@ -1034,16 +1027,15 @@ lazy_record_free_space(LVRelStats *vacrelstats,

 if (j >= n)
 break;
-if (j + 1 < n && spaceavail[j] > spaceavail[j + 1])
+if (j + 1 < n && pageSpaces[j].avail > pageSpaces[j + 1].avail)
 j++;
-if (avail <= spaceavail[j])
+if (avail <= pageSpaces[j].avail)
 break;
-pages[i] = pages[j];
-spaceavail[i] = spaceavail[j];
+pageSpaces[i] = pageSpaces[j];
 i = j;
 }
-pages[i] = page;
-spaceavail[i] = avail;
+pageSpaces[i].blkno = page;
+pageSpaces[i].avail = avail;
 }
 }
@@ -1085,16 +1077,17 @@ dummy_tid_reaped(ItemPointer itemptr, void *state)
 static void
 lazy_update_fsm(Relation onerel, LVRelStats *vacrelstats)
 {
+PageFreeSpaceInfo *pageSpaces = vacrelstats->free_pages;
+int nPages = vacrelstats->num_free_pages;
+
 /*
-* Since MultiRecordFreeSpace doesn't currently impose any
-* restrictions on the ordering of the input, we can just pass it the
-* arrays as-is, whether they are in heap or linear order.
+* Sort data into order, as required by MultiRecordFreeSpace.
 */
-MultiRecordFreeSpace(&onerel->rd_node,
-0, MaxBlockNumber,
-vacrelstats->num_free_pages,
-vacrelstats->free_pages,
-vacrelstats->free_spaceavail);
+if (nPages > 1)
+qsort(pageSpaces, nPages, sizeof(PageFreeSpaceInfo),
+vac_cmp_page_spaces);
+
+MultiRecordFreeSpace(&onerel->rd_node, 0, nPages, pageSpaces);
 }

 /*
@@ -1126,3 +1119,16 @@ vac_cmp_itemptr(const void *left, const void *right)

 return 0;
 }
+
+static int
+vac_cmp_page_spaces(const void *left, const void *right)
+{
+PageFreeSpaceInfo *linfo = (PageFreeSpaceInfo *) left;
+PageFreeSpaceInfo *rinfo = (PageFreeSpaceInfo *) right;
+
+if (linfo->blkno < rinfo->blkno)
+return -1;
+else if (linfo->blkno > rinfo->blkno)
+return 1;
+return 0;
+}
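The heap logic in lazy_record_free_space() above can be easier to follow
outside the backend. The standalone C program below is a generic illustration
of the same technique (a size-bounded min-heap on available space, sifted
down as in Knuth sec 5.2.3), not PostgreSQL code; the BlockNumber and Size
typedefs are simplified stand-ins, and sift_down, max, and input are
illustrative names that do not appear in the commit.

/*
 * Standalone illustration (not backend code) of the bounded tracking scheme
 * used by lazy_record_free_space(): keep the "max" pages with the most free
 * space by maintaining a min-heap on avail, so the entry with the least free
 * space sits at index 0 and is evicted when a better page arrives.
 */
#include <stdio.h>
#include <stdlib.h>

typedef unsigned int BlockNumber;	/* simplified stand-in */
typedef size_t Size;				/* simplified stand-in */

typedef struct
{
	BlockNumber blkno;
	Size		avail;
} PageFreeSpaceInfo;

/* Sift the entry at "hole" down until the min-heap property holds again. */
static void
sift_down(PageFreeSpaceInfo *a, int n, int hole)
{
	PageFreeSpaceInfo item = a[hole];

	for (;;)
	{
		int			j = 2 * hole + 1;	/* left child */

		if (j >= n)
			break;
		if (j + 1 < n && a[j].avail > a[j + 1].avail)
			j++;				/* step to the smaller child */
		if (item.avail <= a[j].avail)
			break;
		a[hole] = a[j];
		hole = j;
	}
	a[hole] = item;
}

int
main(void)
{
	PageFreeSpaceInfo pages[4];
	PageFreeSpaceInfo input[] = {
		{10, 100}, {11, 900}, {12, 50}, {13, 700}, {14, 800}, {15, 300}
	};
	const int	max = 4;
	int			n = 0;
	int			i,
				l;

	for (i = 0; i < (int) (sizeof(input) / sizeof(input[0])); i++)
	{
		if (n < max)
		{
			/* array not full yet: just append */
			pages[n++] = input[i];
			if (n == max)
			{
				/* array just filled: heapify it once */
				for (l = max / 2 - 1; l >= 0; l--)
					sift_down(pages, max, l);
			}
		}
		else if (input[i].avail > pages[0].avail)
		{
			/* better than the current minimum: replace and re-sift */
			pages[0] = input[i];
			sift_down(pages, max, 0);
		}
	}

	for (i = 0; i < n; i++)
		printf("blkno %u: %zu bytes free\n", pages[i].blkno, pages[i].avail);
	return 0;
}

With a cap of four entries, the program ends up holding the four inputs with
the largest avail values (900, 800, 700, 300), which is exactly what the
VACUUM code needs: the most promising pages survive when there are more
candidates than FSM slots.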
src/backend/storage/freespace/freespace.c

@@ -8,7 +8,7 @@
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.13 2002/09/04 20:31:25 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.14 2002/09/20 19:56:01 tgl Exp $
 *
 *
 * NOTES:
@@ -99,6 +99,7 @@ struct FSMRelation
 * about */
 int numChunks; /* number of FSMChunks allocated to rel */
 FSMChunk *relChunks; /* linked list of page info chunks */
+FSMChunk *lastChunk; /* last chunk in linked list */
 };

 /*
@@ -142,6 +143,7 @@ static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
 static bool insert_fsm_page_entry(FSMRelation *fsmrel,
 BlockNumber page, Size spaceAvail,
 FSMChunk *chunk, int chunkRelIndex);
+static bool append_fsm_chunk(FSMRelation *fsmrel);
 static bool push_fsm_page_entry(BlockNumber page, Size spaceAvail,
 FSMChunk *chunk, int chunkRelIndex);
 static void delete_fsm_page_entry(FSMRelation *fsmrel, FSMChunk *chunk,
@@ -359,21 +361,20 @@ RecordAndGetPageWithFreeSpace(RelFileNode *rel,
 * MultiRecordFreeSpace - record available-space info about multiple pages
 * of a relation in one call.
 *
-* First, if minPage <= maxPage, the FSM must discard any entries it has for
-* pages in that page number range (inclusive). This allows obsolete info
-* to be discarded. Second, if nPages > 0, record the page numbers and free
-* space amounts in the given arrays. As with RecordFreeSpace, the FSM is at
-* liberty to discard some of the information. However, it *must* discard
-* previously stored info in the minPage..maxPage range (for example, this
-* case is used to remove info about deleted pages during relation truncation).
+* First, the FSM must discard any entries it has for pages >= minPage.
+* This allows obsolete info to be discarded (for example, it is used when
+* truncating a relation). Any entries before minPage should be kept.
+*
+* Second, if nPages > 0, record the page numbers and free space amounts in
+* the given pageSpaces[] array. As with RecordFreeSpace, the FSM is at
+* liberty to discard some of this information. The pageSpaces[] array must
+* be sorted in order by blkno, and may not contain entries before minPage.
 */
 void
 MultiRecordFreeSpace(RelFileNode *rel,
 BlockNumber minPage,
-BlockNumber maxPage,
 int nPages,
-BlockNumber *pages,
-Size *spaceAvail)
+PageFreeSpaceInfo *pageSpaces)
 {
 FSMRelation *fsmrel;
 int i;
@@ -383,59 +384,64 @@ MultiRecordFreeSpace(RelFileNode *rel,
 if (fsmrel)
 {
 /*
-* Remove entries in specified range
+* Remove entries >= minPage
 */
-if (minPage <= maxPage)
-{
 FSMChunk *chunk;
 int chunkRelIndex;
-bool done;

 /* Use lookup to locate first entry >= minPage */
 lookup_fsm_page_entry(fsmrel, minPage, &chunk, &chunkRelIndex);
-/* Set free space to 0 for each page within range */
-done = false;
-while (chunk && !done)
+/* Set free space to 0 for each page >= minPage */
+while (chunk)
 {
 int numPages = chunk->numPages;

-for (; chunkRelIndex < numPages; chunkRelIndex++)
-{
-if (chunk->pages[chunkRelIndex] > maxPage)
-{
-done = true;
-break;
-}
-chunk->bytes[chunkRelIndex] = 0;
-}
+for (i = chunkRelIndex; i < numPages; i++)
+chunk->bytes[i] = 0;
 chunk = chunk->next;
 chunkRelIndex = 0;
 }
-/* Now compact out the zeroed entries */
+/* Now compact out the zeroed entries, along with any other junk */
 compact_fsm_page_list(fsmrel);
-}

 /*
 * Add new entries, if appropriate.
 *
-* XXX we could probably be smarter about this than doing it
-* completely separately for each one. FIXME later.
-*
-* One thing we can do is short-circuit the process entirely if a
-* page (a) has too little free space to be recorded, and (b) is
-* within the minPage..maxPage range --- then we deleted any old
-* entry above, and we aren't going to make a new one. This is
-* particularly useful since in most cases, all the passed pages
-* will in fact be in the minPage..maxPage range.
+* This can be much cheaper than a full fsm_record_free_space()
+* call because we know we are appending to the end of the relation.
 */
 for (i = 0; i < nPages; i++)
 {
-BlockNumber page = pages[i];
-Size avail = spaceAvail[i];
+BlockNumber page = pageSpaces[i].blkno;
+Size avail = pageSpaces[i].avail;
+FSMChunk *chunk;

-if (avail >= fsmrel->threshold ||
-page < minPage || page > maxPage)
-fsm_record_free_space(fsmrel, page, avail);
+/* Check caller provides sorted data */
+if (i > 0 ? (page <= pageSpaces[i-1].blkno) : (page < minPage))
+elog(ERROR, "MultiRecordFreeSpace: data not in page order");
+
+/* Ignore pages too small to fit */
+if (avail < fsmrel->threshold)
+continue;
+
+/* Get another chunk if needed */
+/* We may need to loop if acquire_fsm_free_space() fails */
+while ((chunk = fsmrel->lastChunk) == NULL ||
+chunk->numPages >= CHUNKPAGES)
+{
+if (!append_fsm_chunk(fsmrel))
+acquire_fsm_free_space();
+}
+
+/* Recheck in case threshold was raised by acquire */
+if (avail < fsmrel->threshold)
+continue;
+
+/* Okay to store */
+chunk->pages[chunk->numPages] = page;
+chunk->bytes[chunk->numPages] = (ItemLength) avail;
+chunk->numPages++;
+fsmrel->numPages++;
 }
 }
 LWLockRelease(FreeSpaceLock);
@@ -538,6 +544,7 @@ create_fsm_rel(RelFileNode *rel)
 fsmrel->numPages = 0;
 fsmrel->numChunks = 0;
 fsmrel->relChunks = NULL;
+fsmrel->lastChunk = NULL;
 /* Discard lowest-priority existing rel, if we are over limit */
 if (FreeSpaceMap->numRels >= MaxFSMRelations)
 delete_fsm_rel(FreeSpaceMap->relListTail);
@@ -847,29 +854,12 @@ insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,
 if (fsmrel->numPages >= fsmrel->numChunks * CHUNKPAGES)
 {
 /* No free space within chunk list, so need another chunk */
-FSMChunk *newChunk;
-
-if ((newChunk = FreeSpaceMap->freeChunks) == NULL)
+if (!append_fsm_chunk(fsmrel))
 return false; /* can't do it */
-FreeSpaceMap->freeChunks = newChunk->next;
-FreeSpaceMap->numFreeChunks--;
-newChunk->next = NULL;
-newChunk->numPages = 0;
-if (fsmrel->relChunks == NULL)
-fsmrel->relChunks = newChunk;
-else
-{
-FSMChunk *priorChunk = fsmrel->relChunks;
-
-while (priorChunk->next != NULL)
-priorChunk = priorChunk->next;
-priorChunk->next = newChunk;
-}
-fsmrel->numChunks++;
 if (chunk == NULL)
 {
 /* Original search found that new page belongs at end */
-chunk = newChunk;
+chunk = fsmrel->lastChunk;
 chunkRelIndex = 0;
 }
 }
@@ -900,6 +890,38 @@ insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,
 }
 }

+/*
+* Add one chunk to a FSMRelation's chunk list, if possible.
+*
+* Returns TRUE if successful, FALSE if no space available. Note that on
+* success, the new chunk is easily accessible via fsmrel->lastChunk.
+*/
+static bool
+append_fsm_chunk(FSMRelation *fsmrel)
+{
+FSMChunk *newChunk;
+
+/* Remove a chunk from the freelist */
+if ((newChunk = FreeSpaceMap->freeChunks) == NULL)
+return false; /* can't do it */
+FreeSpaceMap->freeChunks = newChunk->next;
+FreeSpaceMap->numFreeChunks--;
+
+/* Initialize chunk to empty */
+newChunk->next = NULL;
+newChunk->numPages = 0;
+
+/* Link it into FSMRelation */
+if (fsmrel->relChunks == NULL)
+fsmrel->relChunks = newChunk;
+else
+fsmrel->lastChunk->next = newChunk;
+fsmrel->lastChunk = newChunk;
+fsmrel->numChunks++;
+
+return true;
+}
+
 /*
 * Auxiliary routine for insert_fsm_page_entry: try to push entries to the
 * right to insert at chunk/chunkRelIndex. Return TRUE if successful.
@@ -1016,6 +1038,7 @@ compact_fsm_page_list(FSMRelation *fsmrel)
 fsmrel->numPages = 0;
 fsmrel->numChunks = 0;
 fsmrel->relChunks = NULL;
+fsmrel->lastChunk = NULL;
 free_chunk_chain(dstChunk);
 }
 else
@@ -1026,6 +1049,7 @@ compact_fsm_page_list(FSMRelation *fsmrel)
 dstChunk->numPages = dstIndex;
 free_chunk_chain(dstChunk->next);
 dstChunk->next = NULL;
+fsmrel->lastChunk = dstChunk;
 }
 }

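The removed while (priorChunk->next != NULL) walk above is where the O(N^2)
behavior came from: every appended chunk re-traversed the whole chunk list.
The new lastChunk field plus append_fsm_chunk() makes each append O(1). The
fragment below is a generic, self-contained illustration of that difference,
not backend code; ChunkList, append_by_walking, and append_by_tail are
illustrative names, and only the relChunks/lastChunk fields mirror the real
structure.

#include <assert.h>
#include <stdlib.h>

typedef struct Chunk
{
	struct Chunk *next;
	/* page/bytes payload omitted for the illustration */
} Chunk;

typedef struct
{
	Chunk	   *relChunks;		/* head of chunk list */
	Chunk	   *lastChunk;		/* tail of chunk list (the new field) */
} ChunkList;

/* Old approach: walk to the end on every append -- O(n) per call. */
static void
append_by_walking(ChunkList *list, Chunk *c)
{
	c->next = NULL;
	if (list->relChunks == NULL)
		list->relChunks = c;
	else
	{
		Chunk	   *prior = list->relChunks;

		while (prior->next != NULL)
			prior = prior->next;
		prior->next = c;
	}
}

/* New approach: append through the tail pointer -- O(1) per call. */
static void
append_by_tail(ChunkList *list, Chunk *c)
{
	c->next = NULL;
	if (list->relChunks == NULL)
		list->relChunks = c;
	else
		list->lastChunk->next = c;
	list->lastChunk = c;
}

int
main(void)
{
	ChunkList	walked = {NULL, NULL};
	ChunkList	tailed = {NULL, NULL};
	int			i;

	for (i = 0; i < 1000; i++)
	{
		append_by_walking(&walked, calloc(1, sizeof(Chunk)));
		append_by_tail(&tailed, calloc(1, sizeof(Chunk)));
	}
	/* both build the same list; only the cost per append differs */
	assert(tailed.lastChunk != NULL && tailed.lastChunk->next == NULL);
	return 0;
}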
src/backend/storage/smgr/smgr.c

@@ -11,7 +11,7 @@
 *
 *
 * IDENTIFICATION
-* $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.60 2002/09/04 20:31:26 momjian Exp $
+* $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.61 2002/09/20 19:56:01 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -410,9 +410,7 @@ smgrtruncate(int16 which, Relation reln, BlockNumber nblocks)
 * for the about-to-be-deleted blocks. We want to be sure it
 * won't return bogus block numbers later on.
 */
-MultiRecordFreeSpace(&reln->rd_node,
-nblocks, MaxBlockNumber,
-0, NULL, NULL);
+MultiRecordFreeSpace(&reln->rd_node, nblocks, 0, NULL);

 newblks = (*(smgrsw[which].smgr_truncate)) (reln, nblocks);
 if (newblks == InvalidBlockNumber)
src/include/storage/freespace.h

@@ -7,7 +7,7 @@
 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
-* $Id: freespace.h,v 1.7 2002/06/20 20:29:52 momjian Exp $
+* $Id: freespace.h,v 1.8 2002/09/20 19:56:01 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -18,6 +18,16 @@
 #include "storage/relfilenode.h"


+/*
+* exported types
+*/
+typedef struct PageFreeSpaceInfo
+{
+BlockNumber blkno; /* which page in relation */
+Size avail; /* space available on this page */
+} PageFreeSpaceInfo;
+
+
 extern int MaxFSMRelations;
 extern int MaxFSMPages;

@@ -37,10 +47,8 @@ extern BlockNumber RecordAndGetPageWithFreeSpace(RelFileNode *rel,
 Size spaceNeeded);
 extern void MultiRecordFreeSpace(RelFileNode *rel,
 BlockNumber minPage,
-BlockNumber maxPage,
 int nPages,
-BlockNumber *pages,
-Size *spaceAvail);
+PageFreeSpaceInfo *pageSpaces);
 extern void FreeSpaceMapForgetRel(RelFileNode *rel);
 extern void FreeSpaceMapForgetDatabase(Oid dbid);
