/*-------------------------------------------------------------------------
 *
 * pg_buffercache_pages.c
 *    display some contents of the buffer cache
 *
 * contrib/pg_buffercache/pg_buffercache_pages.c
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"

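/*
 * NUM_BUFFERCACHE_PAGES_MIN_ELEM is the column count of the original (1.0)
 * result row, which had no pinning_backends column; NUM_BUFFERCACHE_PAGES_ELEM
 * is the current column count including it.  Either is accepted, see the
 * natts checks below.
 */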
#define NUM_BUFFERCACHE_PAGES_MIN_ELEM 8
#define NUM_BUFFERCACHE_PAGES_ELEM 9

PG_MODULE_MAGIC;

/*
 * Record structure holding the buffer cache data to be exposed.
 */
typedef struct
{
    uint32      bufferid;
    Oid         relfilenode;
    Oid         reltablespace;
    Oid         reldatabase;
    ForkNumber  forknum;
    BlockNumber blocknum;
    bool        isvalid;
    bool        isdirty;
    uint16      usagecount;

    /*
     * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
     * being pinned by too many backends and each backend will only pin once
     * because of bufmgr.c's PrivateRefCount infrastructure.
     */
    int32       pinning_backends;
} BufferCachePagesRec;

/*
 * Function context for data persisting over repeated calls.
 */
typedef struct
{
    TupleDesc   tupdesc;
    BufferCachePagesRec *record;
} BufferCachePagesContext;

/*
 * Function returning data from the shared buffer cache - buffer number,
 * relation node/tablespace/database/blocknum and dirty indicator.
 */
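/*
 * The extension's SQL script wraps this set-returning function in the
 * pg_buffercache view.  As a rough sketch (the authoritative definition
 * lives in the extension's pg_buffercache--*.sql files, not here), the
 * 1.1 wrapper looks approximately like:
 *
 *    CREATE VIEW pg_buffercache AS
 *        SELECT P.* FROM pg_buffercache_pages() AS P
 *        (bufferid integer, relfilenode oid, reltablespace oid,
 *         reldatabase oid, relforknumber int2, relblocknumber int8,
 *         isdirty bool, usage_count int2, pinning_backends int4);
 *
 * A 1.0 installation declares only the first eight columns, which is why
 * the code below inspects the caller's expected tuple descriptor.
 */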
PG_FUNCTION_INFO_V1(pg_buffercache_pages);

Datum
pg_buffercache_pages(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    Datum       result;
    MemoryContext oldcontext;
    BufferCachePagesContext *fctx;  /* User function context. */
    TupleDesc   tupledesc;
    TupleDesc   expected_tupledesc;
    HeapTuple   tuple;

    if (SRF_IS_FIRSTCALL())
    {
        int         i;

        funcctx = SRF_FIRSTCALL_INIT();

        /* Switch context when allocating stuff to be used in later calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* Create a user function context for cross-call persistence */
        fctx = (BufferCachePagesContext *) palloc(sizeof(BufferCachePagesContext));

        /*
         * To smoothly support upgrades from version 1.0 of this extension,
         * transparently handle the (non-)existence of the pinning_backends
         * column.  Unfortunately we have to look up the call's result type
         * for that: we can't just assume the result type implied by the
         * latest function definition, since that could crash when somebody
         * still uses the old (or even a wrong) definition.
         */
        if (get_call_result_type(fcinfo, NULL, &expected_tupledesc) != TYPEFUNC_COMPOSITE)
            elog(ERROR, "return type must be a row type");

        if (expected_tupledesc->natts < NUM_BUFFERCACHE_PAGES_MIN_ELEM ||
            expected_tupledesc->natts > NUM_BUFFERCACHE_PAGES_ELEM)
            elog(ERROR, "incorrect number of output arguments");

        /* Construct a tuple descriptor for the result rows. */
        tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts, false);
        TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid",
                           INT4OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 2, "relfilenode",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 3, "reltablespace",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 4, "reldatabase",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 5, "relforknumber",
                           INT2OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 6, "relblocknumber",
                           INT8OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 7, "isdirty",
                           BOOLOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 8, "usage_count",
                           INT2OID, -1, 0);

        if (expected_tupledesc->natts == NUM_BUFFERCACHE_PAGES_ELEM)
            TupleDescInitEntry(tupledesc, (AttrNumber) 9, "pinning_backends",
                               INT4OID, -1, 0);

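        /*
         * BlessTupleDesc registers the descriptor with the typcache so that
         * tuples built from it can be returned as composite Datums.
         */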
        fctx->tupdesc = BlessTupleDesc(tupledesc);

        /* Allocate NBuffers worth of BufferCachePagesRec records. */
        fctx->record = (BufferCachePagesRec *) palloc(sizeof(BufferCachePagesRec) * NBuffers);

        /* Set max calls and remember the user function context. */
        funcctx->max_calls = NBuffers;
        funcctx->user_fctx = fctx;

        /* Return to original context when allocating transient memory */
        MemoryContextSwitchTo(oldcontext);

        /*
         * To get a consistent picture of the buffer state, we must lock all
         * partitions of the buffer map.  Needless to say, this is horrible
         * for concurrency.  Must grab locks in increasing order to avoid
         * possible deadlocks.
         */
        for (i = 0; i < NUM_BUFFER_PARTITIONS; i++)
            LWLockAcquire(BufMappingPartitionLockByIndex(i), LW_SHARED);

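        /*
         * While we hold every buffer-mapping partition lock in shared mode,
         * no other backend can complete a mapping change (those need an
         * exclusive partition lock), so the snapshot taken below is
         * consistent; that is also why holding them hurts concurrency.
         */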
        /*
         * Scan through all the buffers, saving the relevant fields in the
         * fctx->record structure.
         */
        for (i = 0; i < NBuffers; i++)
        {
            volatile BufferDesc *bufHdr;

            bufHdr = GetBufferDescriptor(i);
            /* Lock each buffer header before inspecting. */
            LockBufHdr(bufHdr);

            fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
            fctx->record[i].relfilenode = bufHdr->tag.rnode.relNode;
            fctx->record[i].reltablespace = bufHdr->tag.rnode.spcNode;
            fctx->record[i].reldatabase = bufHdr->tag.rnode.dbNode;
            fctx->record[i].forknum = bufHdr->tag.forkNum;
            fctx->record[i].blocknum = bufHdr->tag.blockNum;
            fctx->record[i].usagecount = bufHdr->usage_count;
            fctx->record[i].pinning_backends = bufHdr->refcount;

            if (bufHdr->flags & BM_DIRTY)
                fctx->record[i].isdirty = true;
            else
                fctx->record[i].isdirty = false;

            /* Note if the buffer is valid, and has storage created */
            if ((bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_TAG_VALID))
                fctx->record[i].isvalid = true;
            else
                fctx->record[i].isvalid = false;

            UnlockBufHdr(bufHdr);
        }

        /*
         * And release locks.  We do this in reverse order for two reasons:
         * (1) Anyone else who needs more than one of the locks will be
         * trying to lock them in increasing order; we don't want to release
         * the other process until it can get all the locks it needs.
         * (2) This avoids O(N^2) behavior inside LWLockRelease.
         */
        for (i = NUM_BUFFER_PARTITIONS; --i >= 0;)
            LWLockRelease(BufMappingPartitionLockByIndex(i));
    }

    funcctx = SRF_PERCALL_SETUP();

    /* Get the saved state */
    fctx = funcctx->user_fctx;

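    /*
     * One result row is emitted per call; funcctx->call_cntr advances on
     * each SRF_RETURN_NEXT until it reaches max_calls (NBuffers).
     */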
    if (funcctx->call_cntr < funcctx->max_calls)
    {
        uint32      i = funcctx->call_cntr;
        Datum       values[NUM_BUFFERCACHE_PAGES_ELEM];
        bool        nulls[NUM_BUFFERCACHE_PAGES_ELEM];

        values[0] = Int32GetDatum(fctx->record[i].bufferid);
        nulls[0] = false;

        /*
         * Set all fields except the bufferid to null if the buffer is unused
         * or not valid.
         */
        if (fctx->record[i].blocknum == InvalidBlockNumber ||
            fctx->record[i].isvalid == false)
        {
            nulls[1] = true;
            nulls[2] = true;
            nulls[3] = true;
            nulls[4] = true;
            nulls[5] = true;
            nulls[6] = true;
            nulls[7] = true;
            /* unused for v1.0 callers, but the array is always long enough */
            nulls[8] = true;
        }
        else
        {
            values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode);
            nulls[1] = false;
            values[2] = ObjectIdGetDatum(fctx->record[i].reltablespace);
            nulls[2] = false;
            values[3] = ObjectIdGetDatum(fctx->record[i].reldatabase);
            nulls[3] = false;
            values[4] = ObjectIdGetDatum(fctx->record[i].forknum);
            nulls[4] = false;
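            /*
             * BlockNumber is an unsigned 32-bit value, so widen it to int64
             * before handing it to the int8 output column.
             */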
            values[5] = Int64GetDatum((int64) fctx->record[i].blocknum);
            nulls[5] = false;
            values[6] = BoolGetDatum(fctx->record[i].isdirty);
            nulls[6] = false;
            values[7] = Int16GetDatum(fctx->record[i].usagecount);
            nulls[7] = false;
            /* unused for v1.0 callers, but the array is always long enough */
            values[8] = Int32GetDatum(fctx->record[i].pinning_backends);
            nulls[8] = false;
        }

        /* Build and return the tuple. */
        tuple = heap_form_tuple(fctx->tupdesc, values, nulls);
        result = HeapTupleGetDatum(tuple);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else
        SRF_RETURN_DONE(funcctx);
}