/*-------------------------------------------------------------------------
 *
 * pg_buffercache_pages.c
 *    display some contents of the buffer cache
 *
 *    contrib/pg_buffercache/pg_buffercache_pages.c
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/htup_details.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
#include "storage/buf_internals.h"
#include "storage/bufmgr.h"

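/*
 * Number of output columns: 8 for callers installed with the version 1.0
 * definition (no pinning_backends column), 9 for the current definition.
 */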
#define NUM_BUFFERCACHE_PAGES_MIN_ELEM 8
#define NUM_BUFFERCACHE_PAGES_ELEM 9

PG_MODULE_MAGIC;

/*
 * Record structure holding the cache data to be exposed.
 */
typedef struct
{
    uint32      bufferid;
    Oid         relfilenode;
    Oid         reltablespace;
    Oid         reldatabase;
    ForkNumber  forknum;
    BlockNumber blocknum;
    bool        isvalid;
    bool        isdirty;
    uint16      usagecount;

    /*
     * An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
     * being pinned by too many backends and each backend will only pin once
     * because of bufmgr.c's PrivateRefCount infrastructure.
     */
    int32       pinning_backends;
} BufferCachePagesRec;

/*
 * Function context for data persisting over repeated calls.
 */
typedef struct
{
    TupleDesc   tupdesc;
    BufferCachePagesRec *record;
} BufferCachePagesContext;

/*
 * Function returning data from the shared buffer cache - buffer number,
 * relation node/tablespace/database/blocknum and dirty indicator.
 */
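/*
 * The extension's SQL script normally wraps this function in the
 * pg_buffercache view, so it is typically queried as
 * "SELECT * FROM pg_buffercache" rather than called directly.
 */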
PG_FUNCTION_INFO_V1(pg_buffercache_pages);

Datum
pg_buffercache_pages(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    Datum       result;
    MemoryContext oldcontext;
    BufferCachePagesContext *fctx;  /* User function context. */
    TupleDesc   tupledesc;
    TupleDesc   expected_tupledesc;
    HeapTuple   tuple;

    if (SRF_IS_FIRSTCALL())
    {
        int         i;

        funcctx = SRF_FIRSTCALL_INIT();

        /* Switch context when allocating stuff to be used in later calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* Create a user function context for cross-call persistence */
        fctx = (BufferCachePagesContext *) palloc(sizeof(BufferCachePagesContext));

        /*
         * To smoothly support upgrades from version 1.0 of this extension,
         * transparently handle the (non-)existence of the pinning_backends
         * column.  That requires looking up the call's actual result type:
         * we can't simply rely on the result type implied by the function
         * definition, because an old (or even wrong) definition could
         * otherwise lead to a crash.
         */
        if (get_call_result_type(fcinfo, NULL, &expected_tupledesc) != TYPEFUNC_COMPOSITE)
            elog(ERROR, "return type must be a row type");

        if (expected_tupledesc->natts < NUM_BUFFERCACHE_PAGES_MIN_ELEM ||
            expected_tupledesc->natts > NUM_BUFFERCACHE_PAGES_ELEM)
            elog(ERROR, "incorrect number of output arguments");

        /* Construct a tuple descriptor for the result rows. */
        tupledesc = CreateTemplateTupleDesc(expected_tupledesc->natts);
        TupleDescInitEntry(tupledesc, (AttrNumber) 1, "bufferid",
                           INT4OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 2, "relfilenode",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 3, "reltablespace",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 4, "reldatabase",
                           OIDOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 5, "relforknumber",
                           INT2OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 6, "relblocknumber",
                           INT8OID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 7, "isdirty",
                           BOOLOID, -1, 0);
        TupleDescInitEntry(tupledesc, (AttrNumber) 8, "usage_count",
                           INT2OID, -1, 0);

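        /* The ninth column only exists for the current (post-1.0) definition. */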
        if (expected_tupledesc->natts == NUM_BUFFERCACHE_PAGES_ELEM)
            TupleDescInitEntry(tupledesc, (AttrNumber) 9, "pinning_backends",
                               INT4OID, -1, 0);

        fctx->tupdesc = BlessTupleDesc(tupledesc);

        /* Allocate NBuffers worth of BufferCachePagesRec records. */
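        /*
         * With very large shared_buffers settings this array can exceed the
         * normal 1 GB palloc limit, hence the "huge" allocation.
         */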
        fctx->record = (BufferCachePagesRec *)
            MemoryContextAllocHuge(CurrentMemoryContext,
                                   sizeof(BufferCachePagesRec) * NBuffers);

        /* Set max calls and remember the user function context. */
        funcctx->max_calls = NBuffers;
        funcctx->user_fctx = fctx;

        /* Return to original context when allocating transient memory */
        MemoryContextSwitchTo(oldcontext);

        /*
         * Scan through all the buffers, saving the relevant fields in the
         * fctx->record structure.
         *
         * We don't hold the partition locks, so we don't get a consistent
         * snapshot across all buffers, but we do grab the buffer header
         * locks, so the information of each buffer is self-consistent.
         */
        for (i = 0; i < NBuffers; i++)
        {
            BufferDesc *bufHdr;
            uint32      buf_state;

            bufHdr = GetBufferDescriptor(i);
            /* Lock each buffer header before inspecting. */
            buf_state = LockBufHdr(bufHdr);

            fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
            fctx->record[i].relfilenode = bufHdr->tag.rnode.relNode;
            fctx->record[i].reltablespace = bufHdr->tag.rnode.spcNode;
            fctx->record[i].reldatabase = bufHdr->tag.rnode.dbNode;
            fctx->record[i].forknum = bufHdr->tag.forkNum;
            fctx->record[i].blocknum = bufHdr->tag.blockNum;
            fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);
            fctx->record[i].pinning_backends = BUF_STATE_GET_REFCOUNT(buf_state);

            if (buf_state & BM_DIRTY)
                fctx->record[i].isdirty = true;
            else
                fctx->record[i].isdirty = false;

            /* Note if the buffer is valid, and has storage created */
            if ((buf_state & BM_VALID) && (buf_state & BM_TAG_VALID))
                fctx->record[i].isvalid = true;
            else
                fctx->record[i].isvalid = false;

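            /* Done with this buffer; release its header lock. */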
            UnlockBufHdr(bufHdr, buf_state);
        }
    }

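    /* On every call, return the next saved entry as a result row. */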
    funcctx = SRF_PERCALL_SETUP();

    /* Get the saved state */
    fctx = funcctx->user_fctx;

    if (funcctx->call_cntr < funcctx->max_calls)
    {
        uint32      i = funcctx->call_cntr;
        Datum       values[NUM_BUFFERCACHE_PAGES_ELEM];
        bool        nulls[NUM_BUFFERCACHE_PAGES_ELEM];

        values[0] = Int32GetDatum(fctx->record[i].bufferid);
        nulls[0] = false;

        /*
         * Set all fields except the bufferid to null if the buffer is unused
         * or not valid.
         */
        if (fctx->record[i].blocknum == InvalidBlockNumber ||
            fctx->record[i].isvalid == false)
        {
            nulls[1] = true;
            nulls[2] = true;
            nulls[3] = true;
            nulls[4] = true;
            nulls[5] = true;
            nulls[6] = true;
            nulls[7] = true;
            /* unused for v1.0 callers, but the array is always long enough */
            nulls[8] = true;
        }
        else
        {
            values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode);
            nulls[1] = false;
            values[2] = ObjectIdGetDatum(fctx->record[i].reltablespace);
            nulls[2] = false;
            values[3] = ObjectIdGetDatum(fctx->record[i].reldatabase);
            nulls[3] = false;
            values[4] = ObjectIdGetDatum(fctx->record[i].forknum);
            nulls[4] = false;
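            /* BlockNumber is unsigned, so report it as int8 to avoid overflow. */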
            values[5] = Int64GetDatum((int64) fctx->record[i].blocknum);
            nulls[5] = false;
            values[6] = BoolGetDatum(fctx->record[i].isdirty);
            nulls[6] = false;
            values[7] = Int16GetDatum(fctx->record[i].usagecount);
            nulls[7] = false;
            /* unused for v1.0 callers, but the array is always long enough */
            values[8] = Int32GetDatum(fctx->record[i].pinning_backends);
            nulls[8] = false;
        }

        /* Build and return the tuple. */
        tuple = heap_form_tuple(fctx->tupdesc, values, nulls);
        result = HeapTupleGetDatum(tuple);

        SRF_RETURN_NEXT(funcctx, result);
    }
    else
        SRF_RETURN_DONE(funcctx);
}