mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-12-27 08:39:28 +08:00
Reduce the number of semaphores used under --disable-spinlocks.
Instead of allocating a semaphore from the operating system for every spinlock, allocate a fixed number of semaphores (by default, 1024) from the operating system and multiplex all the spinlocks that get created onto them. This could self-deadlock if a process attempted to acquire more than one spinlock at a time, but since processes aren't supposed to execute anything other than short stretches of straight-line code while holding a spinlock, that shouldn't happen.

One motivation for this change is that, with the introduction of dynamic shared memory, it may be desirable to create spinlocks that last for less than the lifetime of the server. Without this change, attempting to use such facilities under --disable-spinlocks would quickly exhaust any supply of available semaphores. Quite apart from that, it's desirable to contain the quantity of semaphores needed to run the server simply on convenience grounds, since using too many may make it harder to get PostgreSQL running on a new platform, which is mostly the point of --disable-spinlocks in the first place.

Patch by me; review by Tom Lane.
This commit is contained in:
parent
3739e5ab93
commit
daa7527afc
@ -471,6 +471,9 @@ typedef struct
|
||||
slock_t *ShmemLock;
|
||||
VariableCache ShmemVariableCache;
|
||||
Backend *ShmemBackendArray;
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
PGSemaphore SpinlockSemaArray;
|
||||
#endif
|
||||
LWLock *LWLockArray;
|
||||
slock_t *ProcStructLock;
|
||||
PROC_HDR *ProcGlobal;
|
||||
@ -5626,6 +5629,9 @@ save_backend_variables(BackendParameters *param, Port *port,
|
||||
param->ShmemVariableCache = ShmemVariableCache;
|
||||
param->ShmemBackendArray = ShmemBackendArray;
|
||||
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
param->SpinlockSemaArray = SpinlockSemaArray;
|
||||
#endif
|
||||
param->LWLockArray = LWLockArray;
|
||||
param->ProcStructLock = ProcStructLock;
|
||||
param->ProcGlobal = ProcGlobal;
|
||||
@ -5854,6 +5860,9 @@ restore_backend_variables(BackendParameters *param, Port *port)
|
||||
ShmemVariableCache = param->ShmemVariableCache;
|
||||
ShmemBackendArray = param->ShmemBackendArray;
|
||||
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
SpinlockSemaArray = param->SpinlockSemaArray;
|
||||
#endif
|
||||
LWLockArray = param->LWLockArray;
|
||||
ProcStructLock = param->ProcStructLock;
|
||||
ProcGlobal = param->ProcGlobal;
|
||||
|
@ -105,6 +105,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
|
||||
* need to be so careful during the actual allocation phase.
|
||||
*/
|
||||
size = 100000;
|
||||
size = add_size(size, SpinlockSemaSize());
|
||||
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
|
||||
sizeof(ShmemIndexEnt)));
|
||||
size = add_size(size, BufferShmemSize());
|
||||
|
@ -116,9 +116,24 @@ InitShmemAllocation(void)
|
||||
Assert(shmhdr != NULL);
|
||||
|
||||
/*
|
||||
* Initialize the spinlock used by ShmemAlloc. We have to do the space
|
||||
* allocation the hard way, since obviously ShmemAlloc can't be called
|
||||
* yet.
|
||||
* If spinlocks are disabled, initialize emulation layer. We have to do
|
||||
* the space allocation the hard way, since obviously ShmemAlloc can't be
|
||||
* called yet.
|
||||
*/
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
{
|
||||
PGSemaphore spinsemas;
|
||||
|
||||
spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
|
||||
shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
|
||||
SpinlockSemaInit(spinsemas);
|
||||
Assert(shmhdr->freeoffset <= shmhdr->totalsize);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialize the spinlock used by ShmemAlloc; we have to do this the hard
|
||||
* way, too, for the same reasons as above.
|
||||
*/
|
||||
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
|
||||
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
|
||||
|
@ -29,6 +29,18 @@
|
||||
#include "storage/spin.h"
|
||||
|
||||
|
||||
PGSemaphore SpinlockSemaArray;
|
||||
|
||||
/*
|
||||
* Report the amount of shared memory needed to store semaphores for spinlock
|
||||
* support.
|
||||
*/
|
||||
Size
|
||||
SpinlockSemaSize(void)
|
||||
{
|
||||
return SpinlockSemas() * sizeof(PGSemaphoreData);
|
||||
}
|
||||
|
||||
#ifdef HAVE_SPINLOCKS
|
||||
|
||||
/*
|
||||
@ -52,22 +64,20 @@ SpinlockSemas(void)
|
||||
int
|
||||
SpinlockSemas(void)
|
||||
{
|
||||
int nsemas;
|
||||
return NUM_SPINLOCK_SEMAPHORES;
|
||||
}
|
||||
|
||||
/*
|
||||
* It would be cleaner to distribute this logic into the affected modules,
|
||||
* similar to the way shmem space estimation is handled.
|
||||
*
|
||||
* For now, though, there are few enough users of spinlocks that we just
|
||||
* keep the knowledge here.
|
||||
*/
|
||||
nsemas = NumLWLocks(); /* one for each lwlock */
|
||||
nsemas += NBuffers; /* one for each buffer header */
|
||||
nsemas += max_wal_senders; /* one for each wal sender process */
|
||||
nsemas += num_xloginsert_slots; /* one for each WAL insertion slot */
|
||||
nsemas += 30; /* plus a bunch for other small-scale use */
|
||||
/*
|
||||
* Initialize semaphores.
|
||||
*/
|
||||
extern void
|
||||
SpinlockSemaInit(PGSemaphore spinsemas)
|
||||
{
|
||||
int i;
|
||||
|
||||
return nsemas;
|
||||
for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
|
||||
PGSemaphoreCreate(&spinsemas[i]);
|
||||
SpinlockSemaArray = spinsemas;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -77,13 +87,15 @@ SpinlockSemas(void)
|
||||
void
|
||||
s_init_lock_sema(volatile slock_t *lock)
|
||||
{
|
||||
PGSemaphoreCreate((PGSemaphore) lock);
|
||||
static int counter = 0;
|
||||
|
||||
*lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
|
||||
}
|
||||
|
||||
void
|
||||
s_unlock_sema(volatile slock_t *lock)
|
||||
{
|
||||
PGSemaphoreUnlock((PGSemaphore) lock);
|
||||
PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
|
||||
}
|
||||
|
||||
bool
|
||||
@ -98,7 +110,7 @@ int
|
||||
tas_sema(volatile slock_t *lock)
|
||||
{
|
||||
/* Note that TAS macros return 0 if *success* */
|
||||
return !PGSemaphoreTryLock((PGSemaphore) lock);
|
||||
return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
|
||||
}
|
||||
|
||||
#endif /* !HAVE_SPINLOCKS */
|
||||
|
@ -56,6 +56,14 @@
|
||||
*/
|
||||
#define NUM_USER_DEFINED_LWLOCKS 4
|
||||
|
||||
/*
|
||||
* When we don't have native spinlocks, we use semaphores to simulate them.
|
||||
* Decreasing this value reduces consumption of OS resources; increasing it
|
||||
* may improve performance, but supplying a real spinlock implementation is
|
||||
* probably far better.
|
||||
*/
|
||||
#define NUM_SPINLOCK_SEMAPHORES 1024
|
||||
|
||||
/*
|
||||
* Define this if you want to allow the lo_import and lo_export SQL
|
||||
* functions to be executed by ordinary users. By default these
|
||||
|
@ -915,7 +915,7 @@ spin_delay(void)
|
||||
* to fall foul of kernel limits on number of semaphores, so don't use this
|
||||
* unless you must! The subroutines appear in spin.c.
|
||||
*/
|
||||
typedef PGSemaphoreData slock_t;
|
||||
typedef int slock_t;
|
||||
|
||||
extern bool s_lock_free_sema(volatile slock_t *lock);
|
||||
extern void s_unlock_sema(volatile slock_t *lock);
|
||||
|
@ -69,5 +69,11 @@
|
||||
|
||||
|
||||
extern int SpinlockSemas(void);
|
||||
extern Size SpinlockSemaSize(void);
|
||||
|
||||
#ifndef HAVE_SPINLOCKS
|
||||
extern void SpinlockSemaInit(PGSemaphore);
|
||||
extern PGSemaphore SpinlockSemaArray;
|
||||
#endif
|
||||
|
||||
#endif /* SPIN_H */
|
||||
|
Loading…
Reference in New Issue
Block a user