Merge pull request #99168 from RandomShaper/even_better_spinlock

SpinLock: Overhaul false sharing prevention
This commit is contained in:
Thaddeus Crews 2024-12-09 14:33:31 -06:00
commit da8b9ba1ff
No known key found for this signature in database
GPG Key ID: 62181B86FE9E5D84

View File

@@ -31,8 +31,15 @@
#ifndef SPIN_LOCK_H
#define SPIN_LOCK_H
#include "core/os/thread.h"
#include "core/typedefs.h"
#ifdef THREADS_ENABLED
// Note the implementations below avoid false sharing by ensuring their
// sizes match the assumed cache line. We can't use align attributes
// because these objects may end up unaligned in semi-tightly packed arrays.
#ifdef _MSC_VER
#include <intrin.h>
#endif
@@ -42,7 +49,10 @@
#include <os/lock.h>
class SpinLock {
mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
union {
mutable os_unfair_lock _lock = OS_UNFAIR_LOCK_INIT;
char aligner[Thread::CACHE_LINE_BYTES];
};
public:
_ALWAYS_INLINE_ void lock() const {
@@ -54,9 +64,7 @@ public:
}
};
#else
#include "core/os/thread.h"
#else // __APPLE__
#include <atomic>
@@ -84,8 +92,11 @@ _ALWAYS_INLINE_ static void _cpu_pause() {
static_assert(std::atomic_bool::is_always_lock_free);
class alignas(Thread::CACHE_LINE_BYTES) SpinLock {
mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
class SpinLock {
union {
mutable std::atomic<bool> locked = ATOMIC_VAR_INIT(false);
char aligner[Thread::CACHE_LINE_BYTES];
};
public:
_ALWAYS_INLINE_ void lock() const {
@@ -107,4 +118,14 @@ public:
#endif // __APPLE__
#else // THREADS_ENABLED
// Stub used when the engine is built without thread support:
// with only one thread of execution there is nothing to guard,
// so both operations are intentionally no-ops. The interface
// matches the threaded variants so call sites compile unchanged.
class SpinLock {
public:
	void lock() const {
		// No-op: single-threaded build, nothing to synchronize.
	}
	void unlock() const {
		// No-op: single-threaded build, nothing to synchronize.
	}
};
#endif // THREADS_ENABLED
#endif // SPIN_LOCK_H