When the 64bit atomics simulation is in use, we can't necessarily
guarantee the correct alignment of the atomics due to lack of compiler
support for doing so; that's fine from a safety perspective, because
everything is protected by a lock, but we asserted the alignment in
all cases.  Weaken those assertions.  Per complaint from Alvaro Herrera.

My #ifdefery for PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY wasn't
sufficient. Fix that.  Per complaint from Alexander Korotkov.
Andres Freund 2017-04-07 17:03:21 -07:00
parent 453aaf7688
commit f13a9121f9
2 changed files with 62 additions and 16 deletions
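
For context: under the spinlock-based simulation, every 64-bit "atomic" operation takes a lock around a plain variable, so alignment buys no correctness. A minimal sketch of that idea, using a pthread mutex in place of PostgreSQL's spinlock and a hypothetical sim_atomic_u64 type (this is not the actual fallback code; the real type lives in src/include/port/atomics/fallback.h):

#include <stdint.h>
#include <pthread.h>

/* Hypothetical stand-in for the spinlock-protected fallback type. */
typedef struct
{
    pthread_mutex_t lock;   /* serializes every operation */
    uint64_t        value;  /* may be arbitrarily aligned */
} sim_atomic_u64;

static uint64_t
sim_fetch_add_u64(sim_atomic_u64 *a, int64_t add_)
{
    uint64_t    old;

    /* Because every reader and writer takes the same lock, no one can
     * ever observe a torn load or store of a misaligned value. */
    pthread_mutex_lock(&a->lock);
    old = a->value;
    a->value += add_;
    pthread_mutex_unlock(&a->lock);

    return old;
}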

src/include/port/atomics.h

@@ -425,30 +425,41 @@ pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline void
 pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
+	/*
+	 * Can't necessarily enforce alignment - and don't need it - when using
+	 * the spinlock based fallback implementation. Therefore only assert when
+	 * not using it.
+	 */
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	pg_atomic_init_u64_impl(ptr, val);
 }
 
 static inline uint64
 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_read_u64_impl(ptr);
 }
 
 static inline void
 pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	pg_atomic_write_u64_impl(ptr, val);
 }
 
 static inline uint64
 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_exchange_u64_impl(ptr, newval);
 }
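
The guarded assertions above change nothing for callers; the same public API works whether or not the platform needs the simulation. A hypothetical caller, using the real pg_atomic_* functions declared in port/atomics.h:

/* Hypothetical usage; pg_atomic_uint64 and the functions below are the
 * real PostgreSQL API from port/atomics.h. */
#include "port/atomics.h"

static pg_atomic_uint64 counter;

static void
counter_demo(void)
{
	pg_atomic_init_u64(&counter, 0);		/* asserts alignment unless simulated */
	pg_atomic_fetch_add_u64(&counter, 10);	/* returns the value before the add */
	pg_atomic_write_u64(&counter, 42);
	(void) pg_atomic_read_u64(&counter);
}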
@@ -456,22 +467,28 @@ static inline bool
 pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
 							   uint64 *expected, uint64 newval)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
 	AssertPointerAlignment(expected, 8);
+#endif
 	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
 }
 
 static inline uint64
 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_add_u64_impl(ptr, add_);
 }
 
 static inline uint64
 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	Assert(sub_ != PG_INT64_MIN);
 	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
 }
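
pg_atomic_compare_exchange_u64 follows the usual CAS contract: it returns true on success, and on failure it stores the value it actually found into *expected. That makes retry loops straightforward; a sketch of a hypothetical atomic-maximum helper built on that contract:

/* Hypothetical helper: atomically raise *ptr to at least newval. */
static void
atomic_fetch_max_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
	uint64		expected = pg_atomic_read_u64(ptr);

	/* On CAS failure, expected is refreshed with the current value,
	 * so the loop condition re-checks against the latest contents. */
	while (expected < newval &&
		   !pg_atomic_compare_exchange_u64(ptr, &expected, newval))
		;
}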
@@ -479,28 +496,36 @@ pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 static inline uint64
 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_and_u64_impl(ptr, and_);
 }
 
 static inline uint64
 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_fetch_or_u64_impl(ptr, or_);
 }
 
 static inline uint64
 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	return pg_atomic_add_fetch_u64_impl(ptr, add_);
 }
 
 static inline uint64
 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
 {
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
 	AssertPointerAlignment(ptr, 8);
+#endif
 	Assert(sub_ != PG_INT64_MIN);
 	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
 }
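
All of these checks go through AssertPointerAlignment, which compiles to nothing in non-assert builds; conceptually it is just a modulo test on the pointer value. A rough stand-alone equivalent (the real macro is built on PostgreSQL's Assert and TYPEALIGN machinery, not plain assert):

#include <assert.h>
#include <stdint.h>

/* Approximate equivalent of the check the macro performs. */
#define ASSERT_PTR_ALIGNED(ptr, bndr) \
	assert(((uintptr_t) (ptr)) % (bndr) == 0)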

src/include/port/atomics/generic.h

@@ -271,26 +271,26 @@ pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 }
 #endif
 
-#ifndef PG_HAVE_ATOMIC_READ_U64
-#define PG_HAVE_ATOMIC_READ_U64
-static inline uint64
-pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
-{
-	return *(&ptr->value);
-}
-#endif
-
 #ifndef PG_HAVE_ATOMIC_WRITE_U64
 #define PG_HAVE_ATOMIC_WRITE_U64
+
+#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
+	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)
+
 static inline void
 pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
+	/*
+	 * On this platform aligned 64bit writes are guaranteed to be atomic,
+	 * except if using the fallback implementation, where can't guarantee the
+	 * required alignment.
+	 */
+	AssertPointerAlignment(ptr, 8);
 	ptr->value = val;
 }
-#endif
 
-#ifndef PG_HAVE_ATOMIC_WRITE_U64
-#define PG_HAVE_ATOMIC_WRITE_U64
+#else
+
 static inline void
 pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
 {
@@ -300,10 +300,30 @@ pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
 	 */
 	pg_atomic_exchange_u64_impl(ptr, val);
 }
-#endif
+
+#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_WRITE_U64 */
 
 #ifndef PG_HAVE_ATOMIC_READ_U64
 #define PG_HAVE_ATOMIC_READ_U64
+
+#if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
+	!defined(PG_HAVE_ATOMIC_U64_SIMULATION)
+
+static inline uint64
+pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
+{
+	/*
+	 * On this platform aligned 64bit reads are guaranteed to be atomic,
+	 * except if using the fallback implementation, where can't guarantee the
+	 * required alignment.
+	 */
+	AssertPointerAlignment(ptr, 8);
+	return *(&ptr->value);
+}
+
+#else
+
 static inline uint64
 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 {
@@ -319,7 +339,8 @@ pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 	return old;
 }
-#endif
+#endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_READ_U64 */
 
 #ifndef PG_HAVE_ATOMIC_INIT_U64
 #define PG_HAVE_ATOMIC_INIT_U64
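
The #else branch whose tail appears in the last hunk is the pre-existing fallback read: where a plain 64-bit load may tear, the value is read by compare-exchanging it with 0, which deposits the current contents into old whether the exchange succeeds or fails (writes are handled symmetrically via pg_atomic_exchange_u64_impl). The same idiom, sketched with the GCC/Clang __atomic builtins rather than the pg_atomic layer:

#include <stdint.h>
#include <stdbool.h>

/* Illustration only; the real code goes through
 * pg_atomic_compare_exchange_u64_impl instead of the builtins. */
static uint64_t
read_u64_via_cas(volatile uint64_t *ptr)
{
	uint64_t	old = 0;

	/* If *ptr is 0 the CAS "succeeds" and harmlessly stores 0; otherwise
	 * it fails and writes the value it found into old. Either way, old
	 * holds the current contents afterwards. */
	__atomic_compare_exchange_n((uint64_t *) ptr, &old, 0, false,
								__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}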