locks.h (compare_and_swap): Use GCC atomic intrinsics.

2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
            Alan Modra  <amodra@gmail.com>

        * sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
        intrinsics.
        (release_set): Same.
        (compare_and_swap_release): Same.
        (read_barrier): Same.
        (write_barrier): Same.

Co-Authored-By: Alan Modra <amodra@gmail.com>

From-SVN: r188829

libjava/ChangeLog
@@ -1,3 +1,13 @@
+2012-06-20  David Edelsohn  <dje.gcc@gmail.com>
+            Alan Modra  <amodra@gmail.com>
+
+        * sysdep/powerpc/locks.h (compare_and_swap): Use GCC atomic
+        intrinsics.
+        (release_set): Same.
+        (compare_and_swap_release): Same.
+        (read_barrier): Same.
+        (write_barrier): Same.
+
 2012-06-15  Andreas Schwab  <schwab@linux-m68k.org>
 
        * sysdep/m68k/locks.h (compare_and_swap): Use

libjava/sysdep/powerpc/locks.h
@@ -11,87 +11,63 @@ details. */
 #ifndef __SYSDEP_LOCKS_H__
 #define __SYSDEP_LOCKS_H__
 
-#ifdef __LP64__
-#define _LARX "ldarx "
-#define _STCX "stdcx. "
-#else
-#define _LARX "lwarx "
-#ifdef __PPC405__
-#define _STCX "sync; stwcx. "
-#else
-#define _STCX "stwcx. "
-#endif
-#endif
-
 typedef size_t obj_addr_t;   /* Integer type big enough for object */
                              /* address. */
 
 // Atomically replace *addr by new_val if it was initially equal to old.
 // Return true if the comparison succeeded.
 // Assumed to have acquire semantics, i.e. later memory operations
 // cannot execute before the compare_and_swap finishes.
 inline static bool
-compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
+compare_and_swap (volatile obj_addr_t *addr,
+                  obj_addr_t old,
                   obj_addr_t new_val)
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ (
-          " " _LARX "%0,0,%1 \n"
-          " xor. %0,%3,%0\n"
-          " bne $+12\n"
-          " " _STCX "%2,0,%1\n"
-          " bne- $-16\n"
-        : "=&r" (ret)
-        : "r" (addr), "r" (new_val), "r" (old)
-        : "cr0", "memory");
-
-  /* This version of __compare_and_swap is to be used when acquiring
-     a lock, so we don't need to worry about whether other memory
-     operations have completed, but we do need to be sure that any loads
-     after this point really occur after we have acquired the lock.  */
-  __asm__ __volatile__ ("isync" : : : "memory");
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 }
 
 // Set *addr to new_val with release semantics, i.e. making sure
 // that prior loads and stores complete before this
 // assignment.
 inline static void
 release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
-  *addr = new_val;
+  __atomic_store_n (addr, new_val, __ATOMIC_RELEASE);
 }
 
 // Compare_and_swap with release semantics instead of acquire semantics.
 inline static bool
 compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
                           obj_addr_t new_val)
 {
-  obj_addr_t ret;
-
-  __asm__ __volatile__ ("sync" : : : "memory");
-  __asm__ __volatile__ (
-          " " _LARX "%0,0,%1 \n"
-          " xor. %0,%3,%0\n"
-          " bne $+12\n"
-          " " _STCX "%2,0,%1\n"
-          " bne- $-16\n"
-        : "=&r" (ret)
-        : "r" (addr), "r" (new_val), "r" (old)
-        : "cr0", "memory");
-  return ret == 0;
+  return __atomic_compare_exchange_n (addr, &old, new_val, 0,
+                                      __ATOMIC_RELEASE, __ATOMIC_RELAXED);
 }
 
 // Ensure that subsequent instructions do not execute on stale
 // data that was loaded from memory before the barrier.
 inline static void
 read_barrier ()
 {
-  __asm__ __volatile__ ("isync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_ACQUIRE);
 }
 
 // Ensure that prior stores to memory are completed with respect to other
 // processors.
 inline static void
 write_barrier ()
 {
-  __asm__ __volatile__ ("sync" : : : "memory");
+  __atomic_thread_fence (__ATOMIC_RELEASE);
 }
 
 #endif
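
As background (this sketch is not part of the commit; the lock_word, try_lock,
and unlock names are invented for illustration), the builtins used above combine
into a lock roughly as follows: an acquire compare-and-swap takes the lock and a
release store drops it, mirroring compare_and_swap () and release_set ().

#include <stddef.h>
#include <stdbool.h>

typedef size_t obj_addr_t;

static volatile obj_addr_t lock_word;   /* hypothetical: 0 = free, 1 = held */

static inline bool
try_lock (void)
{
  obj_addr_t expected = 0;
  /* Acquire semantics, as in compare_and_swap () above: later memory
     operations cannot be reordered before a successful exchange.  */
  return __atomic_compare_exchange_n (&lock_word, &expected, 1, 0,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline void
unlock (void)
{
  /* Release semantics, as in release_set () above: prior loads and
     stores complete before the lock word is cleared.  */
  __atomic_store_n (&lock_word, 0, __ATOMIC_RELEASE);
}

Any compiler providing the GCC __atomic builtins (GCC 4.7 and later) should
accept this sketch as-is, with no target-specific inline asm required.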