	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_robust_mutex_dead,
	lll_robust_mutex_trylock, lll_robust_mutex_lock,
	lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
	lll_robust_mutex_unlock): Define.
	(__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.
Jakub Jelinek 2006-03-01 07:44:57 +00:00
parent 6a8c1091fd
commit a1ea66ea29
2 changed files with 64 additions and 2 deletions


@@ -1,3 +1,11 @@
2006-03-01  Jakub Jelinek  <jakub@redhat.com>

	* sysdeps/unix/sysv/linux/sparc/lowlevellock.h (lll_robust_mutex_dead,
	lll_robust_mutex_trylock, lll_robust_mutex_lock,
	lll_robust_mutex_cond_lock, lll_robust_mutex_timedlock,
	lll_robust_mutex_unlock): Define.
	(__lll_robust_lock_wait, __lll_robust_timedlock_wait): New prototypes.

2006-02-28  H.J. Lu  <hongjiu.lu@intel.com>

	* sysdeps/unix/sysv/linux/ia64/clone2.S: Include <clone2.S>
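The hunks below are the corresponding changes to sysdeps/unix/sysv/linux/sparc/lowlevellock.h (the file named in the entry above). The new lll_robust_mutex_* macros mirror the existing lll_mutex_* family, but they store the owner's thread ID in the futex word so that a lock whose owner died can be detected and handed over via FUTEX_OWNER_DIED. A minimal sketch of the intended call pattern, not part of the patch: the caller, TID source and recovery step are illustrative assumptions (nptl obtains the TID with THREAD_GETMEM (THREAD_SELF, tid)), and the non-zero return is assumed to signal a dead previous owner.

```c
#include <lowlevellock.h>	/* the header being patched; glibc-internal */

static int lock_word;			/* the futex word, 0 == unlocked */
static int protected_counter;		/* state guarded by lock_word */

static void
robust_critical_section (int tid)
{
  /* Fast path: CAS 0 -> tid.  Contended path: __lll_robust_lock_wait.
     A non-zero return is assumed to mean the previous owner died while
     holding the lock (FUTEX_OWNER_DIED).  */
  if (lll_robust_mutex_lock (lock_word, tid) != 0)
    protected_counter = 0;		/* recover inconsistent state */

  ++protected_counter;			/* the critical section */

  /* Store 0; wake one waiter only if FUTEX_WAITERS was set.  */
  lll_robust_mutex_unlock (lock_word);
}
```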


@@ -78,6 +78,15 @@
     INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

#define lll_robust_mutex_dead(futexv) \
  do \
    { \
      int *__futexp = &(futexv); \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      lll_futex_wake (__futexp, 1); \
    } \
  while (0)

/* Returns non-zero if error happened, zero if success. */
#ifdef __sparc32_atomic_do_lock
/* Avoid FUTEX_WAKE_OP if supporting pre-v9 CPUs. */
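lll_robust_mutex_dead above does two things: it ORs FUTEX_OWNER_DIED into the lock word, so the next acquirer can see that the protected state may be inconsistent, and it wakes one waiter so that acquirer shows up promptly. A self-contained illustration of the same idea with raw GCC builtins and the futex syscall instead of glibc's internal atomic_or/lll_futex_wake; the function name and memory ordering are assumptions, not glibc code.

```c
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
mark_owner_dead (int *futexword)
{
  /* Publish "owner died" so whoever locks next knows the protected
     state may be inconsistent.  (The ordering is a guess; the sparc
     macro simply uses atomic_or.)  */
  __atomic_fetch_or (futexword, FUTEX_OWNER_DIED, __ATOMIC_SEQ_CST);

  /* Wake a single waiter so it can take over and recover.  */
  syscall (SYS_futex, futexword, FUTEX_WAKE, 1, NULL, NULL, 0);
}
```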
@@ -112,9 +121,18 @@ __lll_mutex_cond_trylock (int *futex)
}
#define lll_mutex_cond_trylock(futex) __lll_mutex_cond_trylock (&(futex))

static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_trylock (int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_mutex_trylock(futex, id) \
  __lll_robust_mutex_trylock (&(futex), id)

extern void __lll_lock_wait (int *futex) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex) attribute_hidden;

static inline void
__attribute__ ((always_inline))
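The robust trylock added above differs from the plain trylock in what it stores: instead of the small constant the non-robust variants use, it installs the caller's thread ID so the owner is identifiable from the futex word alone, and it returns zero when the word was free (lock acquired) and non-zero otherwise. The same semantics with plain GCC builtins, as a hedged sketch; the function name and builtins are illustrative, not glibc's.

```c
/* Succeed only if the futex word is 0; record TID as the new owner.  */
static int
robust_trylock_sketch (int *futexword, int tid)
{
  int expected = 0;
  /* 0 on success, non-zero on failure, matching
     __lll_robust_mutex_trylock above.  */
  return !__atomic_compare_exchange_n (futexword, &expected, tid,
                                       0 /* strong */, __ATOMIC_ACQUIRE,
                                       __ATOMIC_RELAXED);
}
```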
@@ -127,6 +145,17 @@ __lll_mutex_lock (int *futex)
}
#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))

static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_lock (int *futex, int id)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_lock_wait (futex);
  return result;
}
#define lll_robust_mutex_lock(futex, id) \
  __lll_robust_mutex_lock (&(futex), id)

static inline void
__attribute__ ((always_inline))
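__lll_robust_mutex_lock above handles only the uncontended case inline (CAS 0 -> id) and otherwise defers to __lll_robust_lock_wait, which this patch merely declares; the function itself is defined elsewhere in nptl. A simplified sketch of the shape such a wait loop is expected to have, using raw builtins and the futex syscall. This illustrates the general FUTEX_WAITERS protocol under stated assumptions and is not the actual glibc implementation.

```c
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative slow path: keep FUTEX_WAITERS set while blocking, and
   report an inherited dead-owner lock back to the caller.  */
static int
robust_lock_wait_sketch (int *futexword, int tid)
{
  for (;;)
    {
      int oldval = __atomic_load_n (futexword, __ATOMIC_RELAXED);

      if (oldval == 0)
        {
          /* Lock is free: grab it, keeping the waiter bit so a later
             unlock still wakes the remaining waiters.  */
          int expected = 0;
          if (__atomic_compare_exchange_n (futexword, &expected,
                                           tid | FUTEX_WAITERS, 0,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
            return 0;
          continue;
        }

      if (oldval & FUTEX_OWNER_DIED)
        /* Previous owner died; let the caller recover the state.  */
        return oldval;

      /* Make sure the waiter bit is visible, then sleep until an unlock
         wakes us or the word changes underneath us.  */
      int waited = oldval | FUTEX_WAITERS;
      if (waited != oldval
          && !__atomic_compare_exchange_n (futexword, &oldval, waited, 0,
                                           __ATOMIC_RELAXED, __ATOMIC_RELAXED))
        continue;
      syscall (SYS_futex, futexword, FUTEX_WAIT, waited, NULL, NULL, 0);
    }
}
```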
@@ -139,10 +168,14 @@ __lll_mutex_cond_lock (int *futex)
}
#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))

#define lll_robust_mutex_cond_lock(futex, id) \
  __lll_robust_mutex_lock (&(futex), (id) | FUTEX_WAITERS)

extern int __lll_timedlock_wait (int *futex, const struct timespec *)
     attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *)
     attribute_hidden;

static inline int
__attribute__ ((always_inline))
@@ -158,6 +191,19 @@ __lll_mutex_timedlock (int *futex, const struct timespec *abstime)
#define lll_mutex_timedlock(futex, abstime) \
  __lll_mutex_timedlock (&(futex), abstime)

static inline int
__attribute__ ((always_inline))
__lll_robust_mutex_timedlock (int *futex, const struct timespec *abstime,
                              int id)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime);
  return result;
}
#define lll_robust_mutex_timedlock(futex, abstime, id) \
  __lll_robust_mutex_timedlock (&(futex), abstime, id)

#define lll_mutex_unlock(lock) \
  ((void) ({ \
    int *__futex = &(lock); \
@@ -166,6 +212,14 @@ __lll_mutex_timedlock (int *futex, const struct timespec *abstime)
      lll_futex_wake (__futex, 1); \
  }))

#define lll_robust_mutex_unlock(lock) \
  ((void) ({ \
    int *__futex = &(lock); \
    int __val = atomic_exchange_rel (__futex, 0); \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1); \
  }))

#define lll_mutex_unlock_force(lock) \
  ((void) ({ \
    int *__futex = &(lock); \
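Finally, the lll_robust_mutex_unlock added in the hunk above keeps the same fast/slow split as lll_mutex_unlock: release the lock with a single atomic exchange of 0, and only enter the kernel when the old value shows FUTEX_WAITERS, i.e. someone is actually blocked. The same logic with raw primitives, again as an illustrative sketch rather than the glibc code.

```c
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fast path: one atomic exchange.  Slow path: a single FUTEX_WAKE,
   taken only when a waiter advertised itself via FUTEX_WAITERS.  */
static void
robust_unlock_sketch (int *futexword)
{
  int oldval = __atomic_exchange_n (futexword, 0, __ATOMIC_RELEASE);
  if (oldval & FUTEX_WAITERS)
    syscall (SYS_futex, futexword, FUTEX_WAKE, 1, NULL, NULL, 0);
}
```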