mirror of git://sourceware.org/git/glibc.git
synced 2025-03-01 13:17:19 +08:00

commit 0f6699ea05 (parent 5b20043897)

    * sysdeps/unix/sysv/linux/kernel-features.h: Add
    __ASSUME_SET_ROBUST_LIST.
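Background for the diff below: Linux 2.6.17 added the set_robust_list system call. Each thread registers a struct robust_list_head with the kernel; if the thread dies while holding robust mutexes, the kernel walks the registered list and marks each held futex word with FUTEX_OWNER_DIED so another thread or process can recover the lock. This commit defines __ASSUME_SET_ROBUST_LIST and converts NPTL's per-thread robust-mutex list to the kernel's format. As a hedged illustration of the registration itself (on current systems <linux/futex.h> provides the structure; at the time of this commit the patch below had to define both the structure and the i386/x86_64 syscall numbers itself):

#include <linux/futex.h>   /* struct robust_list_head -- assumed available */
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Register an initially empty, circular robust list for the calling
   thread.  Returns 0 on success, -1 with errno set (ENOSYS on kernels
   older than 2.6.17).  */
static struct robust_list_head head;

static int
register_robust_list (void)
{
  head.list.next = &head.list;   /* empty list: head points at itself */
  head.futex_offset = 0;         /* entry address + offset = futex word */
  head.list_op_pending = NULL;
  return syscall (SYS_set_robust_list, &head, sizeof (head));
}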
ChangeLog
@@ -1,3 +1,8 @@
+2006-03-27  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/kernel-features.h: Add
+	__ASSUME_SET_ROBUST_LIST.
+
 2006-03-27  Jakub Jelinek  <jakub@redhat.com>

 	* wcsmbs/wchar.h (btowc, wctob): Don't optimize in C++.
nptl/Makefile
@@ -206,7 +206,7 @@ tests = tst-typesizes \
 	tst-cond14 tst-cond15 tst-cond16 tst-cond17 tst-cond18 tst-cond19 \
 	tst-cond20 tst-cond21 \
 	tst-robust1 tst-robust2 tst-robust3 tst-robust4 tst-robust5 \
-	tst-robust6 tst-robust7 \
+	tst-robust6 tst-robust7 tst-robust8 \
 	tst-rwlock1 tst-rwlock2 tst-rwlock3 tst-rwlock4 tst-rwlock5 \
 	tst-rwlock6 tst-rwlock7 tst-rwlock8 tst-rwlock9 tst-rwlock10 \
 	tst-rwlock11 tst-rwlock12 tst-rwlock13 tst-rwlock14 \
nptl/allocatestack.c
@@ -365,12 +365,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* The process ID is also the same as that of the caller.  */
       pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

-      /* List of robust mutexes.  */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-      pd->robust_list.__prev = &pd->robust_list;
-#endif
-      pd->robust_list.__next = &pd->robust_list;
-
       /* Allocate the DTV for this thread.  */
       if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
         {
@@ -505,12 +499,6 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
       /* The process ID is also the same as that of the caller.  */
       pd->pid = THREAD_GETMEM (THREAD_SELF, pid);

-      /* List of robust mutexes.  */
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-      pd->robust_list.__prev = &pd->robust_list;
-#endif
-      pd->robust_list.__next = &pd->robust_list;
-
       /* Allocate the DTV for this thread.  */
       if (_dl_allocate_tls (TLS_TPADJ (pd)) == NULL)
         {
@@ -634,6 +622,18 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
      stillborn thread could be canceled while the lock is taken.  */
   pd->lock = LLL_LOCK_INITIALIZER;

+  /* The robust mutex lists also need to be initialized
+     unconditionally because the cleanup for the previous stack owner
+     might have happened in the kernel.  */
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                  - offsetof (pthread_mutex_t,
+                                              __data.__list.__next));
+  pd->robust_head.list_op_pending = NULL;
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+
   /* We place the thread descriptor at the end of the stack.  */
   *pdp = pd;

nptl/descr.h (45 lines)
@@ -102,6 +102,15 @@ struct xid_command
 };


+/* Data structure used by the kernel to find robust futexes.  */
+struct robust_list_head
+{
+  void *list;
+  long int futex_offset;
+  void *list_op_pending;
+};
+
+
 /* Thread descriptor data structure.  */
 struct pthread
 {
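The three fields added above mirror what the kernel expects: list heads a circular list of held robust mutexes, futex_offset translates a list entry into the address of its lock word, and list_op_pending names an entry whose insertion or removal may have been interrupted by death. A small sketch of the offset arithmetic the kernel performs (field naming per this patch, not a literal kernel excerpt):

#include <stddef.h>

/* A robust-list entry points at a mutex's __list.__next field; adding
   the registered futex_offset yields the futex word the kernel must
   mark with FUTEX_OWNER_DIED.  */
static inline int *
futex_word_of_entry (void *entry, long int futex_offset)
{
  return (int *) ((char *) entry + futex_offset);
}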
@@ -136,25 +145,43 @@ struct pthread

   /* List of robust mutexes the thread is holding.  */
 #ifdef __PTHREAD_MUTEX_HAVE_PREV
-  __pthread_list_t robust_list;
+  void *robust_prev;
+  struct robust_list_head robust_head;
+
+  /* The list above is strange.  It is basically a double linked list
+     but the pointer to the next/previous element of the list points
+     in the middle of the object, the __next element.  Whenever
+     casting to __pthread_list_t we need to adjust the pointer
+     first.  */
+# define QUEUE_PTR_ADJUST (offsetof (__pthread_list_t, __next))
+
 # define ENQUEUE_MUTEX(mutex) \
   do { \
-    __pthread_list_t *next = THREAD_GETMEM (THREAD_SELF, robust_list.__next); \
-    next->__prev = &mutex->__data.__list; \
-    mutex->__data.__list.__next = next; \
-    mutex->__data.__list.__prev = &THREAD_SELF->robust_list; \
-    THREAD_SETMEM (THREAD_SELF, robust_list.__next, &mutex->__data.__list); \
+    __pthread_list_t *next = (THREAD_GETMEM (THREAD_SELF, robust_head.list) \
+                              - QUEUE_PTR_ADJUST); \
+    next->__prev = (void *) &mutex->__data.__list.__next; \
+    mutex->__data.__list.__next = (void *) &next->__next; \
+    mutex->__data.__list.__prev = (void *) &THREAD_SELF->robust_head; \
+    THREAD_SETMEM (THREAD_SELF, robust_head.list, \
+                   &mutex->__data.__list.__next); \
   } while (0)
 # define DEQUEUE_MUTEX(mutex) \
   do { \
-    mutex->__data.__list.__next->__prev = mutex->__data.__list.__prev; \
-    mutex->__data.__list.__prev->__next = mutex->__data.__list.__next; \
+    __pthread_list_t *next = (__pthread_list_t *) \
+      ((char *) mutex->__data.__list.__next - QUEUE_PTR_ADJUST); \
+    next->__prev = mutex->__data.__list.__prev; \
+    __pthread_list_t *prev = (__pthread_list_t *) \
+      ((char *) mutex->__data.__list.__prev - QUEUE_PTR_ADJUST); \
+    prev->__next = mutex->__data.__list.__next; \
     mutex->__data.__list.__prev = NULL; \
     mutex->__data.__list.__next = NULL; \
   } while (0)
 #else
-  __pthread_slist_t robust_list;
+  union
+  {
+    __pthread_slist_t robust_list;
+    struct robust_list_head robust_head;
+  };
+
 # define ENQUEUE_MUTEX(mutex) \
   do { \
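The comment in the hunk explains the oddity: nodes are now linked through pointers to their __next fields, which is the layout the kernel understands, so every cast back to __pthread_list_t has to subtract QUEUE_PTR_ADJUST. This is the familiar container_of adjustment; a self-contained toy (struct node stands in for pthread_mutex_t):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-in: the links live in the middle of the object, just as
   __pthread_list_t lives inside pthread_mutex_t.  */
struct node
{
  int lock_word;
  struct { void *next; void *prev; } list;
};

#define PTR_ADJUST (offsetof (struct node, list.next))

int
main (void)
{
  struct node n = { .lock_word = 42 };
  void *entry = &n.list.next;   /* what a robust list stores */
  struct node *back = (struct node *) ((char *) entry - PTR_ADJUST);
  printf ("%d\n", back->lock_word);   /* prints 42 */
  return 0;
}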
nptl/init.c (28 lines)
@@ -60,6 +60,15 @@
 size_t __static_tls_size;
 size_t __static_tls_align_m1;

+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and we can use it.  */
+int __set_robust_list_avail;
+# define set_robust_list_not_avail() \
+  __set_robust_list_avail = -1
+#else
+# define set_robust_list_not_avail() do { } while (0)
+#endif
+
 /* Version of the library, used in libthread_db to detect mismatches.  */
 static const char nptl_version[] __attribute_used__ = VERSION;

@@ -247,10 +256,6 @@ __pthread_initialize_minimal_internal (void)
   struct pthread *pd = THREAD_SELF;
   INTERNAL_SYSCALL_DECL (err);
   pd->pid = pd->tid = INTERNAL_SYSCALL (set_tid_address, err, 1, &pd->tid);
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-  pd->robust_list.__prev = &pd->robust_list;
-#endif
-  pd->robust_list.__next = &pd->robust_list;
   THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
   THREAD_SETMEM (pd, user_stack, true);
   if (LLL_LOCK_INITIALIZER != 0)
@@ -259,6 +264,21 @@ __pthread_initialize_minimal_internal (void)
   THREAD_SETMEM (pd, cpuclock_offset, GL(dl_cpuclock_offset));
 #endif

+  /* Initialize the robust mutex data.  */
+#ifdef __PTHREAD_MUTEX_HAVE_PREV
+  pd->robust_prev = &pd->robust_head;
+#endif
+  pd->robust_head.list = &pd->robust_head;
+#ifdef __NR_set_robust_list
+  pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                  - offsetof (pthread_mutex_t,
+                                              __data.__list.__next));
+  int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+                              sizeof (struct robust_list_head));
+  if (INTERNAL_SYSCALL_ERROR_P (res, err))
+#endif
+    set_robust_list_not_avail ();
+
   /* Set initial thread's stack block from 0 up to __libc_stack_end.
      It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
      purposes this is good enough.  */
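Note the detection protocol established here: the initial thread fills in the same robust_head layout the kernel expects and issues set_robust_list once; if the call fails (typically ENOSYS on kernels before 2.6.17), set_robust_list_not_avail() drives __set_robust_list_avail negative, and every later consumer, from pthread_mutex_init's process-shared check to the exit-time cleanup in pthread_create.c below, falls back to the user-space behavior.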
nptl/pthreadP.h
@@ -31,6 +31,7 @@
 #include <internaltypes.h>
 #include <pthread-functions.h>
 #include <atomic.h>
+#include <kernel-features.h>


 /* Atomic operations on TLS memory.  */
@@ -60,13 +61,13 @@
 /* Internal mutex type value.  */
 enum
 {
-  PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 16,
-  PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP
-  = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_RECURSIVE_NP,
-  PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP
-  = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
-  PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP
-  = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
+  PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16,
+  PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP,
+  PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP,
+  PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
+  = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP,
   PTHREAD_MUTEX_PRIO_INHERIT_PRIVATE_NP = 32,
   PTHREAD_MUTEX_PRIO_PROTECT_PRIVATE_NP = 64
 };
@@ -128,6 +129,11 @@ hidden_proto (__pthread_keys)
 /* Number of threads running.  */
 extern unsigned int __nptl_nthreads attribute_hidden;

+#ifndef __ASSUME_SET_ROBUST_LIST
+/* Negative if we do not have the system call and we can use it.  */
+extern int __set_robust_list_avail attribute_hidden;
+#endif
+
 /* The library can run in debugging mode where it performs a lot more
    tests.  */
 extern int __pthread_debug attribute_hidden;
@@ -504,4 +510,15 @@ extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
 # define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name);
 #endif

+
+#ifndef __NR_set_robust_list
+/* XXX For the time being...  Once we can rely on the kernel headers
+   having the definition remove these lines.  */
+# if defined __i386__
+#  define __NR_set_robust_list 311
+# elif defined __x86_64__
+#  define __NR_set_robust_list 273
+# endif
+#endif
+
 #endif	/* pthreadP.h */
nptl/pthread_create.c
@@ -229,6 +229,19 @@ start_thread (void *arg)
   /* Initialize resolver state pointer.  */
   __resp = &pd->res;

+#ifdef __NR_set_robust_list
+# ifndef __ASSUME_SET_ROBUST_LIST
+  if (__set_robust_list_avail >= 0)
+# endif
+    {
+      INTERNAL_SYSCALL_DECL (err);
+      /* This call should never fail because the initial call in init.c
+         succeeded.  */
+      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
+                        sizeof (struct robust_list_head));
+    }
+#endif
+
   /* This is where the try/finally block should be created.  For
      compilers without that support we do use setjmp.  */
   struct pthread_unwind_buf unwind_buf;
@@ -310,35 +323,34 @@ start_thread (void *arg)
      the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
   atomic_bit_set (&pd->cancelhandling, EXITING_BIT);

+#ifndef __ASSUME_SET_ROBUST_LIST
   /* If this thread has any robust mutexes locked, handle them now.  */
-#if __WORDSIZE == 64
-  __pthread_list_t *robust = pd->robust_list.__next;
-#else
+# if __WORDSIZE == 64
+  void *robust = pd->robust_head.list;
+# else
   __pthread_slist_t *robust = pd->robust_list.__next;
-#endif
-  if (__builtin_expect (robust != &pd->robust_list, 0))
+# endif
+  /* We let the kernel do the notification if it is able to do so.  */
+  if (__set_robust_list_avail < 0
+      && __builtin_expect (robust != &pd->robust_head, 0))
     {
       do
         {
           struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
-            ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
-          robust = robust->__next;
+            ((char *) robust - offsetof (struct __pthread_mutex_s,
+                                         __list.__next));
+          robust = *((void **) robust);

-          this->__list.__next = NULL;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
+# ifdef __PTHREAD_MUTEX_HAVE_PREV
           this->__list.__prev = NULL;
-#endif
+# endif
+          this->__list.__next = NULL;

           lll_robust_mutex_dead (this->__lock);
         }
-      while (robust != &pd->robust_list);
-
-      /* Clean up so that the thread descriptor can be reused.  */
-      pd->robust_list.__next = &pd->robust_list;
-#ifdef __PTHREAD_MUTEX_HAVE_PREV
-      pd->robust_list.__prev = &pd->robust_list;
-#endif
+      while (robust != &pd->robust_head);
     }
+#endif

   /* If the thread is detached free the TCB.  */
   if (IS_DETACHED (pd))
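After this change the user-space walk survives only as a fallback (guarded by #ifndef __ASSUME_SET_ROBUST_LIST and the runtime __set_robust_list_avail check), and it now follows kernel-format links: each stored pointer addresses a node's __next field, and the link is loaded before the node is touched. A standalone sketch of such a traversal, under the assumption that <linux/futex.h> supplies struct robust_list_head and with a hypothetical handle_dead callback:

#include <linux/futex.h>   /* struct robust_list_head -- assumed */

/* Walk a kernel-format robust list in user space.  The list is
   circular, terminated by the head itself; each entry points at the
   __next field inside a mutex, so the link must be loaded before the
   entry is handed to the callback, which may clear it.  */
static void
walk_robust_list (struct robust_list_head *head,
                  void (*handle_dead) (void *entry, long int futex_offset))
{
  void *entry = head->list.next;
  while (entry != (void *) &head->list)
    {
      void *next = *(void **) entry;   /* load the link first */
      handle_dead (entry, head->futex_offset);
      entry = next;
    }
}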
nptl/pthread_mutex_consistent.c
@@ -26,7 +26,7 @@ pthread_mutex_consistent_np (mutex)
      pthread_mutex_t *mutex;
 {
   /* Test whether this is a robust mutex with a dead owner.  */
-  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) == 0
+  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
       || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
     return EINVAL;

nptl/pthread_mutex_destroy.c
@@ -25,15 +25,9 @@ int
 __pthread_mutex_destroy (mutex)
      pthread_mutex_t *mutex;
 {
-  if (mutex->__data.__nusers != 0)
-    {
-      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) != 0
-          && (mutex->__data.__lock & FUTEX_OWNER_DIED) != 0
-          && mutex->__data.__nusers == 1)
-        goto dead_robust_mutex;
-
-      return EBUSY;
-    }
+  if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0
+      && mutex->__data.__nusers != 0)
+    return EBUSY;

   /* Set to an invalid value.  */
 dead_robust_mutex:
nptl/pthread_mutex_init.c
@@ -22,7 +22,6 @@
 #include <string.h>
 #include "pthreadP.h"

-
 static const struct pthread_mutexattr default_attr =
 {
   /* Default is a normal mutex, not shared between processes.  */
@@ -42,10 +41,6 @@ __pthread_mutex_init (mutex, mutexattr)
   imutexattr = (const struct pthread_mutexattr *) mutexattr ?: &default_attr;

   /* Sanity checks.  */
-  // XXX For now we cannot implement robust mutexes if they are shared.
-  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0
-      && (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0)
-    return ENOTSUP;
   // XXX For now we don't support priority inherited or priority protected
   // XXX mutexes.
   if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
@@ -57,8 +52,18 @@ __pthread_mutex_init (mutex, mutexattr)

   /* Copy the values from the attribute.  */
   mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

   if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
-    mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_PRIVATE_NP;
+    {
+#ifndef __ASSUME_SET_ROBUST_LIST
+      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
+          && __set_robust_list_avail < 0)
+        return ENOTSUP;
+#endif
+
+      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
+    }

   switch ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
           >> PTHREAD_MUTEXATTR_PROTOCOL_SHIFT)
     {
nptl/pthread_mutex_lock.c
@@ -108,25 +108,33 @@ __pthread_mutex_lock (mutex)
       assert (mutex->__data.__owner == 0);
       break;

-    case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     &mutex->__data.__list.__next);
+
       oldval = mutex->__data.__lock;
       do
         {
+        again:
           if ((oldval & FUTEX_OWNER_DIED) != 0)
             {
               /* The previous owner died.  Try locking the mutex.  */
-              int newval;
-              while ((newval
-                      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
-                                                             id, oldval))
-                     != oldval)
+              int newval = id;
+#ifdef NO_INCR
+              newval |= FUTEX_WAITERS;
+#endif
+
+              newval
+                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       newval, oldval);
+
+              if (newval != oldval)
                 {
-                  if ((newval & FUTEX_OWNER_DIED) == 0)
-                    goto normal;
                   oldval = newval;
+                  goto again;
                 }

               /* We got the mutex.  */
@@ -135,6 +143,7 @@ __pthread_mutex_lock (mutex)
               mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

               ENQUEUE_MUTEX (mutex);
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

               /* Note that we deliberately exit here.  If we fall
                  through to the end of the function __nusers would be
@@ -149,18 +158,23 @@ __pthread_mutex_lock (mutex)
               return EOWNERDEAD;
             }

-        normal:
           /* Check whether we already hold the mutex.  */
-          if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
-                                == id, 0))
+          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
             {
               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
-                return EDEADLK;
+                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+                {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+                  return EDEADLK;
+                }

               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                 {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+
                   /* Just bump the counter.  */
                   if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                     /* Overflow of the counter.  */
@@ -180,6 +194,7 @@ __pthread_mutex_lock (mutex)
                   /* This mutex is now not recoverable.  */
                   mutex->__data.__count = 0;
                   lll_mutex_unlock (mutex->__data.__lock);
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                   return ENOTRECOVERABLE;
                 }
             }
@@ -187,6 +202,7 @@ __pthread_mutex_lock (mutex)

           mutex->__data.__count = 1;
           ENQUEUE_MUTEX (mutex);
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           break;

         default:
nptl/pthread_mutex_timedlock.c
@@ -103,25 +103,27 @@ pthread_mutex_timedlock (mutex, abstime)
         }
       break;

-    case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     &mutex->__data.__list.__next);
+
       oldval = mutex->__data.__lock;
       do
         {
+        again:
           if ((oldval & FUTEX_OWNER_DIED) != 0)
             {
               /* The previous owner died.  Try locking the mutex.  */
-              int newval;
-              while ((newval
-                      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
-                                                             id, oldval))
-                     != oldval)
+              int newval
+                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       id, oldval);
+              if (newval != oldval)
                 {
-                  if ((newval & FUTEX_OWNER_DIED) == 0)
-                    goto normal;
                   oldval = newval;
+                  goto again;
                 }

               /* We got the mutex.  */
@@ -130,6 +132,7 @@ pthread_mutex_timedlock (mutex, abstime)
               mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

               ENQUEUE_MUTEX (mutex);
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

               /* Note that we deliberately exist here.  If we fall
                  through to the end of the function __nusers would be
@@ -138,18 +141,23 @@ pthread_mutex_timedlock (mutex, abstime)
               return EOWNERDEAD;
             }

-        normal:
           /* Check whether we already hold the mutex.  */
-          if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
-                                == id, 0))
+          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
             {
               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
-                return EDEADLK;
+                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+                {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+                  return EDEADLK;
+                }

               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                 {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+
                   /* Just bump the counter.  */
                   if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                     /* Overflow of the counter.  */
@@ -170,6 +178,7 @@ pthread_mutex_timedlock (mutex, abstime)
                   /* This mutex is now not recoverable.  */
                   mutex->__data.__count = 0;
                   lll_mutex_unlock (mutex->__data.__lock);
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                   return ENOTRECOVERABLE;
                 }

@@ -182,6 +191,7 @@ pthread_mutex_timedlock (mutex, abstime)

           mutex->__data.__count = 1;
           ENQUEUE_MUTEX (mutex);
+          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
           break;

         default:
nptl/pthread_mutex_trylock.c
@@ -77,25 +77,28 @@ __pthread_mutex_trylock (mutex)
       return 0;


-    case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     &mutex->__data.__list.__next);
+
       oldval = mutex->__data.__lock;
       do
         {
+        again:
           if ((oldval & FUTEX_OWNER_DIED) != 0)
             {
               /* The previous owner died.  Try locking the mutex.  */
-              int newval;
-              while ((newval
-                      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
-                                                             id, oldval))
-                     != oldval)
+              int newval
+                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                       id, oldval);
+
+              if (newval != oldval)
                 {
-                  if ((newval & FUTEX_OWNER_DIED) == 0)
-                    goto normal;
                   oldval = newval;
+                  goto again;
                 }

               /* We got the mutex.  */
@@ -104,6 +107,7 @@ __pthread_mutex_trylock (mutex)
               mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

               ENQUEUE_MUTEX (mutex);
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

               /* Note that we deliberately exist here.  If we fall
                  through to the end of the function __nusers would be
@@ -112,18 +116,23 @@ __pthread_mutex_trylock (mutex)
               return EOWNERDEAD;
             }

-        normal:
           /* Check whether we already hold the mutex.  */
-          if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
-                                == id, 0))
+          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
             {
               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
-                return EDEADLK;
+                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
+                {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+                  return EDEADLK;
+                }

               if (mutex->__data.__kind
-                  == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                 {
+                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                                 NULL);
+
                   /* Just bump the counter.  */
                   if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                     /* Overflow of the counter.  */
@@ -137,7 +146,11 @@ __pthread_mutex_trylock (mutex)

           oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
           if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
-            return EBUSY;
+            {
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
+
+              return EBUSY;
+            }

         robust:
           if (__builtin_expect (mutex->__data.__owner
@@ -147,12 +160,14 @@ __pthread_mutex_trylock (mutex)
               mutex->__data.__count = 0;
               if (oldval == id)
                 lll_mutex_unlock (mutex->__data.__lock);
+              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
               return ENOTRECOVERABLE;
             }
         }
       while ((oldval & FUTEX_OWNER_DIED) != 0);

       ENQUEUE_MUTEX (mutex);
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
nptl/pthread_mutex_unlock.c
@@ -63,10 +63,12 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       lll_mutex_unlock (mutex->__data.__lock);
       break;

-    case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
       /* Recursive mutex.  */
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
-          == THREAD_GETMEM (THREAD_SELF, tid))
+          == THREAD_GETMEM (THREAD_SELF, tid)
+          && __builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_INCONSISTENT, 0))
         {
           if (--mutex->__data.__count != 0)
             /* We still hold the mutex.  */
@@ -84,9 +86,9 @@ __pthread_mutex_unlock_usercnt (mutex, decr)

       goto robust;

-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
+    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
       if ((mutex->__data.__lock & FUTEX_TID_MASK)
           != THREAD_GETMEM (THREAD_SELF, tid)
           || ! lll_mutex_islocked (mutex->__data.__lock))
@@ -102,6 +104,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)

     robust:
       /* Remove mutex from the list.  */
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
+                     &mutex->__data.__list.__next);
       DEQUEUE_MUTEX (mutex);

       mutex->__data.__owner = newowner;
@@ -111,6 +115,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)

       /* Unlock.  */
       lll_robust_mutex_unlock (mutex->__data.__lock);
+
+      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
       break;

     default:
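One pattern repeats through all the pthread_mutex_* hunks above: every ENQUEUE_MUTEX/DEQUEUE_MUTEX is bracketed by stores to robust_head.list_op_pending, first announcing which mutex is about to be enqueued or dequeued, then clearing the field once the lock word and the list agree again. That closes the window where a thread dies holding a mutex that is not yet (or no longer) on its list. Schematically, with toy types rather than glibc's internals:

struct toy_head { void *list_op_pending; };

/* Announce / act / clear: if the thread dies between the two stores,
   recovery can still find the in-flight entry via list_op_pending.  */
static void
update_with_pending (struct toy_head *h, void *entry,
                     void (*list_update) (void *))
{
  h->list_op_pending = entry;   /* announce intent */
  list_update (entry);          /* take or release lock, fix the list */
  h->list_op_pending = NULL;    /* lock word and list consistent again */
}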
nptl/tst-robust8.c (new file, 264 lines)
@@ -0,0 +1,264 @@
+#include <pthread.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+
+
+static void prepare (void);
+#define PREPARE(argc, argv) prepare ()
+static int do_test (void);
+#define TEST_FUNCTION do_test ()
+#define TIMEOUT 3
+#include "../test-skeleton.c"
+
+
+static int fd;
+#define N 100
+
+static void
+prepare (void)
+{
+  fd = create_temp_file ("tst-robust8", NULL);
+  if (fd == -1)
+    exit (1);
+}
+
+
+#define THESIGNAL SIGKILL
+#define ROUNDS 5
+#define THREADS 9
+
+
+static const struct timespec before = { 0, 0 };
+
+
+static pthread_mutex_t *map;
+
+
+static void *
+tf (void *arg)
+{
+  long int nr = (long int) arg;
+  int fct = nr % 3;
+
+  uint8_t state[N];
+  memset (state, '\0', sizeof (state));
+
+  while (1)
+    {
+      int r = random () % N;
+      if (state[r] == 0)
+        {
+          int e;
+
+          switch (fct)
+            {
+            case 0:
+              e = pthread_mutex_lock (&map[r]);
+              if (e != 0)
+                {
+                  printf ("mutex_lock of %d in thread %ld failed with %d\n",
+                          r, nr, e);
+                  exit (1);
+                }
+              state[r] = 1;
+              break;
+            case 1:
+              e = pthread_mutex_timedlock (&map[r], &before);
+              if (e != 0 && e != ETIMEDOUT)
+                {
+                  printf ("\
+mutex_timedlock of %d in thread %ld failed with %d\n",
+                          r, nr, e);
+                  exit (1);
+                }
+              break;
+            default:
+              e = pthread_mutex_trylock (&map[r]);
+              if (e != 0 && e != EBUSY)
+                {
+                  printf ("mutex_trylock of %d in thread %ld failed with %d\n",
+                          r, nr, e);
+                  exit (1);
+                }
+              break;
+            }
+
+          if (e == EOWNERDEAD)
+            pthread_mutex_consistent_np (&map[r]);
+
+          if (e == 0 || e == EOWNERDEAD)
+            state[r] = 1;
+        }
+      else
+        {
+          int e = pthread_mutex_unlock (&map[r]);
+          if (e != 0)
+            {
+              printf ("mutex_unlock of %d in thread %ld failed with %d\n",
+                      r, nr, e);
+              exit (1);
+            }
+
+          state[r] = 0;
+        }
+    }
+}
+
+
+static void
+child (int round)
+{
+  for (int thread = 1; thread <= THREADS; ++thread)
+    {
+      pthread_t th;
+      if (pthread_create (&th, NULL, tf, (void *) (long int) thread) != 0)
+        {
+          printf ("cannot create thread %d in round %d\n", thread, round);
+          exit (1);
+        }
+    }
+
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = 1000000000 / ROUNDS;
+  while (nanosleep (&ts, &ts) != 0)
+    /* nothing */;
+
+  /* Time to die.  */
+  kill (getpid (), THESIGNAL);
+
+  /* We better never get here.  */
+  abort ();
+}
+
+
+static int
+do_test (void)
+{
+  if (ftruncate (fd, N * sizeof (pthread_mutex_t)) != 0)
+    {
+      puts ("cannot size new file");
+      return 1;
+    }
+
+  map = mmap (NULL, N * sizeof (pthread_mutex_t), PROT_READ | PROT_WRITE,
+              MAP_SHARED, fd, 0);
+  if (map == MAP_FAILED)
+    {
+      puts ("mapping failed");
+      return 1;
+    }
+
+  pthread_mutexattr_t ma;
+  if (pthread_mutexattr_init (&ma) != 0)
+    {
+      puts ("mutexattr_init failed");
+      return 0;
+    }
+  if (pthread_mutexattr_setrobust_np (&ma, PTHREAD_MUTEX_ROBUST_NP) != 0)
+    {
+      puts ("mutexattr_setrobust failed");
+      return 1;
+    }
+  if (pthread_mutexattr_setpshared (&ma, PTHREAD_PROCESS_SHARED) != 0)
+    {
+      puts ("mutexattr_setpshared failed");
+      return 1;
+    }
+
+  for (int round = 1; round <= ROUNDS; ++round)
+    {
+      for (int n = 0; n < N; ++n)
+        {
+          int e = pthread_mutex_init (&map[n], &ma);
+          if (e == ENOTSUP)
+            {
+              puts ("cannot support pshared robust mutexes");
+              return 0;
+            }
+          if (e != 0)
+            {
+              printf ("mutex_init %d in round %d failed\n", n + 1, round);
+              return 1;
+            }
+        }
+
+      pid_t p = fork ();
+      if (p == -1)
+        {
+          printf ("fork in round %d failed\n", round);
+          return 1;
+        }
+      if (p == 0)
+        child (round);
+
+      int status;
+      if (TEMP_FAILURE_RETRY (waitpid (p, &status, 0)) != p)
+        {
+          printf ("waitpid in round %d failed\n", round);
+          return 1;
+        }
+      if (!WIFSIGNALED (status))
+        {
+          printf ("child did not die of a signal in round %d\n", round);
+          return 1;
+        }
+      if (WTERMSIG (status) != THESIGNAL)
+        {
+          printf ("child did not die of signal %d in round %d\n",
+                  THESIGNAL, round);
+          return 1;
+        }
+
+      for (int n = 0; n < N; ++n)
+        {
+          int e = pthread_mutex_lock (&map[n]);
+          if (e != 0 && e != EOWNERDEAD)
+            {
+              printf ("mutex_lock %d failed in round %d\n", n + 1, round);
+              return 1;
+            }
+        }
+
+      for (int n = 0; n < N; ++n)
+        if (pthread_mutex_unlock (&map[n]) != 0)
+          {
+            printf ("mutex_unlock %d failed in round %d\n", n + 1, round);
+            return 1;
+          }
+
+      for (int n = 0; n < N; ++n)
+        {
+          int e = pthread_mutex_destroy (&map[n]);
+          if (e != 0)
+            {
+              printf ("mutex_destroy %d in round %d failed with %d\n",
+                      n + 1, round, e);
+              printf("nusers = %d\n", (int) map[n].__data.__nusers);
+              return 1;
+            }
+        }
+    }
+
+  if (pthread_mutexattr_destroy (&ma) != 0)
+    {
+      puts ("mutexattr_destroy failed");
+      return 1;
+    }
+
+  if (munmap (map, N * sizeof (pthread_mutex_t)) != 0)
+    {
+      puts ("munmap failed");
+      return 1;
+    }
+
+  return 0;
+}
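The new test targets exactly the scenario the kernel support exists for: each round it initializes 100 process-shared robust mutexes in a file-backed mapping, forks a child that hammers them from nine threads via pthread_mutex_lock, pthread_mutex_timedlock, and pthread_mutex_trylock, kills the child with SIGKILL mid-operation, and then verifies in the parent that every mutex can still be locked (possibly returning EOWNERDEAD), unlocked, and destroyed.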
sysdeps/unix/sysv/linux/kernel-features.h
@@ -468,3 +468,9 @@
 	&& (defined __i386__ || defined __x86_64__)
 # define __ASSUME_ATFCTS	1
 #endif
+
+/* Support for inter-process robust mutexes was added in 2.6.17.  */
+#if __LINUX_KERNEL_VERSION >= 0x020611 \
+    && (defined __i386__ || defined __x86_64__)
+# define __ASSUME_SET_ROBUST_LIST	1
+#endif
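For reference, __LINUX_KERNEL_VERSION packs a version as (major << 16) | (minor << 8) | patch, so the 0x020611 threshold is (2 << 16) | (6 << 8) | 17, i.e. kernel 2.6.17, matching the comment.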