mirror of git://sourceware.org/git/glibc.git
Update.
* sysdeps/pthread/pthread.h: Add prototypes for pthread_spin_init,
pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
and pthread_spin_unlock.
* sysdeps/pthread/bits/pthreadtypes.h: Change struct _pthread_fastlock
into pthread_spinlock_t.  Change all uses.
* spinlock.c: Implement pthread_spin_lock.
Rename __pthread_unlock to __pthread_spin_unlock and define weak
alias for real name.
Define pthread_spin_trylock, pthread_spin_init, and
pthread_spin_destroy.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* spinlock.h: Rename __pthread_unlock to __pthread_spin_unlock.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* Versions [libpthread] (GLIBC_2.2): Add pthread_spin_init,
pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
and pthread_spin_unlock.
* cancel.c: Use __pthread_spin_unlock instead of __pthread_unlock.
Change all uses of _pthread_fastlock to pthread_spinlock_t.
* condvar.c: Likewise.
* internals.h: Likewise.
* join.c: Likewise.
* manager.c: Likewise.
* mutex.c: Likewise.
* pthread.c: Likewise.
* rwlock.c: Likewise.
* semaphore.c: Likewise.
* signals.c: Likewise.
parent b3ae0650bc
commit d8d914df68
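As a quick orientation before the diff, here is a minimal usage sketch of the pthread_spin_* interface this commit introduces. It is a sketch only: the worker function, thread count, iteration count, and counter are illustrative and not part of the commit.

  /* Usage sketch only: worker/counter names and counts are illustrative. */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_spinlock_t lock;
  static long counter;

  static void *worker(void *arg)
  {
    int i;
    (void) arg;
    for (i = 0; i < 100000; i++) {
      pthread_spin_lock(&lock);     /* spins instead of sleeping */
      counter++;                    /* keep the critical section short */
      pthread_spin_unlock(&lock);
    }
    return NULL;
  }

  int main(void)
  {
    pthread_t t1, t2;
    pthread_spin_init(&lock, 0);    /* 0: not shared across processes */
    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    pthread_spin_destroy(&lock);
    printf("counter = %ld\n", counter);
    return 0;
  }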
@@ -1,5 +1,33 @@
+2000-04-12  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/pthread/pthread.h: Add prototypes for pthread_spin_init,
+	pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
+	and pthread_spin_unlock.
+	* sysdeps/pthread/bits/pthreadtypes.h: Change struct _pthread_fastlock
+	into pthread_spinlock_t.  Change all uses.
+	* spinlock.c: Implement pthread_spin_lock.
+	Rename __pthread_unlock to __pthread_spin_unlock and define weak
+	alias for real name.
+	Define pthread_spin_trylock, pthread_spin_init, and
+	pthread_spin_destroy.
+	Change all uses of _pthread_fastlock to pthread_spinlock_t.
+	* spinlock.h: Rename __pthread_unlock to __pthread_spin_unlock.
+	Change all uses of _pthread_fastlock to pthread_spinlock_t.
+	* Versions [libpthread] (GLIBC_2.2): Add pthread_spin_init,
+	pthread_spin_destroy, pthread_spin_lock, pthread_spin_trylock,
+	and pthread_spin_unlock.
+	* cancel.c: Use __pthread_spin_unlock instead of __pthread_unlock.
+	Change all uses of _pthread_fastlock to pthread_spinlock_t.
+	* condvar.c: Likewise.
+	* internals.h: Likewise.
+	* join.c: Likewise.
+	* manager.c: Likewise.
+	* mutex.c: Likewise.
+	* pthread.c: Likewise.
+	* rwlock.c: Likewise.
+	* semaphore.c: Likewise.
+	* signals.c: Likewise.
+
+	* sysdeps/unix/sysv/linux/bits/posix_opt.h: Add various new POSIX
+	macros.
+	* sysdeps/unix/sysv/linux/i386/bits/posix_opt.h: New file.
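The ChangeLog entry above does not name the new posix_opt.h macros. Assuming the standard POSIX option macro for this feature — _POSIX_SPIN_LOCKS, which is an assumption here rather than something the diff shows — the conventional compile-time probe looks like this sketch:

  /* Assumed macro name: the ChangeLog only says "various new POSIX
     macros"; _POSIX_SPIN_LOCKS is the standard option macro for
     spin lock support. */
  #include <unistd.h>

  #if defined _POSIX_SPIN_LOCKS && _POSIX_SPIN_LOCKS > 0
  #define HAVE_SPIN_LOCKS 1       /* pthread_spin_* is available */
  #else
  #define HAVE_SPIN_LOCKS 0       /* fall back to pthread_mutex_t */
  #endif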
@@ -132,5 +132,7 @@ libpthread {

# New functions from IEEE Std. 1003.1-200x.
sem_timedwait;
+pthread_spin_destroy; pthread_spin_init; pthread_spin_lock;
+pthread_spin_trylock; pthread_spin_unlock;
}
}
@@ -58,14 +58,14 @@ int pthread_cancel(pthread_t thread)

__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}

th = handle->h_descr;

if (th->p_canceled) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return 0;
}

@@ -76,7 +76,7 @@ int pthread_cancel(pthread_t thread)
/* If the thread has registered an extrication interface, then
invoke the interface. If it returns 1, then we succeeded in
dequeuing the thread from whatever waiting object it was enqueued
-with. In that case, it is our responsibility to wake it up.
+with. In that case, it is our responsibility to wake it up.
And also to set the p_woken_by_cancel flag so the woken thread
can tell that it was woken by cancellation. */

@@ -85,7 +85,7 @@ int pthread_cancel(pthread_t thread)
th->p_woken_by_cancel = dorestart;
}

-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);

/* If the thread has suspended or is about to, then we unblock it by
issuing a restart, instead of a cancel signal. Otherwise we send

@@ -97,7 +97,7 @@ int pthread_cancel(pthread_t thread)

if (dorestart)
restart(th);
-else
+else
kill(pid, __pthread_sig_cancel);

return 0;
@@ -66,7 +66,7 @@ static int cond_extricate_func(void *obj, pthread_descr th)

__pthread_lock(&cond->__c_lock, self);
did_remove = remove_from_queue(&cond->__c_waiting, th);
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

return did_remove;
}

@@ -101,7 +101,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);

@@ -161,7 +161,7 @@ pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);

@@ -231,7 +231,7 @@ pthread_cond_timedwait_relative_old(pthread_cond_t *cond,
int was_on_queue;
__pthread_lock(&cond->__c_lock, self);
was_on_queue = remove_from_queue(&cond->__c_waiting, self);
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);

@@ -295,7 +295,7 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond,
enqueue(&cond->__c_waiting, self);
else
already_canceled = 1;
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);

@@ -361,7 +361,7 @@ pthread_cond_timedwait_relative_new(pthread_cond_t *cond,

__pthread_lock(&cond->__c_lock, self);
was_on_queue = remove_from_queue(&cond->__c_waiting, self);
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);

if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);

@@ -402,7 +402,7 @@ int pthread_cond_signal(pthread_cond_t *cond)

__pthread_lock(&cond->__c_lock, NULL);
th = dequeue(&cond->__c_waiting);
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);
if (th != NULL) restart(th);
return 0;
}

@@ -415,7 +415,7 @@ int pthread_cond_broadcast(pthread_cond_t *cond)
/* Copy the current state of the waiting queue and empty it */
tosignal = cond->__c_waiting;
cond->__c_waiting = NULL;
-__pthread_unlock(&cond->__c_lock);
+__pthread_spin_unlock(&cond->__c_lock);
/* Now signal each process in the queue */
while ((th = dequeue(&tosignal)) != NULL) restart(th);
return 0;
@@ -131,7 +131,7 @@ struct _pthread_descr_struct {
pthread_t p_tid; /* Thread identifier */
int p_pid; /* PID of Unix process */
int p_priority; /* Thread priority (== 0 if not realtime) */
-struct _pthread_fastlock * p_lock; /* Spinlock for synchronized accesses */
+pthread_spinlock_t * p_lock; /* Spinlock for synchronized accesses */
int p_signal; /* last signal received */
sigjmp_buf * p_signal_jmp; /* where to siglongjmp on a signal or NULL */
sigjmp_buf * p_cancel_jmp; /* where to siglongjmp on a cancel or NULL */

@@ -183,7 +183,7 @@ struct _pthread_descr_struct {
typedef struct pthread_handle_struct * pthread_handle;

struct pthread_handle_struct {
-struct _pthread_fastlock h_lock; /* Fast lock for synchronized access */
+pthread_spinlock_t h_lock; /* Fast lock for synchronized access */
pthread_descr h_descr; /* Thread descriptor or NULL if invalid */
char * h_bottom; /* Lowest address in the stack thread */
};
@@ -62,7 +62,7 @@ void pthread_exit(void * retval)
}
/* See if someone is joining on us */
joining = THREAD_GETMEM(self, p_joining);
-__pthread_unlock(THREAD_GETMEM(self, p_lock));
+__pthread_spin_unlock(THREAD_GETMEM(self, p_lock));
/* Restart joining thread if any */
if (joining != NULL) restart(joining);
/* If this is the initial thread, block until all threads have terminated.

@@ -93,7 +93,7 @@ static int join_extricate_func(void *obj, pthread_descr th)
jo = handle->h_descr;
did_remove = jo->p_joining != NULL;
jo->p_joining = NULL;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);

return did_remove;
}

@@ -113,38 +113,38 @@ int pthread_join(pthread_t thread_id, void ** thread_return)

__pthread_lock(&handle->h_lock, self);
if (invalid_handle(handle, thread_id)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (th == self) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return EDEADLK;
}
/* If detached or already joined, error */
if (th->p_detached || th->p_joining != NULL) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return EINVAL;
}
/* If not terminated yet, suspend ourselves. */
if (! th->p_terminated) {
/* Register extrication interface */
-__pthread_set_own_extricate_if(self, &extr);
+__pthread_set_own_extricate_if(self, &extr);
if (!(THREAD_GETMEM(self, p_canceled)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
th->p_joining = self;
else
already_canceled = 1;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);

if (already_canceled) {
-__pthread_set_own_extricate_if(self, 0);
+__pthread_set_own_extricate_if(self, 0);
pthread_exit(PTHREAD_CANCELED);
}

suspend(self);
/* Deregister extrication interface */
-__pthread_set_own_extricate_if(self, 0);
+__pthread_set_own_extricate_if(self, 0);

/* This is a cancellation point */
if (THREAD_GETMEM(self, p_woken_by_cancel)

@@ -156,7 +156,7 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
}
/* Get return value */
if (thread_return != NULL) *thread_return = th->p_retval;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
/* Send notification to thread manager */
if (__pthread_manager_request >= 0) {
request.req_thread = self;

@@ -177,24 +177,24 @@ int pthread_detach(pthread_t thread_id)

__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread_id)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
/* If already detached, error */
if (th->p_detached) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return EINVAL;
}
/* If already joining, don't do anything. */
if (th->p_joining != NULL) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return 0;
}
/* Mark as detached */
th->p_detached = 1;
terminated = th->p_terminated;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
/* If already terminated, notify thread manager to reclaim resources */
if (terminated && __pthread_manager_request >= 0) {
request.req_thread = thread_self();
@@ -194,7 +194,7 @@ int __pthread_manager_event(void *arg)
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM((&__pthread_manager_thread), p_lock), NULL);
/* Free it immediately. */
-__pthread_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));
+__pthread_spin_unlock (THREAD_GETMEM((&__pthread_manager_thread), p_lock));

return __pthread_manager(arg);
}

@@ -260,7 +260,7 @@ static int pthread_start_thread_event(void *arg)
/* Get the lock the manager will free once all is correctly set up. */
__pthread_lock (THREAD_GETMEM(self, p_lock), NULL);
/* Free it immediately. */
-__pthread_unlock (THREAD_GETMEM(self, p_lock));
+__pthread_spin_unlock (THREAD_GETMEM(self, p_lock));

/* Continue with the real function. */
return pthread_start_thread (arg);

@@ -460,7 +460,7 @@ static int pthread_handle_create(pthread_t *thread, const pthread_attr_t *attr,
__linuxthreads_create_event ();

/* Now restart the thread. */
-__pthread_unlock(new_thread->p_lock);
+__pthread_spin_unlock(new_thread->p_lock);
}
}
}

@@ -509,7 +509,7 @@ static void pthread_free(pthread_descr th)
__pthread_lock(&handle->h_lock, NULL);
handle->h_descr = NULL;
handle->h_bottom = (char *)(-1L);
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
#ifdef FREE_THREAD_SELF
FREE_THREAD_SELF(th, th->p_nr);
#endif

@@ -580,7 +580,7 @@ static void pthread_exited(pid_t pid)
}
}
detached = th->p_detached;
-__pthread_unlock(th->p_lock);
+__pthread_spin_unlock(th->p_lock);
if (detached)
pthread_free(th);
break;

@@ -623,19 +623,19 @@ static void pthread_handle_free(pthread_t th_id)
if (invalid_handle(handle, th_id)) {
/* pthread_reap_children has deallocated the thread already,
nothing needs to be done */
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return;
}
th = handle->h_descr;
if (th->p_exited) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
pthread_free(th);
} else {
/* The Unix process of the thread is still running.
Mark the thread as detached so that the thread manager will
deallocate its resources when the Unix process exits. */
th->p_detached = 1;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
}
}
@@ -109,7 +109,7 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
{
switch (mutex->__m_kind) {
case PTHREAD_MUTEX_FAST_NP:
-__pthread_unlock(&mutex->__m_lock);
+__pthread_spin_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_RECURSIVE_NP:
if (mutex->__m_count > 0) {

@@ -117,13 +117,13 @@ int __pthread_mutex_unlock(pthread_mutex_t * mutex)
return 0;
}
mutex->__m_owner = NULL;
-__pthread_unlock(&mutex->__m_lock);
+__pthread_spin_unlock(&mutex->__m_lock);
return 0;
case PTHREAD_MUTEX_ERRORCHECK_NP:
if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
return EPERM;
mutex->__m_owner = NULL;
-__pthread_unlock(&mutex->__m_lock);
+__pthread_spin_unlock(&mutex->__m_lock);
return 0;
default:
return EINVAL;
@@ -458,7 +458,7 @@ int __pthread_initialize_manager(void)
__linuxthreads_create_event ();

/* Now restart the thread. */
-__pthread_unlock(__pthread_manager_thread.p_lock);
+__pthread_spin_unlock(__pthread_manager_thread.p_lock);
}
}
}

@@ -585,16 +585,16 @@ int pthread_setschedparam(pthread_t thread, int policy,

__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}
th = handle->h_descr;
if (__sched_setscheduler(th->p_pid, policy, param) == -1) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return errno;
}
th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
if (__pthread_manager_request >= 0)
__pthread_manager_adjust_prio(th->p_priority);
return 0;

@@ -608,11 +608,11 @@ int pthread_getschedparam(pthread_t thread, int *policy,

__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
pol = __sched_getscheduler(pid);
if (pol == -1) return errno;
if (__sched_getparam(pid, param) == -1) return errno;

@@ -809,7 +809,7 @@ void __pthread_set_own_extricate_if(pthread_descr self, pthread_extricate_if *peif)
{
__pthread_lock(self->p_lock, self);
THREAD_SETMEM(self, p_extricate, peif);
-__pthread_unlock(self->p_lock);
+__pthread_spin_unlock(self->p_lock);
}

/* Primitives for controlling thread execution */
@@ -217,7 +217,7 @@ __pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
__pthread_lock (&rwlock->__rw_lock, NULL);
readers = rwlock->__rw_readers;
writer = rwlock->__rw_writer;
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);

if (readers > 0 || writer != NULL)
return EBUSY;

@@ -247,12 +247,12 @@ __pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
break;

enqueue (&rwlock->__rw_read_waiting, self);
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}

++rwlock->__rw_readers;
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);

if (have_lock_already || out_of_mem)
{

@@ -291,7 +291,7 @@ __pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
retval = 0;
}

-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);

if (retval == 0)
{

@@ -320,13 +320,13 @@ __pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
if (rwlock->__rw_readers == 0 && rwlock->__rw_writer == NULL)
{
rwlock->__rw_writer = self;
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
return 0;
}

/* Suspend ourselves, then try again */
enqueue (&rwlock->__rw_write_waiting, self);
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
suspend (self); /* This is not a cancellation point */
}
}

@@ -344,7 +344,7 @@ __pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
rwlock->__rw_writer = thread_self ();
result = 0;
}
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);

return result;
}

@@ -363,7 +363,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Unlocking a write lock. */
if (rwlock->__rw_writer != thread_self ())
{
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
return EPERM;
}
rwlock->__rw_writer = NULL;

@@ -375,14 +375,14 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Restart all waiting readers. */
torestart = rwlock->__rw_read_waiting;
rwlock->__rw_read_waiting = NULL;
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
while ((th = dequeue (&torestart)) != NULL)
restart (th);
}
else
{
/* Restart one waiting writer. */
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
restart (th);
}
}

@@ -391,7 +391,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
/* Unlocking a read lock. */
if (rwlock->__rw_readers == 0)
{
-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
return EPERM;
}

@@ -402,7 +402,7 @@ __pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
else
th = NULL;

-__pthread_unlock (&rwlock->__rw_lock);
+__pthread_spin_unlock (&rwlock->__rw_lock);
if (th != NULL)
restart (th);
@@ -33,7 +33,7 @@ int __new_sem_init(sem_t *sem, int pshared, unsigned int value)
errno = ENOSYS;
return -1;
}
-__pthread_init_lock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_init_lock((pthread_spinlock_t *) &sem->__sem_lock);
sem->__sem_value = value;
sem->__sem_waiting = NULL;
return 0;

@@ -48,9 +48,9 @@ static int new_sem_extricate_func(void *obj, pthread_descr th)
sem_t *sem = obj;
int did_remove = 0;

-__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
did_remove = remove_from_queue(&sem->__sem_waiting, th);
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);

return did_remove;
}

@@ -65,10 +65,10 @@ int __new_sem_wait(sem_t * sem)
extr.pu_object = sem;
extr.pu_extricate_func = new_sem_extricate_func;

-__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
sem->__sem_value--;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
return 0;
}
/* Register extrication interface */

@@ -79,7 +79,7 @@ int __new_sem_wait(sem_t * sem)
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);

if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);

@@ -106,7 +106,7 @@ int __new_sem_trywait(sem_t * sem)
{
int retval;

-__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, NULL);
+__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, NULL);
if (sem->__sem_value == 0) {
errno = EAGAIN;
retval = -1;

@@ -114,7 +114,7 @@ int __new_sem_trywait(sem_t * sem)
sem->__sem_value--;
retval = 0;
}
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
return retval;
}

@@ -125,19 +125,19 @@ int __new_sem_post(sem_t * sem)
struct pthread_request request;

if (THREAD_GETMEM(self, p_in_sighandler) == NULL) {
-__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
if (sem->__sem_waiting == NULL) {
if (sem->__sem_value >= SEM_VALUE_MAX) {
/* Overflow */
errno = ERANGE;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
return -1;
}
sem->__sem_value++;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
} else {
th = dequeue(&sem->__sem_waiting);
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
restart(th);
}
} else {

@@ -200,17 +200,17 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
sigset_t unblock;
sigset_t initial_mask;

-__pthread_lock((struct _pthread_fastlock *) &sem->__sem_lock, self);
+__pthread_lock((pthread_spinlock_t *) &sem->__sem_lock, self);
if (sem->__sem_value > 0) {
--sem->__sem_value;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
return 0;
}

if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
/* The standard requires that if the function would block and the
time value is illegal, the function returns with an error. */
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);
return EINVAL;
}

@@ -226,7 +226,7 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
enqueue(&sem->__sem_waiting, self);
else
already_canceled = 1;
-__pthread_unlock((struct _pthread_fastlock *) &sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *) &sem->__sem_lock);

if (already_canceled) {
__pthread_set_own_extricate_if(self, 0);

@@ -288,9 +288,9 @@ int sem_timedwait(sem_t *sem, const struct timespec *abstime)
/* __pthread_lock will queue back any spurious restarts that
may happen to it. */

-__pthread_lock((struct _pthread_fastlock *)&sem->__sem_lock, self);
+__pthread_lock((pthread_spinlock_t *)&sem->__sem_lock, self);
was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
-__pthread_unlock((struct _pthread_fastlock *)&sem->__sem_lock);
+__pthread_spin_unlock((pthread_spinlock_t *)&sem->__sem_lock);

if (was_on_queue) {
__pthread_set_own_extricate_if(self, 0);
@@ -57,11 +57,11 @@ int pthread_kill(pthread_t thread, int signo)

__pthread_lock(&handle->h_lock, NULL);
if (invalid_handle(handle, thread)) {
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
return ESRCH;
}
pid = handle->h_descr->p_pid;
-__pthread_unlock(&handle->h_lock);
+__pthread_spin_unlock(&handle->h_lock);
if (kill(pid, signo) == -1)
return errno;
else
@@ -22,10 +22,10 @@
#include "spinlock.h"
#include "restart.h"

-/* The status field of a fastlock has the following meaning:
-0: fastlock is free
-1: fastlock is taken, no thread is waiting on it
-ADDR: fastlock is taken, ADDR is address of thread descriptor for
+/* The status field of a spinlock has the following meaning:
+0: spinlock is free
+1: spinlock is taken, no thread is waiting on it
+ADDR: spinlock is taken, ADDR is address of thread descriptor for
first waiting thread, other waiting threads are linked via
their p_nextlock field.
The waiting list is not sorted by priority order.

@@ -36,7 +36,7 @@
This is safe because there are no concurrent __pthread_unlock
operations -- only the thread that locked the mutex can unlock it. */

-void internal_function __pthread_lock(struct _pthread_fastlock * lock,
+void internal_function __pthread_lock(pthread_spinlock_t * lock,
pthread_descr self)
{
long oldstatus, newstatus;

@@ -61,7 +61,7 @@ void internal_function __pthread_lock(pthread_spinlock_t * lock,
} while(! compare_and_swap(&lock->__status, oldstatus, newstatus,
&lock->__spinlock));

-/* Suspend with guard against spurious wakeup.
+/* Suspend with guard against spurious wakeup.
This can happen in pthread_cond_timedwait_relative, when the thread
wakes up due to timeout and is still on the condvar queue, and then
locks the queue to remove itself. At that point it may still be on the

@@ -82,9 +82,17 @@ void internal_function __pthread_lock(pthread_spinlock_t * lock,
/* Put back any resumes we caught that don't belong to us. */
while (spurious_wakeup_count--)
restart(self);
}

-void internal_function __pthread_unlock(struct _pthread_fastlock * lock)
+int __pthread_spin_lock(pthread_spinlock_t * lock)
+{
+__pthread_lock (lock, NULL);
+return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+int __pthread_spin_unlock(pthread_spinlock_t * lock)
{
long oldstatus;
pthread_descr thr, * ptr, * maxptr;

@@ -98,7 +106,7 @@ again:
be done here we would crash further down. */
if (! compare_and_swap(&lock->__status, oldstatus, 0, &lock->__spinlock))
goto again;
-return;
+return 0;
}
/* Find thread in waiting queue with maximal priority */
ptr = (pthread_descr *) &lock->__status;

@@ -142,7 +150,34 @@ again:
/* Wake up the selected waiting thread */
thr->p_nextlock = NULL;
restart(thr);

+return 0;
}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int __pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+return __pthread_trylock (lock);
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+int __pthread_spin_init(pthread_spinlock_t *lock, int pshared)
+{
+if (pshared != 0)
+return ENOSYS;
+
+__pthread_init_lock (lock);
+return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+int __pthread_spin_destroy(pthread_spinlock_t *lock)
+{
+/* Nothing to do. */
+return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)

/* Compare-and-swap emulation with a spinlock */
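The comment at the top of this file documents the lock-word convention: 0 means free; 1 means taken with no waiters; any other value is the address of the first waiting thread descriptor, with further waiters chained through p_nextlock. A hypothetical helper restating that convention — the function itself is illustrative and not part of the patch:

  /* Hypothetical helper: decodes the __status word per the comment
     above (0 = free, 1 = taken/no waiters, ADDR = first waiter). */
  static int spinlock_has_waiters(pthread_spinlock_t *lock)
  {
    long status = lock->__status;
    return status != 0 && status != 1;
  }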
@@ -50,17 +50,17 @@ static inline int compare_and_swap(long * ptr, long oldval, long newval,

/* Internal locks */

-extern void internal_function __pthread_lock(struct _pthread_fastlock * lock,
+extern void internal_function __pthread_lock(pthread_spinlock_t * lock,
pthread_descr self);
-extern void internal_function __pthread_unlock(struct _pthread_fastlock *lock);
+extern int __pthread_spin_unlock(pthread_spinlock_t *lock);

-static inline void __pthread_init_lock(struct _pthread_fastlock * lock)
+static inline void __pthread_init_lock(pthread_spinlock_t * lock)
{
lock->__status = 0;
lock->__spinlock = 0;
}

-static inline int __pthread_trylock (struct _pthread_fastlock * lock)
+static inline int __pthread_trylock (pthread_spinlock_t * lock)
{
long oldstatus;

@@ -99,4 +99,3 @@ static inline long atomic_decrement(struct pthread_atomic *pa)
}

#define ATOMIC_INITIALIZER { 0, 0 }
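Since pthread_spin_trylock simply forwards to the inline __pthread_trylock declared here, callers see the usual try-lock contract: 0 on success, a nonzero error code (EBUSY in LinuxThreads) when the lock is already held. A usage sketch under that assumption; the helper name is illustrative:

  /* Illustrative caller: try_increment is not part of the patch. */
  #include <pthread.h>

  static int try_increment(pthread_spinlock_t *lock, long *counter)
  {
    if (pthread_spin_trylock(lock) != 0)
      return 0;               /* lock was busy; caller may retry later */
    (*counter)++;
    pthread_spin_unlock(lock);
    return 1;
  }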
@@ -23,11 +23,11 @@
#include <bits/sched.h>

/* Fast locks (not abstract because mutexes and conditions aren't abstract). */
-struct _pthread_fastlock
+typedef struct
{
long int __status; /* "Free" or "taken" or head of waiting list */
int __spinlock; /* For compare-and-swap emulation */
-};
+} pthread_spinlock_t;

#ifndef _PTHREAD_DESCR_DEFINED
/* Thread descriptors */

@@ -54,7 +54,7 @@ typedef struct
/* Conditions (not abstract because of PTHREAD_COND_INITIALIZER) */
typedef struct
{
-struct _pthread_fastlock __c_lock; /* Protect against concurrent access */
+pthread_spinlock_t __c_lock; /* Protect against concurrent access */
_pthread_descr __c_waiting; /* Threads waiting on this condition */
} pthread_cond_t;

@@ -78,7 +78,7 @@ typedef struct
int __m_count; /* Depth of recursive locking */
_pthread_descr __m_owner; /* Owner thread (if recursive or errcheck) */
int __m_kind; /* Mutex kind: fast, recursive or errcheck */
-struct _pthread_fastlock __m_lock; /* Underlying fast lock */
+pthread_spinlock_t __m_lock; /* Underlying fast lock */
} pthread_mutex_t;

@@ -97,7 +97,7 @@ typedef int pthread_once_t;
/* Read-write locks. */
typedef struct _pthread_rwlock_t
{
-struct _pthread_fastlock __rw_lock; /* Lock to guarantee mutual exclusion */
+pthread_spinlock_t __rw_lock; /* Lock to guarantee mutual exclusion */
int __rw_readers; /* Number of readers */
_pthread_descr __rw_writer; /* Identity of writer, or NULL if none */
_pthread_descr __rw_read_waiting; /* Threads waiting for reading */
@@ -405,6 +405,27 @@ extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr,
int __pref) __THROW;
#endif

+#ifdef __USE_XOPEN2K
+/* The IEEE Std. 1003.1j-2000 introduces functions to implement
+spinlocks. */
+
+/* Initialize the spinlock LOCK. If PSHARED is nonzero the spinlock can
+be shared between different processes. */
+extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared);
+
+/* Destroy the spinlock LOCK. */
+extern int pthread_spin_destroy (pthread_spinlock_t *__lock);
+
+/* Wait until spinlock LOCK is retrieved. */
+extern int pthread_spin_lock (pthread_spinlock_t *__lock);
+
+/* Try to lock spinlock LOCK. */
+extern int pthread_spin_trylock (pthread_spinlock_t *__lock);
+
+/* Release spinlock LOCK. */
+extern int pthread_spin_unlock (pthread_spinlock_t *__lock);
+#endif
+

/* Functions for handling thread-specific data. */
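One consequence of the __pthread_spin_init definition in spinlock.c above: any nonzero pshared argument fails with ENOSYS in this implementation, so a caller wanting a process-shared lock needs a fallback. A sketch of that pattern; the helper name is illustrative:

  /* Illustrative fallback: per __pthread_spin_init above, pshared != 0
     returns ENOSYS in this implementation. */
  #include <errno.h>
  #include <pthread.h>

  static int init_spinlock_prefer_shared(pthread_spinlock_t *lock)
  {
    int err = pthread_spin_init(lock, 1);   /* ask for process-shared */
    if (err == ENOSYS)
      err = pthread_spin_init(lock, 0);     /* settle for private */
    return err;
  }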