Mirror of git://sourceware.org/git/glibc.git
Commit 8c86ba4463:
If any RPC fails, the reply port will already have been deallocated. __pthread_thread_terminate therefore has to defer taking its name until the very last __thread_terminate_release call, which does not expect a reply message. But then we have to read from the pthread structure. This introduces __pthread_dealloc_finish (), which records the thread termination, so that the slot really only becomes reusable just before the __thread_terminate_release call. Only the thread itself can set it, so decouple this from pthread_state by removing the PTHREAD_TERMINATED state and adding a terminated field.
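To make the ordering concrete, here is a minimal sketch of what __pthread_dealloc_finish () amounts to under this scheme; it is reconstructed from the description above and from the terminated check in __pthread_alloc below, not copied from the glibc sources:

/* Sketch only: called by the dying thread itself, immediately before the
   final __thread_terminate_release, once nothing else will touch the
   structure any more.  */
void
__pthread_dealloc_finish (struct __pthread *pthread)
{
  /* The structure already sits on __pthread_free_threads; setting this
     flag is what allows __pthread_alloc to recycle the slot.  */
  pthread->terminated = TRUE;
}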
233 lines · 6.5 KiB · C
/* Allocate a new thread structure.
   Copyright (C) 2000-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <pt-internal.h>

/* This braindamage is necessary because the standard says that some
   of the threads functions "shall fail" if "No thread could be found
   corresponding to that specified by the given thread ID."  */

/* The size of the thread ID lookup table.  */
int __pthread_max_threads;

/* List of thread structures corresponding to free thread IDs.  */
struct __pthread *__pthread_free_threads;
pthread_mutex_t __pthread_free_threads_lock;

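/* Reset a freshly malloc'ed or recycled thread structure to its initial,
   not-yet-started state.  */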
static inline error_t
initialize_pthread (struct __pthread *new)
{
  error_t err;

  err = __pthread_init_specific (new);
  if (err)
    return err;

  new->nr_refs = 1;
  new->cancel_lock = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
  new->cancel_hook = NULL;
  new->cancel_hook_arg = NULL;
  new->cancel_state = PTHREAD_CANCEL_ENABLE;
  new->cancel_type = PTHREAD_CANCEL_DEFERRED;
  new->cancel_pending = 0;

  new->state_lock = (pthread_mutex_t) PTHREAD_MUTEX_INITIALIZER;
  new->state_cond = (pthread_cond_t) PTHREAD_COND_INITIALIZER;
  new->terminated = FALSE;

  memset (&new->res_state, '\0', sizeof (new->res_state));

  new->tcb = NULL;

  new->next = 0;
  new->prevp = 0;

  return 0;
}


/* Allocate a new thread structure and its pthread thread ID (but not
   a kernel thread).  */
int
__pthread_alloc (struct __pthread **pthread)
{
  error_t err;

  struct __pthread *new;
  struct __pthread **threads;
  struct __pthread **old_threads;
  int max_threads;
  int new_max_threads;

  __pthread_mutex_lock (&__pthread_free_threads_lock);
  for (new = __pthread_free_threads; new; new = new->next)
    {
      /* There is no need to take NEW->STATE_LOCK: if NEW is on this
         list, then it is protected by __PTHREAD_FREE_THREADS_LOCK
         except in __pthread_dealloc_finish where after it is added to the
         list (with the lock held), it drops the lock and then sets
         NEW->TERMINATED and immediately stops using NEW.  */
      if (new->terminated)
        {
          __pthread_dequeue (new);
          break;
        }
    }
  __pthread_mutex_unlock (&__pthread_free_threads_lock);

  if (new)
    {
      if (new->tcb)
        {
          /* Drop old values */
          _dl_deallocate_tls (new->tcb, 1);
        }

      err = initialize_pthread (new);
      if (!err)
        *pthread = new;
      return err;
    }

  /* Allocate a new thread structure.  */
  new = malloc (sizeof (struct __pthread));
  if (new == NULL)
    return ENOMEM;

  err = initialize_pthread (new);
  if (err)
    {
      free (new);
      return err;
    }

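  /* We come back here (via the goto further down) if some other thread
     enlarged the lookup table while we were allocating a larger copy
     ourselves; in that case, simply retry the slot search.  */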
 retry:
  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));

  if (GL (dl_pthread_num_threads) < __pthread_max_threads)
    {
      /* We have a free slot.  Use the slot number plus one as the
         thread ID for the new thread.  */
      new->thread = 1 + GL (dl_pthread_num_threads)++;
      GL (dl_pthread_threads)[new->thread - 1] = NULL;

      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

      *pthread = new;
      return 0;
    }
#ifdef PTHREAD_THREADS_MAX
  else if (GL (dl_pthread_num_threads) >= PTHREAD_THREADS_MAX)
    {
      /* We have reached the limit on the number of threads per process.  */
      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

      free (new);
      return EAGAIN;
    }
#endif

  /* We are going to enlarge the threads table.  Save its current
     size.  We're going to release the lock before doing the necessary
     memory allocation, since that's a potentially blocking operation.  */
  max_threads = __pthread_max_threads;

  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

  /* Allocate a new lookup table that's twice as large.  */
  new_max_threads
    = max_threads > 0 ? max_threads * 2 : _POSIX_THREAD_THREADS_MAX;
  threads = malloc (new_max_threads * sizeof (struct __pthread *));
  if (threads == NULL)
    {
      free (new);
      return ENOMEM;
    }

  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));

  /* Check if nobody else has already enlarged the table.  */
  if (max_threads != __pthread_max_threads)
    {
      /* Yep, they did.  */
      __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

      /* Free the newly allocated table and try again to allocate a slot.  */
      free (threads);
      goto retry;
    }

  /* Copy over the contents of the old table.  */
  memcpy (threads, GL (dl_pthread_threads),
          __pthread_max_threads * sizeof (struct __pthread *));

  /* Save the location of the old table.  We want to deallocate its
     storage after we released the lock.  */
  old_threads = GL (dl_pthread_threads);

  /* Replace the table with the new one.  */
  __pthread_max_threads = new_max_threads;
  GL (dl_pthread_threads) = threads;

  /* And allocate ourselves one of the newly created slots.  */
  new->thread = 1 + GL (dl_pthread_num_threads)++;
  GL (dl_pthread_threads)[new->thread - 1] = NULL;

  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

  free (old_threads);

  *pthread = new;
  return 0;
}

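/* Initialize MAP's static TLS block in every thread that already exists.  */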
void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
  int i;

  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));
  for (i = 0; i < GL (dl_pthread_num_threads); ++i)
    {
      struct __pthread *t = GL (dl_pthread_threads)[i];

      if (t == NULL)
        continue;

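      /* Locate MAP's static TLS block within thread T's TLS area; where it
         sits relative to the TCB depends on the TLS variant in use.  */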
# if TLS_TCB_AT_TP
      void *dest = (char *) t->tcb - map->l_tls_offset;
# elif TLS_DTV_AT_TP
      void *dest = (char *) t->tcb + map->l_tls_offset + TLS_PRE_TCB_SIZE;
# else
#  error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif

      /* Initialize the memory.  */
      memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
              '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
    }
  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));
}
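For orientation, a hedged sketch of how a caller (such as the pthread_create machinery) would be expected to use this allocator; example_create_thread and its body are illustrative assumptions, not glibc code:

/* Illustrative only: rough shape of a caller of __pthread_alloc.  */
static int
example_create_thread (void)
{
  struct __pthread *pthread;
  int err = __pthread_alloc (&pthread);  /* Reserves an ID; the slot stays NULL.  */
  if (err)
    return err;

  /* ... set up the stack, TCB and kernel thread here ...  */

  /* Publish the structure under its ID so lookups by pthread_t can find it.  */
  __libc_rwlock_wrlock (GL (dl_pthread_threads_lock));
  GL (dl_pthread_threads)[pthread->thread - 1] = pthread;
  __libc_rwlock_unlock (GL (dl_pthread_threads_lock));

  return 0;
}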