glibc/nptl/pthread_mutex_init.c
Adhemerval Zanella 6b1472eb2e Refactor PI mutexes internal definitions
This patch adds the generic futex_lock_pi and futex_unlock_pi helpers to
wrap the syscall machinery required to issue the futex syscalls.  It
slightly simplifies the futex code required to implement PI mutexes.

No functional changes; checked on x86_64-linux-gnu.

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
2019-10-31 11:09:10 -03:00
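
For context, the sketch below shows the kind of wrapper the commit describes, assuming the Linux futex (2) interface.  The function name sketch_futex_unlock_pi is illustrative only; the real futex_unlock_pi lives behind <futex-internal.h>, which the file below includes.

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative sketch: issue FUTEX_UNLOCK_PI on *FUTEX_WORD and return
   0 on success or the failing errno value.  Kernels without PI futex
   support fail this with ENOSYS, which is exactly what the
   prio_inherit_missing probe in the file below relies on.  */
static int
sketch_futex_unlock_pi (unsigned int *futex_word, int shared)
{
  /* Process-private futexes can use FUTEX_PRIVATE_FLAG for a faster
     in-kernel lookup; a process-shared futex must not set it.  */
  int op = FUTEX_UNLOCK_PI | (shared ? 0 : FUTEX_PRIVATE_FLAG);
  /* FUTEX_UNLOCK_PI ignores the val, timeout, uaddr2 and val3
     arguments.  */
  if (syscall (SYS_futex, futex_word, op, 0, NULL, NULL, 0) == 0)
    return 0;
  return errno;
}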


/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
#include <atomic.h>
#include <pthread-offsets.h>
#include <futex-internal.h>
#include <stap-probe.h>

static const struct pthread_mutexattr default_mutexattr =
  {
    /* Default is a normal mutex, not shared between processes.  */
    .mutexkind = PTHREAD_MUTEX_NORMAL
  };
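
/* Determine whether the kernel supports priority-inheritance futexes.
   The probe below issues a FUTEX_UNLOCK_PI operation on a dummy futex
   word; kernels without PI futex support fail it with ENOSYS.  The
   result is cached in TPI_SUPPORTED (0 = not yet probed, 1 = supported,
   -1 = missing), so the syscall is attempted at most once.  */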
static bool
prio_inherit_missing (void)
{
  static int tpi_supported;
  if (__glibc_unlikely (atomic_load_relaxed (&tpi_supported) == 0))
    {
      int e = futex_unlock_pi (&(unsigned int){0}, 0);
      atomic_store_relaxed (&tpi_supported, e == ENOSYS ? -1 : 1);
    }
  return __glibc_unlikely (tpi_supported < 0);
}

int
__pthread_mutex_init (pthread_mutex_t *mutex,
                      const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;
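
  /* Compile-time ABI checks: the size of pthread_mutex_t and the
     offsets of the internal fields below are part of the ABI and must
     match the constants from pthread-offsets.h.  */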
  ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);

  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__nusers,
                                  __PTHREAD_MUTEX_NUSERS_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
                                  __PTHREAD_MUTEX_KIND_OFFSET);
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__spins,
                                  __PTHREAD_MUTEX_SPINS_OFFSET);
#if __PTHREAD_MUTEX_LOCK_ELISION
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__elision,
                                  __PTHREAD_MUTEX_ELISION_OFFSET);
#endif
  ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__list,
                                  __PTHREAD_MUTEX_LIST_OFFSET);
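
  /* A null attribute pointer means the default attributes (POSIX
     allows this); the GNU "?:" extension substitutes DEFAULT_MUTEXATTR
     in that case.  */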
  imutexattr = ((const struct pthread_mutexattr *) mutexattr
                ?: &default_mutexattr);
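
  /* The attribute word packs the mutex type, the
     PTHREAD_MUTEXATTR_FLAG_* bits, the protocol (shifted by
     PTHREAD_MUTEXATTR_PROTOCOL_SHIFT) and the priority ceiling into a
     single int, so each field is recovered by masking below.  */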

  /* Sanity checks.  */
  switch (__builtin_expect (imutexattr->mutexkind
                            & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
                            PTHREAD_PRIO_NONE
                            << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      if (__glibc_unlikely (prio_inherit_missing ()))
        return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
        return ENOTSUP;
      break;
    }

  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  int mutex_kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
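
  /* Robust mutexes rely on the kernel's robust futex list (see
     set_robust_list (2)) to release a mutex whose owner dies.  For
     process-shared robust mutexes that kernel support is mandatory,
     hence the ENOTSUP below on kernels lacking the syscall.  */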
  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
          && __set_robust_list_avail < 0)
        return ENOTSUP;
#endif

      mutex_kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }

  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex_kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
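
      /* The ceiling set via pthread_mutexattr_setprioceiling is stored
         in the attribute word; zero means no ceiling was requested, so
         default to the minimum SCHED_FIFO priority.  */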
      int ceiling = (imutexattr->mutexkind
                     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
                    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
        {
          /* See __init_sched_fifo_prio.  */
          if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
            __init_sched_fifo_prio ();
          if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
            ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
        }
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }

  /* The kernel, when waking robust mutexes on exit, never uses
     FUTEX_PRIVATE_FLAG with FUTEX_WAKE.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
                                | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex_kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  atomic_store_relaxed (&(mutex->__data.__kind), mutex_kind);

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;        already done by memset
  // mutex->__owner = 0;        already done by memset
  // mutex->__nusers = 0;       already done by memset
  // mutex->__spins = 0;        already done by memset
  // mutex->__next = NULL;      already done by memset

  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
weak_alias (__pthread_mutex_init, pthread_mutex_init)
hidden_def (__pthread_mutex_init)
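
A small usage sketch (not part of this file; compile with -pthread): requesting PTHREAD_PRIO_INHERIT routes through the PI branch above, so on kernels without PI futex support pthread_mutex_init returns ENOTSUP.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  /* Request a priority-inheritance mutex; __pthread_mutex_init maps
     this to PTHREAD_MUTEX_PRIO_INHERIT_NP in __kind.  */
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);

  int err = pthread_mutex_init (&m, &attr);
  if (err != 0)
    /* ENOTSUP here means the kernel lacks PI futex support.  */
    fprintf (stderr, "pthread_mutex_init: %s\n", strerror (err));
  else
    {
      pthread_mutex_lock (&m);
      pthread_mutex_unlock (&m);
      pthread_mutex_destroy (&m);
    }
  pthread_mutexattr_destroy (&attr);
  return err;
}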