/* pthread_mutex_init -- initialize a POSIX mutex object from an
   optional attribute object (NPTL implementation).  */
/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
19 #include <assert.h>
20 #include <errno.h>
21 #include <stdbool.h>
22 #include <string.h>
23 #include <kernel-features.h>
24 #include "pthreadP.h"
25 #include <atomic.h>
26 #include <pthread-offsets.h>
28 #include <stap-probe.h>
30 static const struct pthread_mutexattr default_mutexattr =
32 /* Default is a normal mutex, not shared between processes. */
33 .mutexkind = PTHREAD_MUTEX_NORMAL
/* Return true iff the running kernel lacks priority-inheritance futex
   support.  The result is probed once (with a FUTEX_UNLOCK_PI call that
   is expected to fail) and cached in TPI_SUPPORTED: -1 means the futex
   op returned ENOSYS (PI futexes missing), 1 means it is available.  */
static bool
prio_inherit_missing (void)
{
#ifdef __NR_futex
  static int tpi_supported;
  if (__glibc_unlikely (tpi_supported == 0))
    {
      /* Probe with an unlocked futex word; any error other than ENOSYS
         still proves the operation is implemented.  */
      int lock = 0;
      INTERNAL_SYSCALL_DECL (err);
      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
    }
  return __glibc_unlikely (tpi_supported < 0);
#endif
  /* No futex syscall at all: PI mutexes cannot be supported.  */
  return true;
}
55 int
56 __pthread_mutex_init (pthread_mutex_t *mutex,
57 const pthread_mutexattr_t *mutexattr)
59 const struct pthread_mutexattr *imutexattr;
61 ASSERT_TYPE_SIZE (pthread_mutex_t, __SIZEOF_PTHREAD_MUTEX_T);
63 ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__nusers,
64 __PTHREAD_MUTEX_NUSERS_OFFSET);
65 ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__kind,
66 __PTHREAD_MUTEX_KIND_OFFSET);
67 ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__spins,
68 __PTHREAD_MUTEX_SPINS_OFFSET);
69 #if __PTHREAD_MUTEX_LOCK_ELISION
70 ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__elision,
71 __PTHREAD_MUTEX_ELISION_OFFSET);
72 #endif
73 ASSERT_PTHREAD_INTERNAL_OFFSET (pthread_mutex_t, __data.__list,
74 __PTHREAD_MUTEX_LIST_OFFSET);
76 imutexattr = ((const struct pthread_mutexattr *) mutexattr
77 ?: &default_mutexattr);
79 /* Sanity checks. */
80 switch (__builtin_expect (imutexattr->mutexkind
81 & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
82 PTHREAD_PRIO_NONE
83 << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
85 case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
86 break;
88 case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
89 if (__glibc_unlikely (prio_inherit_missing ()))
90 return ENOTSUP;
91 break;
93 default:
94 /* XXX: For now we don't support robust priority protected mutexes. */
95 if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
96 return ENOTSUP;
97 break;
100 /* Clear the whole variable. */
101 memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);
103 /* Copy the values from the attribute. */
104 mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;
106 if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
108 #ifndef __ASSUME_SET_ROBUST_LIST
109 if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
110 && __set_robust_list_avail < 0)
111 return ENOTSUP;
112 #endif
114 mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
117 switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
119 case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
120 mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
121 break;
123 case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
124 mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;
126 int ceiling = (imutexattr->mutexkind
127 & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
128 >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
129 if (! ceiling)
131 /* See __init_sched_fifo_prio. */
132 if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
133 __init_sched_fifo_prio ();
134 if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
135 ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
137 mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
138 break;
140 default:
141 break;
144 /* The kernel when waking robust mutexes on exit never uses
145 FUTEX_PRIVATE_FLAG FUTEX_WAKE. */
146 if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
147 | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
148 mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;
150 /* Default values: mutex not used yet. */
151 // mutex->__count = 0; already done by memset
152 // mutex->__owner = 0; already done by memset
153 // mutex->__nusers = 0; already done by memset
154 // mutex->__spins = 0; already done by memset
155 // mutex->__next = NULL; already done by memset
157 LIBC_PROBE (mutex_init, 1, mutex);
159 return 0;
161 weak_alias (__pthread_mutex_init, pthread_mutex_init)
162 hidden_def (__pthread_mutex_init)