nptl/pthread_mutex_init.c
/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <kernel-features.h>
#include "pthreadP.h"
#include <atomic.h>

#include <stap-probe.h>
static const struct pthread_mutexattr default_mutexattr =
  {
    /* Default is a normal mutex, not shared between processes.  */
    .mutexkind = PTHREAD_MUTEX_NORMAL
  };
/* Determine whether the kernel supports priority-inheritance futexes
   (FUTEX_LOCK_PI).  The result of the runtime probe is cached in
   TPI_SUPPORTED.  */
static bool
prio_inherit_missing (void)
{
#ifdef __NR_futex
# ifndef __ASSUME_FUTEX_LOCK_PI
  static int tpi_supported;
  if (__glibc_unlikely (tpi_supported == 0))
    {
      int lock = 0;
      INTERNAL_SYSCALL_DECL (err);
      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
    }
  return __glibc_unlikely (tpi_supported < 0);
# endif
  return false;
#endif
  return true;
}
int
__pthread_mutex_init (pthread_mutex_t *mutex,
                      const pthread_mutexattr_t *mutexattr)
{
  const struct pthread_mutexattr *imutexattr;

  assert (sizeof (pthread_mutex_t) <= __SIZEOF_PTHREAD_MUTEX_T);

  imutexattr = ((const struct pthread_mutexattr *) mutexattr
                ?: &default_mutexattr);
  /* Sanity checks.  */
  switch (__builtin_expect (imutexattr->mutexkind
                            & PTHREAD_MUTEXATTR_PROTOCOL_MASK,
                            PTHREAD_PRIO_NONE
                            << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT))
    {
    case PTHREAD_PRIO_NONE << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      break;

    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      if (__glibc_unlikely (prio_inherit_missing ()))
        return ENOTSUP;
      break;

    default:
      /* XXX: For now we don't support robust priority protected mutexes.  */
      if (imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST)
        return ENOTSUP;
      break;
    }
  /* Clear the whole variable.  */
  memset (mutex, '\0', __SIZEOF_PTHREAD_MUTEX_T);

  /* Copy the values from the attribute.  */
  mutex->__data.__kind = imutexattr->mutexkind & ~PTHREAD_MUTEXATTR_FLAG_BITS;

  if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_ROBUST) != 0)
    {
#ifndef __ASSUME_SET_ROBUST_LIST
      if ((imutexattr->mutexkind & PTHREAD_MUTEXATTR_FLAG_PSHARED) != 0
          && __set_robust_list_avail < 0)
        return ENOTSUP;
#endif

      mutex->__data.__kind |= PTHREAD_MUTEX_ROBUST_NORMAL_NP;
    }
  switch (imutexattr->mutexkind & PTHREAD_MUTEXATTR_PROTOCOL_MASK)
    {
    case PTHREAD_PRIO_INHERIT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_INHERIT_NP;
      break;

    case PTHREAD_PRIO_PROTECT << PTHREAD_MUTEXATTR_PROTOCOL_SHIFT:
      mutex->__data.__kind |= PTHREAD_MUTEX_PRIO_PROTECT_NP;

      int ceiling = (imutexattr->mutexkind
                     & PTHREAD_MUTEXATTR_PRIO_CEILING_MASK)
                    >> PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT;
      if (! ceiling)
        {
          /* See __init_sched_fifo_prio.  */
          if (atomic_load_relaxed (&__sched_fifo_min_prio) == -1)
            __init_sched_fifo_prio ();
          if (ceiling < atomic_load_relaxed (&__sched_fifo_min_prio))
            ceiling = atomic_load_relaxed (&__sched_fifo_min_prio);
        }
      mutex->__data.__lock = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
      break;

    default:
      break;
    }
  /* When waking robust mutexes on process exit, the kernel never uses
     FUTEX_PRIVATE_FLAG with FUTEX_WAKE.  */
  if ((imutexattr->mutexkind & (PTHREAD_MUTEXATTR_FLAG_PSHARED
                                | PTHREAD_MUTEXATTR_FLAG_ROBUST)) != 0)
    mutex->__data.__kind |= PTHREAD_MUTEX_PSHARED_BIT;

  /* Default values: mutex not used yet.  */
  // mutex->__count = 0;        already done by memset
  // mutex->__owner = 0;        already done by memset
  // mutex->__nusers = 0;       already done by memset
  // mutex->__spins = 0;        already done by memset
  // mutex->__next = NULL;      already done by memset

  LIBC_PROBE (mutex_init, 1, mutex);

  return 0;
}
strong_alias (__pthread_mutex_init, pthread_mutex_init)
hidden_def (__pthread_mutex_init)
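
For reference, the attribute bits consumed above are the ones applications set through the standard pthread_mutexattr_* interfaces. The sketch below is not part of glibc; the helper name init_robust_pi_mutex and the fallback policy are illustrative assumptions. It shows a caller requesting a robust, priority-inheritance mutex and handling the ENOTSUP that __pthread_mutex_init returns when FUTEX_LOCK_PI support is missing, then falling back to default attributes (the NULL path that selects default_mutexattr).

/* Illustrative caller-side sketch (separate program, compile with
   -pthread); not part of this file.  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock;

static int
init_robust_pi_mutex (void)
{
  pthread_mutexattr_t attr;
  int err;

  pthread_mutexattr_init (&attr);
  /* Ask for priority inheritance and robustness; both end up in
     the mutexkind bits examined by __pthread_mutex_init.  */
  pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);

  err = pthread_mutex_init (&lock, &attr);
  if (err == ENOTSUP)
    /* Priority inheritance unavailable on this kernel; retry with
       default attributes (equivalent to a normal, process-private mutex).  */
    err = pthread_mutex_init (&lock, NULL);

  pthread_mutexattr_destroy (&attr);
  return err;
}

int
main (void)
{
  int err = init_robust_pi_mutex ();
  if (err != 0)
    {
      fprintf (stderr, "pthread_mutex_init failed: %d\n", err);
      return 1;
    }

  pthread_mutex_lock (&lock);
  pthread_mutex_unlock (&lock);
  pthread_mutex_destroy (&lock);
  return 0;
}

Whether falling back to a non-PI mutex is acceptable is an application-level decision; the point here is only that ENOTSUP is the documented failure mode exercised by the checks in this file.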