Remove unneeded setting of errno after malloc failure
[glibc.git] / hurd / hurdlock.c
blob 382e9f2aa9b114c673708742621cae24d131552e
1 /* Hurd helpers for lowlevellocks.
2 Copyright (C) 1999-2018 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <errno.h>
#include <limits.h>
#include <unistd.h>
26 /* Convert an absolute timeout in nanoseconds to a relative
27 timeout in milliseconds. */
28 static inline int __attribute__ ((gnu_inline))
29 compute_reltime (const struct timespec *abstime, clockid_t clk)
31 struct timespec ts;
32 __clock_gettime (clk, &ts);
34 ts.tv_sec = abstime->tv_sec - ts.tv_sec;
35 ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
37 if (ts.tv_nsec < 0)
39 --ts.tv_sec;
40 ts.tv_nsec += 1000000000;
43 return ts.tv_sec < 0 ? -1 : (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
46 int
47 __lll_abstimed_wait (void *ptr, int val,
48 const struct timespec *tsp, int flags, int clk)
50 int mlsec = compute_reltime (tsp, clk);
51 return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_wait (ptr, val, mlsec, flags);
54 int
55 __lll_abstimed_xwait (void *ptr, int lo, int hi,
56 const struct timespec *tsp, int flags, int clk)
58 int mlsec = compute_reltime (tsp, clk);
59 return mlsec < 0 ? KERN_TIMEDOUT : lll_timed_xwait (ptr, lo, hi, mlsec,
60 flags);
63 int
64 __lll_abstimed_lock (void *ptr,
65 const struct timespec *tsp, int flags, int clk)
67 if (lll_trylock (ptr) == 0)
68 return 0;
70 while (1)
72 if (atomic_exchange_acq ((int *)ptr, 2) == 0)
73 return 0;
74 else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
75 return EINVAL;
77 int mlsec = compute_reltime (tsp, clk);
78 if (mlsec < 0 || lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
79 return ETIMEDOUT;
83 /* Robust locks. */
85 /* Test if a given process id is still valid. */
86 static inline int
87 valid_pid (int pid)
89 task_t task = __pid2task (pid);
90 if (task == MACH_PORT_NULL)
91 return 0;
93 __mach_port_deallocate (__mach_task_self (), task);
94 return 1;
97 /* Robust locks have currently no support from the kernel; they
98 are simply implemented with periodic polling. When sleeping, the
99 maximum blocking time is determined by this constant. */
100 #define MAX_WAIT_TIME 1500
103 __lll_robust_lock (void *ptr, int flags)
105 int *iptr = (int *)ptr;
106 int id = __getpid ();
107 int wait_time = 25;
108 unsigned int val;
110 /* Try to set the lock word to our PID if it's clear. Otherwise,
111 mark it as having waiters. */
112 while (1)
114 val = *iptr;
115 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
116 return 0;
117 else if (atomic_compare_and_exchange_bool_acq (iptr,
118 val | LLL_WAITERS, val) == 0)
119 break;
122 for (id |= LLL_WAITERS ; ; )
124 val = *iptr;
125 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
126 return 0;
127 else if (val && !valid_pid (val & LLL_OWNER_MASK))
129 if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
130 return EOWNERDEAD;
132 else
134 lll_timed_wait (iptr, val, wait_time, flags);
135 if (wait_time < MAX_WAIT_TIME)
136 wait_time <<= 1;
142 __lll_robust_abstimed_lock (void *ptr,
143 const struct timespec *tsp, int flags, int clk)
145 int *iptr = (int *)ptr;
146 int id = __getpid ();
147 int wait_time = 25;
148 unsigned int val;
150 while (1)
152 val = *iptr;
153 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
154 return 0;
155 else if (atomic_compare_and_exchange_bool_acq (iptr,
156 val | LLL_WAITERS, val) == 0)
157 break;
160 for (id |= LLL_WAITERS ; ; )
162 val = *iptr;
163 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
164 return 0;
165 else if (val && !valid_pid (val & LLL_OWNER_MASK))
167 if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
168 return EOWNERDEAD;
170 else
172 int mlsec = compute_reltime (tsp, clk);
173 if (mlsec < 0)
174 return ETIMEDOUT;
175 else if (mlsec > wait_time)
176 mlsec = wait_time;
178 int res = lll_timed_wait (iptr, val, mlsec, flags);
179 if (res == KERN_TIMEDOUT)
180 return ETIMEDOUT;
181 else if (wait_time < MAX_WAIT_TIME)
182 wait_time <<= 1;
188 __lll_robust_trylock (void *ptr)
190 int *iptr = (int *)ptr;
191 int id = __getpid ();
192 unsigned int val = *iptr;
194 if (!val)
196 if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
197 return 0;
199 else if (!valid_pid (val & LLL_OWNER_MASK)
200 && atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
201 return EOWNERDEAD;
203 return EBUSY;
206 void
207 __lll_robust_unlock (void *ptr, int flags)
209 unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
210 while (1)
212 if (val & LLL_WAITERS)
214 lll_set_wake (ptr, 0, flags);
215 break;
217 else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
218 break;