sparc: Remove 64 bit check on sparc32 wordsize (BZ 27574)
[glibc.git] / hurd / hurdlock.c
blob 208867659e0621c24a523ffe916638be7cd04c49
1 /* Hurd helpers for lowlevellocks.
2 Copyright (C) 1999-2024 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
26 /* Convert an absolute timeout in nanoseconds to a relative
27 timeout in milliseconds. */
28 static inline int __attribute__ ((gnu_inline))
29 compute_reltime (const struct timespec *abstime, clockid_t clk)
31 struct timespec ts;
32 __clock_gettime (clk, &ts);
34 ts.tv_sec = abstime->tv_sec - ts.tv_sec;
35 ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
37 if (ts.tv_nsec < 0)
39 --ts.tv_sec;
40 ts.tv_nsec += 1000000000;
43 return ts.tv_sec < 0 ? -1 : (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
46 int
47 __lll_abstimed_wait (void *ptr, int val,
48 const struct timespec *tsp, int flags, int clk)
50 if (clk != CLOCK_REALTIME)
51 return EINVAL;
53 int mlsec = compute_reltime (tsp, clk);
54 return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait (ptr, val, mlsec, flags);
57 int
58 __lll_abstimed_wait_intr (void *ptr, int val,
59 const struct timespec *tsp, int flags, int clk)
61 if (clk != CLOCK_REALTIME)
62 return EINVAL;
64 int mlsec = compute_reltime (tsp, clk);
65 return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_wait_intr (ptr, val, mlsec, flags);
68 int
69 __lll_abstimed_xwait (void *ptr, int lo, int hi,
70 const struct timespec *tsp, int flags, int clk)
72 if (clk != CLOCK_REALTIME)
73 return EINVAL;
75 int mlsec = compute_reltime (tsp, clk);
76 return mlsec < 0 ? KERN_TIMEDOUT : __lll_timed_xwait (ptr, lo, hi, mlsec,
77 flags);
80 int
81 __lll_abstimed_lock (void *ptr,
82 const struct timespec *tsp, int flags, int clk)
84 if (clk != CLOCK_REALTIME)
85 return EINVAL;
87 if (__lll_trylock (ptr) == 0)
88 return 0;
90 while (1)
92 if (atomic_exchange_acquire ((int *)ptr, 2) == 0)
93 return 0;
94 else if (! valid_nanoseconds (tsp->tv_nsec))
95 return EINVAL;
97 int mlsec = compute_reltime (tsp, clk);
98 if (mlsec < 0 || __lll_timed_wait (ptr, 2, mlsec, flags) == KERN_TIMEDOUT)
99 return ETIMEDOUT;
103 /* Robust locks. */
105 /* Test if a given process id is still valid. */
106 static inline int
107 valid_pid (int pid)
109 task_t task = __pid2task (pid);
110 if (task == MACH_PORT_NULL)
111 return 0;
113 __mach_port_deallocate (__mach_task_self (), task);
114 return 1;
/* Robust locks have currently no support from the kernel; they
   are simply implemented with periodic polling.  When sleeping, the
   maximum blocking time is determined by this constant (in
   milliseconds, the same unit __lll_timed_wait takes).  */
#define MAX_WAIT_TIME 1500
123 __lll_robust_lock (void *ptr, int flags)
125 int *iptr = (int *)ptr;
126 int id = __getpid ();
127 int wait_time = 25;
128 unsigned int val;
130 /* Try to set the lock word to our PID if it's clear. Otherwise,
131 mark it as having waiters. */
132 while (1)
134 val = *iptr;
135 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
136 return 0;
137 else if (atomic_compare_and_exchange_bool_acq (iptr,
138 val | LLL_WAITERS, val) == 0)
139 break;
142 for (id |= LLL_WAITERS ; ; )
144 val = *iptr;
145 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
146 return 0;
147 else if (val && !valid_pid (val & LLL_OWNER_MASK))
149 if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
150 return EOWNERDEAD;
152 else
154 __lll_timed_wait (iptr, val, wait_time, flags);
155 if (wait_time < MAX_WAIT_TIME)
156 wait_time <<= 1;
162 __lll_robust_abstimed_lock (void *ptr,
163 const struct timespec *tsp, int flags, int clk)
165 int *iptr = (int *)ptr;
166 int id = __getpid ();
167 int wait_time = 25;
168 unsigned int val;
170 if (clk != CLOCK_REALTIME)
171 return EINVAL;
173 while (1)
175 val = *iptr;
176 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
177 return 0;
178 else if (atomic_compare_and_exchange_bool_acq (iptr,
179 val | LLL_WAITERS, val) == 0)
180 break;
183 for (id |= LLL_WAITERS ; ; )
185 val = *iptr;
186 if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
187 return 0;
188 else if (val && !valid_pid (val & LLL_OWNER_MASK))
190 if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
191 return EOWNERDEAD;
193 else
195 int mlsec = compute_reltime (tsp, clk);
196 if (mlsec < 0)
197 return ETIMEDOUT;
198 else if (mlsec > wait_time)
199 mlsec = wait_time;
201 int res = __lll_timed_wait (iptr, val, mlsec, flags);
202 if (res == KERN_TIMEDOUT)
203 return ETIMEDOUT;
204 else if (wait_time < MAX_WAIT_TIME)
205 wait_time <<= 1;
211 __lll_robust_trylock (void *ptr)
213 int *iptr = (int *)ptr;
214 int id = __getpid ();
215 unsigned int val = *iptr;
217 if (!val)
219 if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
220 return 0;
222 else if (!valid_pid (val & LLL_OWNER_MASK)
223 && atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
224 return EOWNERDEAD;
226 return EBUSY;
229 void
230 __lll_robust_unlock (void *ptr, int flags)
232 unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
233 while (1)
235 if (val & LLL_WAITERS)
237 __lll_set_wake (ptr, 0, flags);
238 break;
240 else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
241 break;