1 /* Hurd helpers for lowlevellocks.
2 Copyright (C) 1999-2022 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
#include <hurd/hurd.h>

#include <errno.h>
#include <time.h>
#include <unistd.h>
26 /* Convert an absolute timeout in nanoseconds to a relative
27 timeout in milliseconds. */
28 static inline int __attribute__ ((gnu_inline
))
29 compute_reltime (const struct timespec
*abstime
, clockid_t clk
)
32 __clock_gettime (clk
, &ts
);
34 ts
.tv_sec
= abstime
->tv_sec
- ts
.tv_sec
;
35 ts
.tv_nsec
= abstime
->tv_nsec
- ts
.tv_nsec
;
40 ts
.tv_nsec
+= 1000000000;
43 return ts
.tv_sec
< 0 ? -1 : (int)(ts
.tv_sec
* 1000 + ts
.tv_nsec
/ 1000000);
47 __lll_abstimed_wait (void *ptr
, int val
,
48 const struct timespec
*tsp
, int flags
, int clk
)
50 if (clk
!= CLOCK_REALTIME
)
53 int mlsec
= compute_reltime (tsp
, clk
);
54 return mlsec
< 0 ? KERN_TIMEDOUT
: __lll_timed_wait (ptr
, val
, mlsec
, flags
);
58 __lll_abstimed_wait_intr (void *ptr
, int val
,
59 const struct timespec
*tsp
, int flags
, int clk
)
61 if (clk
!= CLOCK_REALTIME
)
64 int mlsec
= compute_reltime (tsp
, clk
);
65 return mlsec
< 0 ? KERN_TIMEDOUT
: __lll_timed_wait_intr (ptr
, val
, mlsec
, flags
);
69 __lll_abstimed_xwait (void *ptr
, int lo
, int hi
,
70 const struct timespec
*tsp
, int flags
, int clk
)
72 if (clk
!= CLOCK_REALTIME
)
75 int mlsec
= compute_reltime (tsp
, clk
);
76 return mlsec
< 0 ? KERN_TIMEDOUT
: __lll_timed_xwait (ptr
, lo
, hi
, mlsec
,
81 __lll_abstimed_lock (void *ptr
,
82 const struct timespec
*tsp
, int flags
, int clk
)
84 if (clk
!= CLOCK_REALTIME
)
87 if (__lll_trylock (ptr
) == 0)
92 if (atomic_exchange_acq ((int *)ptr
, 2) == 0)
94 else if (! valid_nanoseconds (tsp
->tv_nsec
))
97 int mlsec
= compute_reltime (tsp
, clk
);
98 if (mlsec
< 0 || __lll_timed_wait (ptr
, 2, mlsec
, flags
) == KERN_TIMEDOUT
)
105 /* Test if a given process id is still valid. */
109 task_t task
= __pid2task (pid
);
110 if (task
== MACH_PORT_NULL
)
113 __mach_port_deallocate (__mach_task_self (), task
);
117 /* Robust locks have currently no support from the kernel; they
118 are simply implemented with periodic polling. When sleeping, the
119 maximum blocking time is determined by this constant. */
120 #define MAX_WAIT_TIME 1500
123 __lll_robust_lock (void *ptr
, int flags
)
125 int *iptr
= (int *)ptr
;
126 int id
= __getpid ();
130 /* Try to set the lock word to our PID if it's clear. Otherwise,
131 mark it as having waiters. */
135 if (!val
&& atomic_compare_and_exchange_bool_acq (iptr
, id
, 0) == 0)
137 else if (atomic_compare_and_exchange_bool_acq (iptr
,
138 val
| LLL_WAITERS
, val
) == 0)
142 for (id
|= LLL_WAITERS
; ; )
145 if (!val
&& atomic_compare_and_exchange_bool_acq (iptr
, id
, 0) == 0)
147 else if (val
&& !valid_pid (val
& LLL_OWNER_MASK
))
149 if (atomic_compare_and_exchange_bool_acq (iptr
, id
, val
) == 0)
154 __lll_timed_wait (iptr
, val
, wait_time
, flags
);
155 if (wait_time
< MAX_WAIT_TIME
)
162 __lll_robust_abstimed_lock (void *ptr
,
163 const struct timespec
*tsp
, int flags
, int clk
)
165 int *iptr
= (int *)ptr
;
166 int id
= __getpid ();
170 if (clk
!= CLOCK_REALTIME
)
176 if (!val
&& atomic_compare_and_exchange_bool_acq (iptr
, id
, 0) == 0)
178 else if (atomic_compare_and_exchange_bool_acq (iptr
,
179 val
| LLL_WAITERS
, val
) == 0)
183 for (id
|= LLL_WAITERS
; ; )
186 if (!val
&& atomic_compare_and_exchange_bool_acq (iptr
, id
, 0) == 0)
188 else if (val
&& !valid_pid (val
& LLL_OWNER_MASK
))
190 if (atomic_compare_and_exchange_bool_acq (iptr
, id
, val
) == 0)
195 int mlsec
= compute_reltime (tsp
, clk
);
198 else if (mlsec
> wait_time
)
201 int res
= __lll_timed_wait (iptr
, val
, mlsec
, flags
);
202 if (res
== KERN_TIMEDOUT
)
204 else if (wait_time
< MAX_WAIT_TIME
)
211 __lll_robust_trylock (void *ptr
)
213 int *iptr
= (int *)ptr
;
214 int id
= __getpid ();
215 unsigned int val
= *iptr
;
219 if (atomic_compare_and_exchange_bool_acq (iptr
, id
, 0) == 0)
222 else if (!valid_pid (val
& LLL_OWNER_MASK
)
223 && atomic_compare_and_exchange_bool_acq (iptr
, id
, val
) == 0)
230 __lll_robust_unlock (void *ptr
, int flags
)
232 unsigned int val
= atomic_load_relaxed ((unsigned int *)ptr
);
235 if (val
& LLL_WAITERS
)
237 __lll_set_wake (ptr
, 0, flags
);
240 else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr
, &val
, 0))