hurd: Fix copyright years
[glibc.git] / hurd / hurdlock.c
/* Hurd helpers for lowlevellocks.
   Copyright (C) 1999-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include "hurdlock.h"
#include <hurd.h>
#include <hurd/hurd.h>
#include <time.h>
#include <errno.h>
#include <unistd.h>
/* Convert an absolute timeout in seconds and nanoseconds to a relative
   timeout in milliseconds.  */
static inline int __attribute__ ((gnu_inline))
compute_reltime (const struct timespec *abstime, clockid_t clk)
{
  struct timespec ts;
  __clock_gettime (clk, &ts);

  ts.tv_sec = abstime->tv_sec - ts.tv_sec;
  ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;

  if (ts.tv_nsec < 0)
    {
      --ts.tv_sec;
      ts.tv_nsec += 1000000000;
    }

  return (ts.tv_sec < 0 ? -1 :
    (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000));
}
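/* As an illustrative sketch (the variable names here are hypothetical),
   a caller holding an absolute CLOCK_REALTIME deadline 150ms in the
   future would get roughly 150 back:

     struct timespec deadline;
     __clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_nsec += 150 * 1000000;
     if (deadline.tv_nsec >= 1000000000)
       {
         ++deadline.tv_sec;
         deadline.tv_nsec -= 1000000000;
       }

     int ms = compute_reltime (&deadline, CLOCK_REALTIME);

   A deadline that has already passed yields -1, which the wrappers
   below map to KERN_TIMEDOUT or ETIMEDOUT.  */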
int __lll_abstimed_wait (void *ptr, int val,
  const struct timespec *tsp, int flags, int clk)
{
  int mlsec = compute_reltime (tsp, clk);
  return (mlsec < 0 ? KERN_TIMEDOUT :
    lll_timed_wait (ptr, val, mlsec, flags));
}
int __lll_abstimed_xwait (void *ptr, int lo, int hi,
  const struct timespec *tsp, int flags, int clk)
{
  int mlsec = compute_reltime (tsp, clk);
  return (mlsec < 0 ? KERN_TIMEDOUT :
    lll_timed_xwait (ptr, lo, hi, mlsec, flags));
}
int __lll_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  if (lll_trylock (ptr) == 0)
    return (0);

  while (1)
    {
      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
        return (0);
      else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
        return (EINVAL);

      int mlsec = compute_reltime (tsp, clk);
      if (mlsec < 0 || lll_timed_wait (ptr,
          2, mlsec, flags) == KERN_TIMEDOUT)
        return (ETIMEDOUT);
    }
}
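/* A hedged usage sketch for the routine above.  The lock word, the zero
   flags value and the deadline are illustrative, and the unlock step
   assumes the lll_unlock macro from the companion lowlevellock.h header:

     int lock_word = 0;
     struct timespec deadline;
     __clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_sec += 2;

     int err = __lll_abstimed_lock (&lock_word, &deadline,
                                    0, CLOCK_REALTIME);

   On 0 the lock is held and is eventually released with
   lll_unlock (&lock_word, 0); ETIMEDOUT means the deadline passed while
   waiting, and EINVAL means the deadline's tv_nsec field was out of
   range.  */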
/* Robust locks.  */

/* Test if a given process id is still valid.  */
static inline int valid_pid (int pid)
{
  task_t task = __pid2task (pid);
  if (task == MACH_PORT_NULL)
    return (0);

  __mach_port_deallocate (__mach_task_self (), task);
  return (1);
}
/* Robust locks currently have no kernel support; they are simply
   implemented with periodic polling.  When sleeping, the maximum
   blocking time is determined by this constant.  */
#define MAX_WAIT_TIME   1500
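/* For illustration, the polling loops below start with a 25ms sleep and
   double it after every wakeup until it reaches MAX_WAIT_TIME, i.e. the
   sleep sequence is roughly:

     25, 50, 100, 200, 400, 800, 1600, 1600, ... milliseconds

   (the last doubling happens while wait_time is still below the cap, so
   the steady-state interval settles slightly above it).  */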
int __lll_robust_lock (void *ptr, int flags)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  /* Try to set the lock word to our PID if it's clear.  Otherwise,
     mark it as having waiters.  */
  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (atomic_compare_and_exchange_bool_acq (iptr,
          val | LLL_WAITERS, val) == 0)
        break;
    }

  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return (EOWNERDEAD);
        }
      else
        {
          lll_timed_wait (iptr, val, wait_time, flags);
          if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}
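/* For reference, the lock-word states handled above and below are (the
   exact bit assignments live in hurdlock.h):

     0                    unlocked
     pid                  owned by process `pid', no waiters recorded
     pid | LLL_WAITERS    owned by `pid', at least one poller waiting

   When valid_pid (val & LLL_OWNER_MASK) reports that the recorded owner
   is gone, a waiter installs its own PID and the caller gets EOWNERDEAD
   so it can repair the state the lock protects.  */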
int __lll_robust_abstimed_lock (void *ptr,
  const struct timespec *tsp, int flags, int clk)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  int wait_time = 25;
  unsigned int val;

  while (1)
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (atomic_compare_and_exchange_bool_acq (iptr,
          val | LLL_WAITERS, val) == 0)
        break;
    }

  for (id |= LLL_WAITERS ; ; )
    {
      val = *iptr;
      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
      else if (val && !valid_pid (val & LLL_OWNER_MASK))
        {
          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
            return (EOWNERDEAD);
        }
      else
        {
          int mlsec = compute_reltime (tsp, clk);
          if (mlsec < 0)
            return (ETIMEDOUT);
          else if (mlsec > wait_time)
            mlsec = wait_time;

          int res = lll_timed_wait (iptr, val, mlsec, flags);
          if (res == KERN_TIMEDOUT)
            return (ETIMEDOUT);
          else if (wait_time < MAX_WAIT_TIME)
            wait_time <<= 1;
        }
    }
}
int __lll_robust_trylock (void *ptr)
{
  int *iptr = (int *)ptr;
  int id = __getpid ();
  unsigned int val = *iptr;

  if (!val)
    {
      if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
        return (0);
    }
  else if (!valid_pid (val & LLL_OWNER_MASK) &&
      atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
    return (EOWNERDEAD);

  return (EBUSY);
}
void __lll_robust_unlock (void *ptr, int flags)
{
  unsigned int val = atomic_load_relaxed ((unsigned int *)ptr);
  while (1)
    {
      if (val & LLL_WAITERS)
        {
          lll_set_wake (ptr, 0, flags);
          break;
        }
      else if (atomic_compare_exchange_weak_release ((unsigned int *)ptr, &val, 0))
        break;
    }
}
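/* A hedged end-to-end sketch of the robust-lock API above.  The lock
   word, the zero flags value and the repair_protected_state helper are
   illustrative only; a real caller keeps the word in memory shared by
   the cooperating processes:

     static int robust_lock;

     int err = __lll_robust_lock (&robust_lock, 0);
     if (err == EOWNERDEAD)
       repair_protected_state ();

     ... critical section ...

     __lll_robust_unlock (&robust_lock, 0);

   EOWNERDEAD means the previous owner died while holding the lock, so
   the data it protects may be inconsistent and must be fixed before
   use.  On unlock, lll_set_wake rouses any pollers early when the
   waiters bit is set; otherwise the word is simply cleared with a
   release-ordered compare-and-swap.  */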