2 * Copyright (c) 2000 Michael Smith
3 * Copyright (c) 2000 BSDi
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * $FreeBSD: src/sys/dev/acpica/Osd/OsdSynch.c,v 1.21 2004/05/05 20:07:52 njl Exp $
28 * $DragonFly: src/sys/dev/acpica5/Osd/OsdSynch.c,v 1.11 2007/01/25 15:12:06 y0netan1 Exp $
32 * 6.1 : Mutual Exclusion and Synchronisation
38 #include <sys/kernel.h>
39 #include <sys/malloc.h>
40 #include <sys/sysctl.h>
42 #include <sys/thread.h>
43 #include <sys/thread2.h>
44 #include <sys/spinlock2.h>
46 #define _COMPONENT ACPI_OS_SERVICES
47 ACPI_MODULE_NAME("SYNCH")
49 MALLOC_DEFINE(M_ACPISEM
, "acpisem", "ACPI semaphore");
51 #define AS_LOCK(as) spin_lock_wr(&(as)->as_mtx)
52 #define AS_UNLOCK(as) spin_unlock_wr(&(as)->as_mtx)
56 * Simple counting semaphore implemented using a mutex. (Subsequently used
57 * in the OSI code to implement a mutex. Go figure.)
59 struct acpi_semaphore
{
60 struct spinlock as_mtx
;
68 #ifndef ACPI_NO_SEMAPHORES
69 #ifndef ACPI_SEMAPHORES_MAX_PENDING
70 #define ACPI_SEMAPHORES_MAX_PENDING 4
72 static int acpi_semaphore_debug
= 0;
73 TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug
);
74 SYSCTL_DECL(_debug_acpi
);
75 SYSCTL_INT(_debug_acpi
, OID_AUTO
, semaphore_debug
, CTLFLAG_RW
,
76 &acpi_semaphore_debug
, 0, "Enable ACPI semaphore debug messages");
77 #endif /* !ACPI_NO_SEMAPHORES */
80 AcpiOsCreateSemaphore(UINT32 MaxUnits
, UINT32 InitialUnits
,
81 ACPI_HANDLE
*OutHandle
)
83 #ifndef ACPI_NO_SEMAPHORES
84 struct acpi_semaphore
*as
;
86 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__
);
88 if (OutHandle
== NULL
)
89 return_ACPI_STATUS (AE_BAD_PARAMETER
);
90 if (InitialUnits
> MaxUnits
)
91 return_ACPI_STATUS (AE_BAD_PARAMETER
);
93 as
= kmalloc(sizeof(*as
), M_ACPISEM
, M_INTWAIT
| M_ZERO
);
95 spin_init(&as
->as_mtx
);
96 as
->as_units
= InitialUnits
;
97 as
->as_maxunits
= MaxUnits
;
98 as
->as_pendings
= as
->as_resetting
= as
->as_timeouts
= 0;
100 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
,
101 "created semaphore %p max %d, initial %d\n",
102 as
, InitialUnits
, MaxUnits
));
104 *OutHandle
= (ACPI_HANDLE
)as
;
106 *OutHandle
= (ACPI_HANDLE
)OutHandle
;
107 #endif /* !ACPI_NO_SEMAPHORES */
109 return_ACPI_STATUS (AE_OK
);
113 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle
)
115 #ifndef ACPI_NO_SEMAPHORES
116 struct acpi_semaphore
*as
= (struct acpi_semaphore
*)Handle
;
118 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__
);
120 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
, "destroyed semaphore %p\n", as
));
121 spin_uninit(&as
->as_mtx
);
122 kfree(as
, M_ACPISEM
);
123 #endif /* !ACPI_NO_SEMAPHORES */
125 return_ACPI_STATUS (AE_OK
);
129 AcpiOsWaitSemaphore(ACPI_HANDLE Handle
, UINT32 Units
, UINT16 Timeout
)
131 #ifndef ACPI_NO_SEMAPHORES
133 struct acpi_semaphore
*as
= (struct acpi_semaphore
*)Handle
;
135 struct timeval timeouttv
, currenttv
, timelefttv
;
138 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__
);
141 return_ACPI_STATUS (AE_BAD_PARAMETER
);
144 return_ACPI_STATUS (AE_OK
);
147 if (as
->as_units
< Units
&& as
->as_timeouts
> 10) {
148 kprintf("%s: semaphore %p too many timeouts, resetting\n", __func__
, as
);
150 as
->as_units
= as
->as_maxunits
;
152 as
->as_resetting
= 1;
156 return_ACPI_STATUS (AE_TIME
);
159 if (as
->as_resetting
)
160 return_ACPI_STATUS (AE_TIME
);
163 /* a timeout of ACPI_WAIT_FOREVER means "forever" */
164 if (Timeout
== ACPI_WAIT_FOREVER
) {
166 timeouttv
.tv_sec
= ((0xffff/1000) + 1); /* cf. ACPI spec */
167 timeouttv
.tv_usec
= 0;
169 /* compute timeout using microseconds per tick */
170 tmo
= (Timeout
* 1000) / (1000000 / hz
);
173 timeouttv
.tv_sec
= Timeout
/ 1000;
174 timeouttv
.tv_usec
= (Timeout
% 1000) * 1000;
177 /* calculate timeout value in timeval */
178 getmicrouptime(¤ttv
);
179 timevaladd(&timeouttv
, ¤ttv
);
182 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
,
183 "get %d units from semaphore %p (has %d), timeout %d\n",
184 Units
, as
, as
->as_units
, Timeout
));
186 if (as
->as_maxunits
== ACPI_NO_UNIT_LIMIT
) {
190 if (as
->as_units
>= Units
) {
191 as
->as_units
-= Units
;
196 /* limit number of pending treads */
197 if (as
->as_pendings
>= ACPI_SEMAPHORES_MAX_PENDING
) {
202 /* if timeout values of zero is specified, return immediately */
208 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
,
209 "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
210 as
, &as
->as_mtx
, PCATCH
, tmo
));
214 if (acpi_semaphore_debug
) {
215 kprintf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
216 __func__
, Timeout
, as
->as_pendings
, as
, AcpiOsGetThreadId());
219 rv
= msleep(as
, &as
->as_mtx
, PCATCH
, "acsem", tmo
);
224 if (as
->as_resetting
) {
225 /* semaphore reset, return immediately */
226 if (as
->as_pendings
== 0) {
227 as
->as_resetting
= 0;
234 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
, "msleep(%d) returned %d\n", tmo
, rv
));
235 if (rv
== EWOULDBLOCK
) {
240 /* check if we already awaited enough */
241 timelefttv
= timeouttv
;
242 getmicrouptime(¤ttv
);
243 timevalsub(&timelefttv
, ¤ttv
);
244 if (timelefttv
.tv_sec
< 0) {
245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
, "await semaphore %p timeout\n",
251 /* adjust timeout for the next sleep */
252 tmo
= (timelefttv
.tv_sec
* 1000000 + timelefttv
.tv_usec
) /
257 if (acpi_semaphore_debug
) {
258 kprintf("%s: Wakeup timeleft(%lu, %lu), tmo %u, sem %p, thread %d\n",
259 __func__
, timelefttv
.tv_sec
, timelefttv
.tv_usec
, tmo
, as
,
260 AcpiOsGetThreadId());
264 if (acpi_semaphore_debug
) {
265 if (result
== AE_TIME
&& Timeout
> 0) {
266 kprintf("%s: Timeout %d, pending %d, semaphore %p\n",
267 __func__
, Timeout
, as
->as_pendings
, as
);
269 if (result
== AE_OK
&& (as
->as_timeouts
> 0 || as
->as_pendings
> 0)) {
270 kprintf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
271 __func__
, Units
, as
->as_units
, as
->as_pendings
, as
,
272 AcpiOsGetThreadId());
276 if (result
== AE_TIME
)
282 return_ACPI_STATUS (result
);
284 return_ACPI_STATUS (AE_OK
);
285 #endif /* !ACPI_NO_SEMAPHORES */
289 AcpiOsSignalSemaphore(ACPI_HANDLE Handle
, UINT32 Units
)
291 #ifndef ACPI_NO_SEMAPHORES
292 struct acpi_semaphore
*as
= (struct acpi_semaphore
*)Handle
;
295 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__
);
298 return_ACPI_STATUS(AE_BAD_PARAMETER
);
301 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX
,
302 "return %d units to semaphore %p (has %d)\n",
303 Units
, as
, as
->as_units
));
304 if (as
->as_maxunits
!= ACPI_NO_UNIT_LIMIT
) {
305 as
->as_units
+= Units
;
306 if (as
->as_units
> as
->as_maxunits
)
307 as
->as_units
= as
->as_maxunits
;
310 if (acpi_semaphore_debug
&& (as
->as_timeouts
> 0 || as
->as_pendings
> 0)) {
311 kprintf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
312 __func__
, Units
, as
->as_units
, as
->as_pendings
, as
, AcpiOsGetThreadId());
317 #endif /* !ACPI_NO_SEMAPHORES */
319 return_ACPI_STATUS (AE_OK
);
322 struct acpi_spinlock
{
323 struct spinlock lock
;
324 #ifdef ACPI_DEBUG_LOCKS
332 AcpiOsCreateLock(ACPI_SPINLOCK
*OutHandle
)
336 if (OutHandle
== NULL
)
337 return (AE_BAD_PARAMETER
);
338 spin
= kmalloc(sizeof(*spin
), M_ACPISEM
, M_INTWAIT
|M_ZERO
);
339 spin_init(&spin
->lock
);
340 #ifdef ACPI_DEBUG_LOCKS
350 AcpiOsDeleteLock (ACPI_SPINLOCK Spin
)
354 spin_uninit(&Spin
->lock
);
355 kfree(Spin
, M_ACPISEM
);
359 * OS-dependent locking primitives. These routines should be able to be
360 * called from an interrupt-handler or cpu_idle thread.
362 * NB: some of ACPI-CA functions with locking flags, say AcpiSetRegister(),
363 * are changed to unconditionally call AcpiOsAcquireLock/AcpiOsReleaseLock.
366 #ifdef ACPI_DEBUG_LOCKS
367 _AcpiOsAcquireLock (ACPI_SPINLOCK Spin
, const char *func
, int line
)
369 AcpiOsAcquireLock (ACPI_SPINLOCK Spin
)
372 spin_lock_wr(&Spin
->lock
);
374 #ifdef ACPI_DEBUG_LOCKS
376 kprintf("%p(%s:%d): acpi_spinlock %p already held by %p(%s:%d)\n",
377 curthread
, func
, line
, Spin
, Spin
->owner
, Spin
->func
,
381 Spin
->owner
= curthread
;
390 AcpiOsReleaseLock (ACPI_SPINLOCK Spin
, UINT32 Flags
)
392 #ifdef ACPI_DEBUG_LOCKS
394 if (Spin
->owner
!= NULL
) {
395 kprintf("%p: acpi_spinlock %p is unexectedly held by %p(%s:%d)\n",
396 curthread
, Spin
, Spin
->owner
, Spin
->func
, Spin
->line
);
405 spin_unlock_wr(&Spin
->lock
);
/* Section 5.2.9.1: global lock acquire/release functions */
#define GL_ACQUIRED	(-1)
#define GL_BUSY		0	/* restored: used below but definition was dropped */
#define GL_BIT_PENDING	0x1
#define GL_BIT_OWNED	0x2
#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
416 * Acquire the global lock. If busy, set the pending bit. The caller
417 * will wait for notification from the BIOS that the lock is available
418 * and then attempt to acquire it again.
421 acpi_acquire_global_lock(uint32_t *lock
)
427 new = ((old
& ~GL_BIT_MASK
) | GL_BIT_OWNED
) |
428 ((old
>> 1) & GL_BIT_PENDING
);
429 } while (atomic_cmpset_int(lock
, old
, new) == 0);
431 return ((new < GL_BIT_MASK
) ? GL_ACQUIRED
: GL_BUSY
);
435 * Release the global lock, returning whether there is a waiter pending.
436 * If the BIOS set the pending bit, OSPM must notify the BIOS when it
440 acpi_release_global_lock(uint32_t *lock
)
446 new = old
& ~GL_BIT_MASK
;
447 } while (atomic_cmpset_int(lock
, old
, new) == 0);
449 return (old
& GL_BIT_PENDING
);