/*-
 * Copyright (c) 2001 Matthew Dillon.  All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Mutex pool routines.  These routines are designed to be used as short
 * term leaf mutexes (e.g. the last mutex you might acquire other than
 * calling msleep()).  They operate using a shared pool.  A mutex is chosen
 * from the pool based on the supplied pointer (which may or may not be
 * valid).
 *
 * Advantages:
 *	- no structural overhead.  Mutexes can be associated with structures
 *	  without adding bloat to the structures.
 *	- mutexes can be obtained for invalid pointers, useful when using
 *	  mutexes to interlock destructor ops.
 *	- no initialization/destructor overhead.
 *	- can be used with msleep.
 *
 * Disadvantages:
 *	- should generally only be used as leaf mutexes.
 *	- pool/pool dependency ordering cannot be depended on.
 *	- possible L1 cache mastership contention between CPUs.
 */
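
/*
 * Typical usage, as a rough sketch (the object pointer "obj" is only
 * illustrative; any pool, including the global mtxpool_sleep defined
 * below, works the same way):
 *
 *	struct mtx *mtxp;
 *
 *	mtxp = mtx_pool_find(mtxpool_sleep, obj);
 *	mtx_lock(mtxp);
 *	... short critical section, possibly msleep() on mtxp ...
 *	mtx_unlock(mtxp);
 *
 * Because the mutex is shared with unrelated addresses that hash to the
 * same slot, hold times should be kept short and no other mutexes should
 * be acquired while it is held.
 */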

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>

static MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");

/* Pool sizes must be a power of two */
#ifndef MTX_POOL_LOCKBUILDER_SIZE
#define MTX_POOL_LOCKBUILDER_SIZE	128
#endif
#ifndef MTX_POOL_SLEEP_SIZE
#define MTX_POOL_SLEEP_SIZE		128
#endif

struct mtxpool_header {
	int	mtxpool_size;
	int	mtxpool_mask;
	int	mtxpool_shift;
	int	mtxpool_next;
};

struct mtx_pool {
	struct mtxpool_header mtx_pool_header;
	struct mtx	mtx_pool_ary[1];
};

static struct mtx_pool_lockbuilder {
	struct mtxpool_header mtx_pool_header;
	struct mtx	mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
} lockbuilder_pool;

#define mtx_pool_size	mtx_pool_header.mtxpool_size
#define mtx_pool_mask	mtx_pool_header.mtxpool_mask
#define mtx_pool_shift	mtx_pool_header.mtxpool_shift
#define mtx_pool_next	mtx_pool_header.mtxpool_next

struct mtx_pool *mtxpool_sleep;
struct mtx_pool *mtxpool_lockbuilder;

#if UINTPTR_MAX == UINT64_MAX		/* 64 bits */
#  define POINTER_BITS		64
#  define HASH_MULTIPLIER	11400714819323198485u /* (2^64)*(sqrt(5)-1)/2 */
#else					/* assume 32 bits */
#  define POINTER_BITS		32
#  define HASH_MULTIPLIER	2654435769u	      /* (2^32)*(sqrt(5)-1)/2 */
#endif

/*
 * Return the (shared) pool mutex associated with the specified address.
 * The returned mutex is a leaf level mutex, meaning that if you obtain it
 * you cannot obtain any other mutexes until you release it.  You can
 * legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_find(struct mtx_pool *pool, void *ptr)
{
	int p;

	KASSERT(pool != NULL, ("mtx_pool_find(): null pool"));
	/*
	 * Fibonacci hash, see Knuth's
	 * _Art of Computer Programming, Volume 3 / Sorting and Searching_
	 */
	p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
	    pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[p]);
}

static void
mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
    int opts)
{
	int i, maskbits;

	pool->mtx_pool_size = pool_size;
	pool->mtx_pool_mask = pool_size - 1;
	for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
		maskbits++;
	pool->mtx_pool_shift = POINTER_BITS - maskbits;
	pool->mtx_pool_next = 0;
	for (i = 0; i < pool_size; ++i)
		mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
}
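
/*
 * Worked example (illustration only, not used by the code): with the
 * default pool_size of 128 the maskbits loop above runs for i = 1, 2, 4,
 * ..., 64, so maskbits ends up as 7 and mtx_pool_shift becomes
 * POINTER_BITS - 7 (57 on a 64-bit machine, 25 on a 32-bit one).
 * mtx_pool_find() then selects a slot from the top 7 bits of
 * HASH_MULTIPLIER * (uintptr_t)ptr, i.e. index = (product >> 57) & 127
 * in the 64-bit case.
 */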

struct mtx_pool *
mtx_pool_create(const char *mtx_name, int pool_size, int opts)
{
	struct mtx_pool *pool;

	if (pool_size <= 0 || !powerof2(pool_size)) {
		printf("WARNING: %s pool size is not a power of 2.\n",
		    mtx_name);
		pool_size = 128;
	}
	MALLOC(pool, struct mtx_pool *,
	    sizeof (struct mtx_pool) + ((pool_size - 1) * sizeof (struct mtx)),
	    M_MTXPOOL, M_WAITOK | M_ZERO);
	mtx_pool_initialize(pool, mtx_name, pool_size, opts);
	return pool;
}

void
mtx_pool_destroy(struct mtx_pool **poolp)
{
	int i;
	struct mtx_pool *pool = *poolp;

	for (i = pool->mtx_pool_size - 1; i >= 0; --i)
		mtx_destroy(&pool->mtx_pool_ary[i]);
	FREE(pool, M_MTXPOOL);
	*poolp = NULL;
}
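
/*
 * Lifecycle sketch for a dynamically allocated pool (the pool name, size,
 * and "obj" pointer are placeholders, not part of this file):
 *
 *	struct mtx_pool *mypool;
 *
 *	mypool = mtx_pool_create("my mtxpool", 32, MTX_DEF);
 *	mtx_lock(mtx_pool_find(mypool, obj));
 *	...
 *	mtx_unlock(mtx_pool_find(mypool, obj));
 *	mtx_pool_destroy(&mypool);	(also sets mypool to NULL)
 *
 * All mutexes in the pool must be unowned when mtx_pool_destroy() is
 * called, since it mtx_destroy()s every entry.
 */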

static void
mtx_pool_setup_static(void *dummy __unused)
{
	mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
	    "lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
}

static void
mtx_pool_setup_dynamic(void *dummy __unused)
{
	mtxpool_sleep = mtx_pool_create("sleep mtxpool",
	    MTX_POOL_SLEEP_SIZE, MTX_DEF);
}

/*
 * Obtain a (shared) mutex from the pool.  The returned mutex is a leaf
 * level mutex, meaning that if you obtain it you cannot obtain any other
 * mutexes until you release it.  You can legally msleep() on the mutex.
 */
struct mtx *
mtx_pool_alloc(struct mtx_pool *pool)
{
	int i;

	KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
	/*
	 * mtx_pool_next is unprotected against multiple accesses,
	 * but simultaneous access by two CPUs should not be very
	 * harmful.
	 */
	i = pool->mtx_pool_next;
	pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
	return (&pool->mtx_pool_ary[i]);
}
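
/*
 * Rough sketch of using a pool mutex with msleep() (the channel, priority,
 * wait message, and timeout below are placeholder values):
 *
 *	struct mtx *mtxp;
 *	int error;
 *
 *	mtxp = mtx_pool_alloc(mtxpool_sleep);
 *	mtx_lock(mtxp);
 *	error = msleep(chan, mtxp, PWAIT, "mpwait", hz);
 *	mtx_unlock(mtxp);
 *
 * mtx_pool_alloc() is handy when there is no natural address to hash with
 * mtx_pool_find(); it simply hands out pool entries round-robin.
 */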

/*
 * The lockbuilder pool must be initialized early because the lockmgr
 * and sx locks depend on it.  The sx locks are used in the kernel
 * memory allocator.  The lockmgr subsystem is initialized by
 * SYSINIT(..., SI_SUB_LOCKMGR, ...).
 *
 * We can't call MALLOC() to dynamically allocate the sleep pool
 * until after kmeminit() has been called, which is done by
 * SYSINIT(..., SI_SUB_KMEM, ...).
 */
SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
    mtx_pool_setup_static, NULL);
SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
    mtx_pool_setup_dynamic, NULL);