sys/kern/kern_rangelock.c
/*-
 * Copyright (c) 2009 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rangelock.h>
#include <sys/systm.h>

#include <vm/uma.h>

struct rl_q_entry {
        TAILQ_ENTRY(rl_q_entry) rl_q_link;      /* Linkage in rl_waiters. */
        off_t   rl_q_start, rl_q_end;           /* Locked byte range [start, end). */
        int     rl_q_flags;                     /* RL_LOCK_{READ,WRITE,GRANTED}. */
};

static uma_zone_t rl_entry_zone;

static void
rangelock_sys_init(void)
{
        rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);

static struct rl_q_entry *
rlqentry_alloc(void)
{
        return (uma_zalloc(rl_entry_zone, M_WAITOK));
}

void
rlqentry_free(struct rl_q_entry *rleq)
{
        uma_zfree(rl_entry_zone, rleq);
}

void
rangelock_init(struct rangelock *lock)
{
        TAILQ_INIT(&lock->rl_waiters);
        lock->rl_currdep = NULL;
}

void
rangelock_destroy(struct rangelock *lock)
{
        KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
}

/*
 * Two entries are compatible if their ranges do not overlap, or both
 * entries are for read.
 */
static int
ranges_overlap(const struct rl_q_entry *e1,
    const struct rl_q_entry *e2)
{
        if (e1->rl_q_start < e2->rl_q_end && e1->rl_q_end > e2->rl_q_start)
                return (1);
        return (0);
}

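/*
 * For example, with the half-open [start, end) convention used above,
 * [0, 4096) and [4096, 8192) do not overlap, while [0, 4096) and
 * [100, 200) do; a write request on either of the latter pair is
 * therefore incompatible with any lock on the other.
 */
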
/*
 * Recalculate the lock->rl_currdep after an unlock.
 *
 * rl_currdep points at the first waiter that has not been granted.  Walk
 * the queue from it, granting requests and waking their sleepers, until a
 * request is found that conflicts with one ahead of it; rl_currdep is then
 * left pointing at that request (or NULL if everything was granted).
 */
static void
rangelock_calc_block(struct rangelock *lock)
{
        struct rl_q_entry *entry, *nextentry, *entry1;

        for (entry = lock->rl_currdep; entry != NULL; entry = nextentry) {
                nextentry = TAILQ_NEXT(entry, rl_q_link);
                if (entry->rl_q_flags & RL_LOCK_READ) {
                        /* Reads must not overlap with granted writes. */
                        for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
                            !(entry1->rl_q_flags & RL_LOCK_READ);
                            entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
                                if (ranges_overlap(entry, entry1))
                                        goto out;
                        }
                } else {
                        /* Write must not overlap with any granted locks. */
                        for (entry1 = TAILQ_FIRST(&lock->rl_waiters);
                            entry1 != entry;
                            entry1 = TAILQ_NEXT(entry1, rl_q_link)) {
                                if (ranges_overlap(entry, entry1))
                                        goto out;
                        }

                        /* Move grantable write locks to the front. */
                        TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
                        TAILQ_INSERT_HEAD(&lock->rl_waiters, entry, rl_q_link);
                }

                /* Grant this lock. */
                entry->rl_q_flags |= RL_LOCK_GRANTED;
                wakeup(entry);
        }
out:
        lock->rl_currdep = entry;
}

static void
rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry,
    struct mtx *ilk)
{
        MPASS(lock != NULL && entry != NULL && ilk != NULL);
        mtx_assert(ilk, MA_OWNED);
        KASSERT(entry != lock->rl_currdep, ("stuck currdep"));

        TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
        rangelock_calc_block(lock);
        mtx_unlock(ilk);

        /* Cache the entry in the thread for its next request, if possible. */
        if (curthread->td_rlqe == NULL)
                curthread->td_rlqe = entry;
        else
                rlqentry_free(entry);
}

void
rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk)
{
        MPASS(lock != NULL && cookie != NULL && ilk != NULL);

        mtx_lock(ilk);
        rangelock_unlock_locked(lock, cookie, ilk);
}

/*
 * Unlock a sub-range of the granted lock.  Returns NULL if the whole
 * range was released, otherwise the cookie for the part that remains
 * granted.
 */
void *
rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start,
    off_t end, struct mtx *ilk)
{
        struct rl_q_entry *entry;

        MPASS(lock != NULL && cookie != NULL && ilk != NULL);
        entry = cookie;
        KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED,
            ("Unlocking non-granted lock"));
        KASSERT(entry->rl_q_start == start, ("wrong start"));
        KASSERT(entry->rl_q_end >= end, ("wrong end"));

        mtx_lock(ilk);
        if (entry->rl_q_end == end) {
                rangelock_unlock_locked(lock, cookie, ilk);
                return (NULL);
        }
        entry->rl_q_end = end;
        rangelock_calc_block(lock);
        mtx_unlock(ilk);
        return (cookie);
}

/*
 * Add the lock request to the queue of pending requests for the
 * rangelock, then sleep until the request can be granted.
 */
static void *
rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
    struct mtx *ilk)
{
        struct rl_q_entry *entry;
        struct thread *td;

        MPASS(lock != NULL && ilk != NULL);

        td = curthread;
        if (td->td_rlqe != NULL) {
                /* Reuse the entry cached by a previous unlock. */
                entry = td->td_rlqe;
                td->td_rlqe = NULL;
        } else
                entry = rlqentry_alloc();
        MPASS(entry != NULL);
        entry->rl_q_flags = mode;
        entry->rl_q_start = start;
        entry->rl_q_end = end;

        mtx_lock(ilk);
        /*
         * XXXKIB TODO. Check that a thread does not try to enqueue a
         * lock that is incompatible with another request from the same
         * thread.
         */
        TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
        if (lock->rl_currdep == NULL)
                lock->rl_currdep = entry;
        rangelock_calc_block(lock);
        while (!(entry->rl_q_flags & RL_LOCK_GRANTED))
                msleep(entry, ilk, 0, "range", 0);
        mtx_unlock(ilk);
        return (entry);
}

void *
rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{
        return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk));
}

void *
rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
{
        return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk));
}
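
/*
 * Example usage (an illustrative sketch, not part of this file): a consumer
 * embeds a struct rangelock next to the data it protects and pairs it with
 * its own interlock mutex.  The "obj", "o_rl" and "o_ilk" names below are
 * hypothetical placeholders, not existing kernel interfaces.
 *
 *      void *cookie;
 *
 *      rangelock_init(&obj->o_rl);
 *      ...
 *      cookie = rangelock_wlock(&obj->o_rl, off, off + len, &obj->o_ilk);
 *      ... modify bytes [off, off + len) of the object ...
 *      rangelock_unlock(&obj->o_rl, cookie, &obj->o_ilk);
 *
 * Readers call rangelock_rlock() the same way; overlapping read requests
 * may all be granted at once, while a write request waits until every
 * overlapping granted lock has been released.
 */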