/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
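
/*
 * Design overview, as implemented below: read locking is made cheap by
 * linking a caller-supplied rm_priotracker onto a per-CPU queue inside a
 * critical section, with no atomic or bus-locked instructions on the fast
 * path.  Writers pay instead: taking the write lock revokes the "read
 * token", forces every CPU (via an smp_rendezvous IPI) to flag its active
 * readers, and then waits on a turnstile for those readers to drain.  This
 * trades expensive write acquisition for very fast read acquisition, which
 * suits read-mostly data.
 */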
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
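
/*
 * Tracker flag values: RMPF_ONQUEUE marks a tracker that has been linked
 * onto its lock's rm_activeReaders list; RMPF_SIGNAL additionally marks a
 * tracker whose reader must wake a writer sleeping on the lock's turnstile
 * when the read lock is released.
 */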
#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2
/*
 * To support the use of rmlocks in CVs and msleep(), yet another list
 * for the priority tracker would be needed.  Using this lock for cv and
 * msleep also does not seem very useful.
 */

static __inline void
compiler_memory_barrier(void)
{

	__asm __volatile("":::"memory");
}
static void	assert_rm(struct lock_object *lock, int what);
static void	lock_rm(struct lock_object *lock, int how);
static int	unlock_rm(struct lock_object *lock);
struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#if 0
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
};
/*
 * These lock class methods are not implemented for rmlocks; reaching any
 * of them indicates a bug, hence the panics.
 */
static void
assert_rm(struct lock_object *lock, int what)
{

	panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{

	panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{

	panic("unlock_rm called");
}
/*
 * rm_spinlock protects every rmlock's rm_activeReaders list as well as
 * the rmp_flags transitions of the trackers on those lists.
 */
static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);
/*
 * Add or remove tracker from per cpu list.
 * The per cpu list can be traversed at any time in forward
 * direction from an interrupt on the *local* cpu.
 */
static __inline void
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

static __inline void
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal */
	next->rmq_prev = prev;

	/* Remove from list */
	prev->rmq_next = next;
}
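
/*
 * Note that neither rm_tracker_add() nor rm_tracker_remove() needs atomic
 * operations: both run with the thread pinned inside a critical section,
 * and the only concurrent observer is a forward traversal from an
 * interrupt on the same CPU, which sees the rmq_next updates in program
 * order.
 */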

static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
	return;
}
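
/*
 * rm_cleanIPI() runs on every CPU, via smp_rendezvous() from _rm_wlock(),
 * once the writer has revoked the read token.  It walks the local per-cpu
 * tracker queue and moves every active reader of the given lock onto
 * rm_activeReaders, so the writer can wait for each of them to drain.
 */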

void
rm_init(struct rmlock *rm, const char *name, int opts)
{

	rm->rm_noreadtoken = 1;
	LIST_INIT(&rm->rm_activeReaders);
	mtx_init(&rm->rm_lock, name, "RM_MTX", MTX_NOWITNESS);
	lock_init(&rm->lock_object, &lock_class_rm, name, NULL,
	    (opts & LO_RECURSABLE) | LO_WITNESS);
}

void
rm_destroy(struct rmlock *rm)
{

	mtx_destroy(&rm->rm_lock);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	return (mtx_owned(&rm->rm_lock));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc, args->ra_opts);
}
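
/*
 * Example usage (a sketch only; it assumes the rm_rlock()/rm_runlock()/
 * rm_wlock()/rm_wunlock() wrapper macros that <sys/rmlock.h> layers over
 * the _rm_*_debug() functions at the bottom of this file):
 *
 *	static struct rmlock cfg_lock;
 *	rm_init(&cfg_lock, "cfg", 0);
 *
 *	struct rm_priotracker tracker;	caller-supplied; the stack is fine
 *	rm_rlock(&cfg_lock, &tracker);
 *	... access read-mostly data ...
 *	rm_runlock(&cfg_lock, &tracker);
 */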

static void
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct rm_queue *queue;
	struct rm_priotracker *atracker;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit */
	if (0 == rm->rm_noreadtoken) {
		critical_exit();
		return;
	}

	/* Remove our tracker from the per cpu list */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return;
	}

	/*
	 * We allow readers to acquire the lock even if a writer is blocked,
	 * provided the lock is recursive and the reader already holds it.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per cpu queue.
		 */
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			atracker = (struct rm_priotracker *)queue;
			if ((atracker->rmp_rmlock == rm) &&
			    (atracker->rmp_thread == tracker->rmp_thread)) {
				mtx_lock_spin(&rm_spinlock);
				LIST_INSERT_HEAD(&rm->rm_activeReaders,
				    tracker, rmp_qentry);
				tracker->rmp_flags = RMPF_ONQUEUE;
				mtx_unlock_spin(&rm_spinlock);
				rm_tracker_add(pc, tracker);
				critical_exit();
				return;
			}
		}
	}

	/*
	 * Take the hard path: serialize against writers on rm_lock, hand
	 * the read token back out, and register our tracker again.
	 */
	sched_unpin();
	critical_exit();

	mtx_lock(&rm->rm_lock);
	rm->rm_noreadtoken = 0;
	critical_enter();

	pc = pcpu_find(curcpu);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	mtx_unlock(&rm->rm_lock);
	return;
}

void
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	tracker->rmp_flags = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	compiler_memory_barrier();

	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	td->td_pinned++;	/* sched_pin(); */

	compiler_memory_barrier();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions
	 * into a single conditional jump.
	 */
	if (0 == (td->td_owepreempt | rm->rm_noreadtoken)) {
		return;
	}

	/* We do not have a read token and need to acquire one */
	_rm_rlock_hard(rm, tracker);
}
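
/*
 * Note that _rm_rlock() open-codes critical_enter()/critical_exit() and
 * sched_pin() by touching td_critnest and td_pinned directly, and folds
 * the deferred-preemption check that critical_exit() would perform into
 * the same conditional that tests for the read token.  If either
 * condition fires, _rm_rlock_hard() enters a real critical section and
 * resolves any pending preemption on its first critical_exit().
 */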

static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		/* Pay back the preemption deferred by _rm_runlock(). */
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags) {
		return;
	}

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu];	/* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	td->td_pinned--;	/* sched_unpin(); */

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;

	mtx_lock(&rm->rm_lock);

	if (rm->rm_noreadtoken == 0) {
		/* Get all read tokens back */
		rm->rm_noreadtoken = 1;

		/*
		 * Assumes rm->rm_noreadtoken update is visible on other
		 * CPUs before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous(smp_no_rendevous_barrier, rm_cleanIPI,
		    smp_no_rendevous_barrier, rm);
#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);

		/* Wait for every flagged reader to drop its read lock. */
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}

		mtx_unlock_spin(&rm_spinlock);
	}
}
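
/*
 * Write locking thus proceeds in three steps: revoke the read token under
 * rm_lock, IPI all CPUs so rm_cleanIPI() flags their active readers, then
 * sleep on the turnstile until the last flagged reader has unlocked.  The
 * write lock is then simply held as the sleep mutex rm_lock until
 * _rm_wunlock().
 */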

void
_rm_wunlock(struct rmlock *rm)
{

	mtx_unlock(&rm->rm_lock);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line);

	_rm_rlock(rm, tracker);

	LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, 0, file, line);

	curthread->td_locks++;
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	curthread->td_locks--;
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled
 * in the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

void
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_rlock(rm, tracker);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif