/*-
 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/alq.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/fcntl.h>
#include <sys/eventhandler.h>

#include <security/mac/mac_framework.h>
/* Async. Logging Queue */
struct alq {
	int	aq_entmax;		/* Max entries */
	int	aq_entlen;		/* Entry length */
	char	*aq_entbuf;		/* Buffer for stored entries */
	int	aq_flags;		/* Queue flags */
	struct mtx	aq_mtx;		/* Queue lock */
	struct vnode	*aq_vp;		/* Open vnode handle */
	struct ucred	*aq_cred;	/* Credentials of the opening thread */
	struct ale	*aq_first;	/* First ent */
	struct ale	*aq_entfree;	/* First free ent */
	struct ale	*aq_entvalid;	/* First ent valid for writing */
	LIST_ENTRY(alq)	aq_act;		/* List of active queues */
	LIST_ENTRY(alq)	aq_link;	/* List of all queues */
};

#define	AQ_WANTED	0x0001		/* Wakeup sleeper when io is done */
#define	AQ_ACTIVE	0x0002		/* on the active list */
#define	AQ_FLUSHING	0x0004		/* doing IO */
#define	AQ_SHUTDOWN	0x0008		/* Queue no longer valid */

#define	ALQ_LOCK(alq)	mtx_lock_spin(&(alq)->aq_mtx)
#define	ALQ_UNLOCK(alq)	mtx_unlock_spin(&(alq)->aq_mtx)
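/*
 * Illustrative sketch (not part of the original file): the ale entries
 * allocated in alq_open() form a circular singly-linked ring over the
 * contiguous aq_entbuf, with aq_entfree pointing at the next entry a
 * writer may claim and aq_entvalid at the oldest unflushed entry.  A
 * hypothetical debug helper, called with ALQ_LOCK held, could count the
 * unflushed entries by walking ae_next until the AE_VALID flag runs
 * out, mirroring the do/while loop in alq_doio():
 *
 *	static int
 *	alq_nvalid(struct alq *alq)
 *	{
 *		struct ale *ale;
 *		int n;
 *
 *		n = 0;
 *		if ((ale = alq->aq_entvalid) == NULL)
 *			return (0);
 *		do {
 *			n++;
 *			ale = ale->ae_next;
 *		} while (ale->ae_flags & AE_VALID);
 *		return (n);
 *	}
 */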
static MALLOC_DEFINE(M_ALD, "ALD", "ALD");

/*
 * The ald_mtx protects the ald_queues list and the ald_active list.
 */
static struct mtx ald_mtx;
static LIST_HEAD(, alq) ald_queues;
static LIST_HEAD(, alq) ald_active;
static int ald_shutingdown = 0;
struct thread *ald_thread;
static struct proc *ald_proc;

#define	ALD_LOCK()	mtx_lock(&ald_mtx)
#define	ALD_UNLOCK()	mtx_unlock(&ald_mtx)

/* Daemon functions */
static int ald_add(struct alq *);
static int ald_rem(struct alq *);
static void ald_startup(void *);
static void ald_daemon(void);
static void ald_shutdown(void *, int);
static void ald_activate(struct alq *);
static void ald_deactivate(struct alq *);

/* Internal queue functions */
static void alq_shutdown(struct alq *);
static int alq_doio(struct alq *);
/*
 * Add a new queue to the global list.  Fail if we're shutting down.
 */
static int
ald_add(struct alq *alq)
{
	int error;

	error = 0;

	ALD_LOCK();
	if (ald_shutingdown) {
		error = EBUSY;
		goto done;
	}
	LIST_INSERT_HEAD(&ald_queues, alq, aq_link);
done:
	ALD_UNLOCK();
	return (error);
}
/*
 * Remove a queue from the global list unless we're shutting down.  If so,
 * the ald will take care of cleaning up its resources.
 */
static int
ald_rem(struct alq *alq)
{
	int error;

	error = 0;

	ALD_LOCK();
	if (ald_shutingdown) {
		error = EBUSY;
		goto done;
	}
	LIST_REMOVE(alq, aq_link);
done:
	ALD_UNLOCK();
	return (error);
}
/*
 * Put a queue on the active list.  This will schedule it for writing.
 */
static void
ald_activate(struct alq *alq)
{
	LIST_INSERT_HEAD(&ald_active, alq, aq_act);
	wakeup(&ald_active);
}

static void
ald_deactivate(struct alq *alq)
{
	LIST_REMOVE(alq, aq_act);
	alq->aq_flags &= ~AQ_ACTIVE;
}

static void
ald_startup(void *unused)
{
	mtx_init(&ald_mtx, "ALDmtx", NULL, MTX_DEF|MTX_QUIET);
	LIST_INIT(&ald_queues);
	LIST_INIT(&ald_active);
}
static void
ald_daemon(void)
{
	int needwakeup;
	struct alq *alq;

	ald_thread = FIRST_THREAD_IN_PROC(ald_proc);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, ald_shutdown, NULL,
	    SHUTDOWN_PRI_FIRST);

	ALD_LOCK();

	for (;;) {
		while ((alq = LIST_FIRST(&ald_active)) == NULL)
			msleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);

		ALQ_LOCK(alq);
		ald_deactivate(alq);
		ALD_UNLOCK();
		needwakeup = alq_doio(alq);
		ALQ_UNLOCK(alq);
		if (needwakeup)
			wakeup(alq);
		ALD_LOCK();
	}
}
static void
ald_shutdown(void *arg, int howto)
{
	struct alq *alq;

	ALD_LOCK();
	ald_shutingdown = 1;

	while ((alq = LIST_FIRST(&ald_queues)) != NULL) {
		LIST_REMOVE(alq, aq_link);
		ALD_UNLOCK();
		alq_shutdown(alq);
		ALD_LOCK();
	}
	ALD_UNLOCK();
}
static void
alq_shutdown(struct alq *alq)
{
	ALQ_LOCK(alq);

	/* Stop any new writers. */
	alq->aq_flags |= AQ_SHUTDOWN;

	/* Drain IO */
	while (alq->aq_flags & (AQ_FLUSHING|AQ_ACTIVE)) {
		alq->aq_flags |= AQ_WANTED;
		msleep_spin(alq, &alq->aq_mtx, "aldclose", 0);
	}
	ALQ_UNLOCK(alq);

	vn_close(alq->aq_vp, FWRITE, alq->aq_cred,
	    curthread);
	crfree(alq->aq_cred);
}
/*
 * Flush all pending data to disk.  This operation will block.
 */
static int
alq_doio(struct alq *alq)
{
	struct thread *td;
	struct mount *mp;
	struct vnode *vp;
	struct uio auio;
	struct iovec aiov[2];
	struct ale *ale;
	struct ale *alstart;
	int totlen;
	int iov;
	int vfslocked;

	vp = alq->aq_vp;
	td = curthread;
	totlen = 0;
	iov = 0;

	alstart = ale = alq->aq_entvalid;
	alq->aq_entvalid = NULL;

	bzero(&aiov, sizeof(aiov));
	bzero(&auio, sizeof(auio));

	do {
		if (aiov[iov].iov_base == NULL)
			aiov[iov].iov_base = ale->ae_data;
		aiov[iov].iov_len += alq->aq_entlen;
		totlen += alq->aq_entlen;
		/* Check to see if we're wrapping the buffer */
		if (ale->ae_data + alq->aq_entlen != ale->ae_next->ae_data)
			iov++;
		ale->ae_flags &= ~AE_VALID;
		ale = ale->ae_next;
	} while (ale->ae_flags & AE_VALID);

	alq->aq_flags |= AQ_FLUSHING;
	ALQ_UNLOCK(alq);

	if (iov == 2 || aiov[iov].iov_base == NULL)
		iov--;

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_iovcnt = iov + 1;
	auio.uio_resid = totlen;
	auio.uio_td = td;

	/*
	 * Do all of the junk required to write now.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_LEASE(vp, td, alq->aq_cred, LEASE_WRITE);
	/*
	 * XXX: VOP_WRITE error checks are ignored.
	 */
#ifdef MAC
	if (mac_vnode_check_write(alq->aq_cred, NOCRED, vp) == 0)
#endif
		VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, alq->aq_cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	VFS_UNLOCK_GIANT(vfslocked);

	ALQ_LOCK(alq);
	alq->aq_flags &= ~AQ_FLUSHING;

	if (alq->aq_entfree == NULL)
		alq->aq_entfree = alstart;

	if (alq->aq_flags & AQ_WANTED) {
		alq->aq_flags &= ~AQ_WANTED;
		return (1);
	}

	return (0);
}
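/*
 * Worked example (illustrative, not part of the original file): for a
 * hypothetical 4-entry queue where entries 3, 0 and 1 hold valid data,
 * the do/while loop above detects that entry 3's buffer is not
 * contiguous with entry 0's (the ring wrapped), so the flush is split
 * across the two iovecs:
 *
 *	aiov[0].iov_base = ale3->ae_data;  aiov[0].iov_len = aq_entlen;
 *	aiov[1].iov_base = ale0->ae_data;  aiov[1].iov_len = 2 * aq_entlen;
 *
 * "iov" ends at 1 with both slots in use, so the fixup before the uio
 * setup leaves it alone and uio_iovcnt becomes iov + 1 = 2.  At most
 * two iovecs are ever needed because the backing buffer can wrap only
 * once per flush.
 */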
static struct kproc_desc ald_kp = {
	"ALQ Daemon",
	ald_daemon,
	&ald_proc
};

SYSINIT(aldthread, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &ald_kp);
SYSINIT(ald, SI_SUB_LOCK, SI_ORDER_ANY, ald_startup, NULL);
/* User visible queue functions */

/*
 * Create the queue data structure, allocate the buffer, and open the file.
 */
int
alq_open(struct alq **alqp, const char *file, struct ucred *cred, int cmode,
    int size, int count)
{
	struct thread *td;
	struct nameidata nd;
	struct ale *ale;
	struct ale *alp;
	struct alq *alq;
	char *bufp;
	int flags;
	int error;
	int i, vfslocked;

	*alqp = NULL;
	td = curthread;

	NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_SYSSPACE, file, td);
	flags = FWRITE | O_NOFOLLOW | O_CREAT;

	error = vn_open_cred(&nd, &flags, cmode, cred, NULL);
	if (error)
		return (error);

	vfslocked = NDHASGIANT(&nd);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* We just unlock so we hold a reference */
	VOP_UNLOCK(nd.ni_vp, 0);
	VFS_UNLOCK_GIANT(vfslocked);

	alq = malloc(sizeof(*alq), M_ALD, M_WAITOK|M_ZERO);
	alq->aq_entbuf = malloc(count * size, M_ALD, M_WAITOK|M_ZERO);
	alq->aq_first = malloc(sizeof(*ale) * count, M_ALD, M_WAITOK|M_ZERO);
	alq->aq_vp = nd.ni_vp;
	alq->aq_cred = crhold(cred);
	alq->aq_entmax = count;
	alq->aq_entlen = size;
	alq->aq_entfree = alq->aq_first;

	mtx_init(&alq->aq_mtx, "ALD Queue", NULL, MTX_SPIN|MTX_QUIET);

	bufp = alq->aq_entbuf;
	ale = alq->aq_first;
	alp = NULL;

	/* Match up entries with buffers */
	for (i = 0; i < count; i++) {
		if (alp)
			alp->ae_next = ale;
		ale->ae_data = bufp;
		alp = ale;
		ale++;
		bufp += size;
	}

	alp->ae_next = alq->aq_first;

	if ((error = ald_add(alq)) != 0)
		return (error);
	*alqp = alq;

	return (0);
}
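/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * subsystem could create a queue of 128 fixed-size records under the
 * caller's credentials.  "struct foo_rec", "foo_alq" and the log path
 * are invented names for illustration only:
 *
 *	struct foo_rec { uint64_t fr_ts; uint32_t fr_event; };
 *	static struct alq *foo_alq;
 *
 *	error = alq_open(&foo_alq, "/var/log/foo.alq",
 *	    curthread->td_ucred, 0600, sizeof(struct foo_rec), 128);
 *	if (error != 0)
 *		return (error);
 */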
/*
 * Copy a new entry into the queue.  If the operation would block, either
 * wait or return an error, depending on the value of waitok.
 */
int
alq_write(struct alq *alq, void *data, int waitok)
{
	struct ale *ale;

	if ((ale = alq_get(alq, waitok)) == NULL)
		return (EWOULDBLOCK);

	bcopy(data, ale->ae_data, alq->aq_entlen);
	alq_post(alq, ale);

	return (0);
}
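/*
 * Example (illustrative, not part of this file): a caller in a context
 * that must not sleep would pass ALQ_NOWAIT and tolerate EWOULDBLOCK
 * when every entry is busy; a sleepable context could pass ALQ_WAITOK
 * instead.  "foo_alq", "struct foo_rec" and the drop counter are the
 * hypothetical names from the alq_open() sketch above:
 *
 *	struct foo_rec rec;
 *
 *	rec.fr_ts = 0;
 *	rec.fr_event = 1;
 *	if (alq_write(foo_alq, &rec, ALQ_NOWAIT) == EWOULDBLOCK)
 *		foo_dropped++;
 */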
struct ale *
alq_get(struct alq *alq, int waitok)
{
	struct ale *ale;
	struct ale *aln;

	ale = NULL;

	ALQ_LOCK(alq);

	/* Loop until we get an entry or we're shutting down */
	while ((alq->aq_flags & AQ_SHUTDOWN) == 0 &&
	    (ale = alq->aq_entfree) == NULL &&
	    (waitok & ALQ_WAITOK)) {
		alq->aq_flags |= AQ_WANTED;
		msleep_spin(alq, &alq->aq_mtx, "alqget", 0);
	}

	if (ale != NULL) {
		aln = ale->ae_next;
		if ((aln->ae_flags & AE_VALID) == 0)
			alq->aq_entfree = aln;
		else
			alq->aq_entfree = NULL;
	} else
		ALQ_UNLOCK(alq);

	return (ale);
}
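/*
 * Example (illustrative, not part of this file): alq_get() plus
 * alq_post() let a caller format a record in place and avoid the bcopy
 * in alq_write().  The queue spin lock is held between the two calls,
 * so only non-sleeping work may happen in between; "foo_alq" and
 * "struct foo_rec" are the hypothetical names from the earlier
 * sketches:
 *
 *	struct ale *ale;
 *	struct foo_rec *frp;
 *
 *	if ((ale = alq_get(foo_alq, ALQ_NOWAIT)) != NULL) {
 *		frp = (struct foo_rec *)ale->ae_data;
 *		frp->fr_ts = 0;
 *		frp->fr_event = 2;
 *		alq_post(foo_alq, ale);
 *	}
 */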
void
alq_post(struct alq *alq, struct ale *ale)
{
	int activate;

	ale->ae_flags |= AE_VALID;

	if (alq->aq_entvalid == NULL)
		alq->aq_entvalid = ale;

	if ((alq->aq_flags & AQ_ACTIVE) == 0) {
		alq->aq_flags |= AQ_ACTIVE;
		activate = 1;
	} else
		activate = 0;

	ALQ_UNLOCK(alq);
	if (activate) {
		ALD_LOCK();
		ald_activate(alq);
		ALD_UNLOCK();
	}
}
void
alq_flush(struct alq *alq)
{
	int needwakeup = 0;

	ALD_LOCK();
	ALQ_LOCK(alq);
	if (alq->aq_flags & AQ_ACTIVE) {
		ald_deactivate(alq);
		ALD_UNLOCK();
		needwakeup = alq_doio(alq);
	} else
		ALD_UNLOCK();
	ALQ_UNLOCK(alq);

	if (needwakeup)
		wakeup(alq);
}
/*
 * Flush remaining data, close the file and free all resources.
 */
void
alq_close(struct alq *alq)
{
	/*
	 * If we're already shutting down someone else will flush and close
	 * the vnode.
	 */
	if (ald_rem(alq) != 0)
		return;

	/*
	 * Drain all pending IO.
	 */
	alq_shutdown(alq);

	mtx_destroy(&alq->aq_mtx);
	free(alq->aq_first, M_ALD);
	free(alq->aq_entbuf, M_ALD);
	free(alq, M_ALD);
}
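/*
 * Example (illustrative, not part of this file): teardown for the
 * hypothetical queue from the earlier sketches.  alq_flush() forces
 * buffered records to disk while keeping the queue usable, and
 * alq_close() drains any remaining IO, closes the vnode and frees the
 * queue:
 *
 *	alq_flush(foo_alq);
 *	...
 *	alq_close(foo_alq);
 *	foo_alq = NULL;
 */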