/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/kern_physio.c,v 1.46.2.4 2003/11/14 09:51:47 simokawa Exp $
 * $DragonFly: src/sys/kern/kern_physio.c,v 1.25.4.1 2008/09/25 01:44:52 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
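
/*
 * BIO completion callback: flag the underlying buffer as done and wake
 * up the thread sleeping on the bio in physio() below.
 */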
static void
physwakeup(struct bio *bio)
{
	bio->bio_buf->b_cmd = BUF_CMD_DONE;
	wakeup(bio);
}
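
/*
 * Perform raw device I/O directly against the caller's buffer.  The
 * request is broken into chunks sized to fit both the pbuf and the
 * device's si_iosize_max, and each chunk is issued synchronously via
 * the device strategy routine.
 */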
static int
physio(cdev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int saflags;
	int iolen;
	int bcount;
	int bounceit;
	caddr_t ubase;
	struct buf *bp;
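
	/*
	 * Allocate a pbuf for the transfer and save its initial flags so
	 * they can be restored before each chunk is issued.
	 */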
	bp = getpbuf(NULL);
	saflags = bp->b_flags;
	error = 0;

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		kprintf("WARNING: %s si_iosize_max=%d, using DFLTPHYS.\n",
			devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = DFLTPHYS;
	}

	/* Must be a real uio */
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_cmd = BUF_CMD_READ;
			else
				bp->b_cmd = BUF_CMD_WRITE;
			bp->b_flags = saflags;
			bcount = uio->uio_iov[i].iov_len;

			reinitbufbio(bp);	/* clear translation cache */
			bp->b_bio1.bio_offset = uio->uio_offset;
			bp->b_bio1.bio_done = physwakeup;

			/*
			 * Setup for mapping the request into kernel memory.
			 *
			 * We can only write as much as fits in a pbuf,
			 * which is MAXPHYS, and no larger than the device's
			 * ability.
			 *
			 * If not using bounce pages the base address of the
			 * user mapping into the pbuf may be offset, further
			 * reducing how much will actually fit in the pbuf.
			 */
			if (bcount > dev->si_iosize_max)
				bcount = dev->si_iosize_max;

			ubase = uio->uio_iov[i].iov_base;
			bounceit = (int)(((vm_offset_t)ubase) & 15);
			iolen = ((vm_offset_t)ubase) & PAGE_MASK;
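
			/*
			 * User buffers that are not 16-byte aligned are
			 * bounced through kernel memory; aligned buffers
			 * are mapped directly, with the in-page offset
			 * (iolen) reducing the usable pbuf KVA.
			 */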
			if (bounceit) {
				if (bcount > bp->b_kvasize)
					bcount = bp->b_kvasize;
			} else {
				if ((bcount + iolen) > bp->b_kvasize) {
					bcount = bp->b_kvasize;
					if (iolen != 0)
						bcount -= PAGE_SIZE;
				}
			}
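
			/*
			 * Example (assuming, say, a 64KB pbuf): a
			 * direct-mapped request whose base sits 512 bytes
			 * into a page can carry at most 64KB - PAGE_SIZE
			 * bytes per chunk, since the partial first page
			 * also consumes pbuf KVA.
			 */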

			/*
			 * If we have to use a bounce buffer allocate kernel
			 * memory and copyin/copyout.  Otherwise map the
			 * user buffer directly into kernel memory without
			 * copying.
			 */
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					bp->b_data = bp->b_kvabase;
					bp->b_bcount = bcount;
					vm_hold_load_pages(bp, (vm_offset_t)bp->b_data, (vm_offset_t)bp->b_data + bcount);
					if (uio->uio_rw == UIO_WRITE) {
						error = copyin(ubase, bp->b_data, bcount);
						if (error) {
							vm_hold_free_pages(bp, (vm_offset_t)bp->b_data, (vm_offset_t)bp->b_data + bcount);
							goto doerror;
						}
					}
				} else if (vmapbuf(bp, ubase, bcount) < 0) {
					error = EFAULT;
					goto doerror;
				}
			} else {
				bp->b_data = uio->uio_iov[i].iov_base;
				bp->b_bcount = bcount;
			}
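
			/*
			 * Issue the chunk via the device strategy routine
			 * and sleep until physwakeup() marks the buffer
			 * done.  The critical section closes the race
			 * between the completion wakeup and tsleep().
			 */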
			dev_dstrategy(dev, &bp->b_bio1);
			crit_enter();
			while (bp->b_cmd != BUF_CMD_DONE)
				tsleep(&bp->b_bio1, 0, "physstr", 0);
			crit_exit();

			iolen = bp->b_bcount - bp->b_resid;
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (bounceit) {
					if (uio->uio_rw == UIO_READ && iolen) {
						error = copyout(bp->b_data, ubase, iolen);
						if (error) {
							bp->b_flags |= B_ERROR;
							bp->b_error = error;
						}
					}
					vm_hold_free_pages(bp, (vm_offset_t)bp->b_data, (vm_offset_t)bp->b_data + bcount);
				} else {
					vunmapbuf(bp);
				}
			}
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
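
			/*
			 * Advance the uio past the bytes actually
			 * transferred; a short transfer simply leaves the
			 * remainder for the next loop iteration.
			 */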
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base += iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	return (error);
}
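
/*
 * Generic dev_ops read/write entry points for raw devices.  They simply
 * funnel the caller's uio through physio() against the device's
 * strategy routine.
 */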
int
physread(struct dev_read_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

int
physwrite(struct dev_write_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}
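
/*
 * Usage sketch (illustrative only, not part of this file): a raw device
 * driver typically points its dev_ops read/write entries straight at
 * physread/physwrite and supplies only its own strategy routine.  The
 * exampledev_* names below are hypothetical.
 *
 *	static struct dev_ops exampledev_ops = {
 *		{ "exampledev", 0, D_DISK },
 *		.d_open =	exampledev_open,
 *		.d_close =	exampledev_close,
 *		.d_read =	physread,
 *		.d_write =	physwrite,
 *		.d_strategy =	exampledev_strategy,
 *	};
 */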