/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.15 2007/08/13 17:20:04 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */
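/*
 * Typical usage, as an illustrative sketch only (not compiled in;
 * "ubase" and "ubytes" are hypothetical caller-supplied userspace
 * parameters): snapshot a user buffer into an XIO, drain it into a
 * kernel buffer, then drop the page holds.
 *
 *	struct xio xio;
 *	char kbuf[128];
 *	int error;
 *	int n;
 *
 *	error = xio_init_ubuf(&xio, ubase, ubytes, 0);
 *	if (error == 0) {
 *		n = (xio.xio_bytes < (int)sizeof(kbuf)) ?
 *		    xio.xio_bytes : (int)sizeof(kbuf);
 *		error = xio_copy_xtok(&xio, 0, kbuf, n);
 *		xio_release(&xio);
 *	}
 */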
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}
/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_page_t m;
    vm_page_t m0;
    int error;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
        xio->xio_offset = 0;
        xio->xio_npages = 0;
    } else {
        vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
        xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
        xio->xio_pages = xio->xio_internal_pages;
        if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
            n = ubytes;
        m0 = NULL;
        for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
            m = vm_fault_page_quick(addr, vmprot, &error);
            if (m == NULL)
                break;
            xio->xio_pages[i] = m;
            ubytes -= n;
            xio->xio_bytes += n;
            if ((n = ubytes) > PAGE_SIZE)
                n = PAGE_SIZE;
            addr += PAGE_SIZE;

            /*
             * Check linearity, used by syslink to memory map DMA buffers.
             */
            if (flags & XIOF_VMLINEAR) {
                if (i == 0) {
                    m0 = m;
                } else if (m->object != m0->object ||
                           m->pindex != m0->pindex + i) {
                    error = EINVAL;
                    break;
                }
            }
        }
        xio->xio_npages = i;

        /*
         * If a failure occurred clean out what we loaded and return EFAULT.
         * Return 0 on success.
         */
        if (i < XIO_INTERNAL_PAGES && n) {
            xio_release(xio);
            xio->xio_error = EFAULT;
        }
    }
    return(xio->xio_error);
}
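/*
 * Illustrative sketch only (not compiled in): to write INTO a userspace
 * buffer through an XIO, the pages should be faulted in with write
 * intent via XIOF_WRITE, after which xio_copy_ktox() can fill them.
 * "ubase", "len", and "kdata" are hypothetical:
 *
 *	if (xio_init_ubuf(&xio, ubase, len, XIOF_WRITE) == 0) {
 *		error = xio_copy_ktox(&xio, 0, kdata, xio.xio_bytes);
 *		xio_release(&xio);
 *	}
 */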
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
        n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
        if ((paddr = pmap_kextract(addr)) == 0)
            break;
        crit_enter();
        m = PHYS_TO_VM_PAGE(paddr);
        vm_page_hold(m);
        crit_exit();
        xio->xio_pages[i] = m;
        kbytes -= n;
        xio->xio_bytes += n;
        if ((n = kbytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
        xio_release(xio);
        xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
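/*
 * Illustrative sketch only (not compiled in): wrap a mapped kernel
 * buffer and copy it out to a userspace address; "uptr" is hypothetical:
 *
 *	if (xio_init_kbuf(&xio, kbuf, sizeof(kbuf)) == 0) {
 *		error = xio_copy_xtou(&xio, 0, uptr, xio.xio_bytes);
 *		xio_release(&xio);
 *	}
 */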
/*
 * Initialize an XIO given an array of vm_page pointers.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    crit_enter();
    for (i = 0; i < npages; ++i) {
        vm_page_hold(mbase[i]);
        xio->xio_pages[i] = mbase[i];
    }
    crit_exit();
    return(0);
}
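/*
 * Illustrative sketch only (not compiled in): hand already-looked-up
 * pages to an XIO.  Note this form leaves xio_bytes at 0, so it serves
 * page-level consumers rather than the byte-copy helpers:
 *
 *	vm_page_t marray[2];
 *
 *	... fill marray from a VM object lookup ...
 *	xio_init_pages(&xio, marray, 2, 0);
 *	... use xio.xio_pages ...
 *	xio_release(&xio);
 */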
/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    crit_enter();
    for (i = 0; i < xio->xio_npages; ++i) {
        m = xio->xio_pages[i];
        vm_page_unhold(m);
    }
    crit_exit();
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, int *sizep)
{
    int error;
    int bytes;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
        bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
                             bytes, uio);
    if (error == 0)
        *sizep = bytes;
    else
        *sizep = 0;
    return(error);
}
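/*
 * Illustrative sketch only (not compiled in): a read(2)-style path can
 * drain a prepared XIO into the caller's uio (uio_rw == UIO_READ moves
 * xio -> uio); "uio" is hypothetical:
 *
 *	int done;
 *
 *	error = xio_uio_copy(&xio, 0, uio, &done);
 */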
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
        sf_buf_free(sf);
        if (error)
            break;
        bytes -= n;
        uptr = (char *)uptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}
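/*
 * Illustrative sketch only (not compiled in): uoffset is relative to
 * the start of the XIO's data, so skipping a 16 byte header is simply:
 *
 *	error = xio_copy_xtou(&xio, 16, uptr, xio.xio_bytes - 16);
 */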
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (bytes + uoffset > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
        sf_buf_free(sf);
        bytes -= n;
        kptr = (char *)kptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}
/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        error = copyin(uptr, (char *)sf_buf_kva(sf) + offset, n);
        sf_buf_free(sf);
        if (error)
            break;
        bytes -= n;
        uptr = (const char *)uptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}
/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        bcopy(kptr, (char *)sf_buf_kva(sf) + offset, n);
        sf_buf_free(sf);
        bytes -= n;
        kptr = (const char *)kptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}