/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.15 2007/08/13 17:20:04 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>
#include <sys/sfbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>

/*
 * Just do basic initialization of an empty XIO
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}

/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_page_t m;
    vm_page_t m0;
    int error;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
        xio->xio_offset = 0;
        xio->xio_npages = 0;
    } else {
        vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
        xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
        xio->xio_pages = xio->xio_internal_pages;
        if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
            n = ubytes;
        m0 = NULL;
        for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
            m = vm_fault_page_quick(addr, vmprot, &error);
            if (m == NULL)
                break;
            xio->xio_pages[i] = m;
            ubytes -= n;
            xio->xio_bytes += n;
            if ((n = ubytes) > PAGE_SIZE)
                n = PAGE_SIZE;
            addr += PAGE_SIZE;

            /*
             * Check linearity, used by syslink to memory map DMA buffers.
             */
            if (flags & XIOF_VMLINEAR) {
                if (i == 0) {
                    m0 = m;
                } else
                if (m->object != m0->object || m->pindex != m0->pindex + i) {
                    error = EINVAL;
                    break;
                }
            }
        }
        xio->xio_npages = i;

        /*
         * If a failure occurred clean out what we loaded and return EFAULT.
         * Return 0 on success.
         */
        if (i < XIO_INTERNAL_PAGES && n) {
            xio_release(xio);
            xio->xio_error = EFAULT;
        }
    }
    return(xio->xio_error);
}

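/*
 * Usage sketch (illustrative only, compiled out; the function name and
 * calling context are hypothetical): wrap a user buffer in an XIO so
 * another thread can fill it.  XIOF_WRITE is requested because the
 * pages backing the buffer will be written to.
 */
#if 0
static int
example_wrap_ubuf(void *ubase, size_t ubytes)
{
    struct xio xio;
    int error;

    error = xio_init_ubuf(&xio, ubase, ubytes, XIOF_WRITE);
    if (error)
        return(error);
    /* ... pass &xio to the I/O path or a foreign thread ... */
    xio_release(&xio);
    return(0);
}
#endif
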
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
        n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
        if ((paddr = pmap_kextract(addr)) == 0)
            break;
        crit_enter();
        m = PHYS_TO_VM_PAGE(paddr);
        vm_page_hold(m);
        crit_exit();
        xio->xio_pages[i] = m;
        kbytes -= n;
        xio->xio_bytes += n;
        if ((n = kbytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
        xio_release(xio);
        xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}

/*
 * Initialize an XIO given an array of vm_page pointers.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = npages * PAGE_SIZE;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    crit_enter();
    for (i = 0; i < npages; ++i) {
        vm_page_hold(mbase[i]);
        xio->xio_pages[i] = mbase[i];
    }
    crit_exit();
    return(0);
}

/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    crit_enter();
    for (i = 0; i < xio->xio_npages; ++i) {
        m = xio->xio_pages[i];
        vm_page_unhold(m);
    }
    crit_exit();
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}

/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, int *sizep)
{
    int error;
    int bytes;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
        bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
                             bytes, uio);
    if (error == 0)
        *sizep = bytes;
    else
        *sizep = 0;
    return(error);
}

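/*
 * Usage sketch (illustrative only, compiled out; the function name and
 * context are hypothetical): a read-side path handing XIO data to a
 * caller-supplied uio.  With uio_rw == UIO_READ the data flows
 * xio -> uio, and 'bytes' reports how much was transferred.
 */
#if 0
static int
example_xio_to_uio(xio_t xio, struct uio *uio)
{
    int bytes;
    int error;

    error = xio_uio_copy(xio, 0, uio, &bytes);
    return(error);
}
#endif
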
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i
    ) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        error = copyout((char *)sf_buf_kva(sf) + offset, uptr, n);
        sf_buf_free(sf);
        if (error)
            break;
        bytes -= n;
        uptr = (char *)uptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (bytes + uoffset > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i
    ) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        bcopy((char *)sf_buf_kva(sf) + offset, kptr, n);
        sf_buf_free(sf);
        bytes -= n;
        kptr = (char *)kptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i
    ) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        error = copyin(uptr, (char *)sf_buf_kva(sf) + offset, n);
        sf_buf_free(sf);
        if (error)
            break;
        bytes -= n;
        uptr = (const char *)uptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct sf_buf *sf;

    if (uoffset + bytes > xio->xio_bytes)
        return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
        n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
         i < xio->xio_npages;
         ++i
    ) {
        m = xio->xio_pages[i];
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        bcopy(kptr, (char *)sf_buf_kva(sf) + offset, n);
        sf_buf_free(sf);
        bytes -= n;
        kptr = (const char *)kptr + n;
        if (bytes == 0)
            break;
        if ((n = bytes) > PAGE_SIZE)
            n = PAGE_SIZE;
        offset = 0;
    }
    return(error);
}
