/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.16 2008/05/09 07:24:45 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writable.
 *
 * An illustrative, compiled-out usage sketch follows the includes below.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
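
/*
 * Usage sketch (illustrative only): the basic XIO lifecycle.  The
 * XIO_USAGE_EXAMPLES guard and the xio_example_* functions here and
 * below are hypothetical and compiled out; they are not part of the
 * XIO API.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_lifecycle(void *ubase, size_t ubytes)
{
    struct xio xio;
    int error;

    error = xio_init_ubuf(&xio, ubase, ubytes, 0);
    if (error == 0) {
	/* the XIO may now be handed to a foreign thread; no KVM mapping */
	xio_release(&xio);
    }
    return(error);
}
#endif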

/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}

/*
 * Initialize an XIO given a userspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_ubuf(xio_t xio, void *ubase, size_t ubytes, int flags)
{
    vm_offset_t addr;
    vm_page_t m;
    vm_page_t m0;
    int error;
    int i;
    int n;
    int vmprot;

    addr = trunc_page((vm_offset_t)ubase);
    xio->xio_flags = flags;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    if (ubytes == 0) {
	xio->xio_offset = 0;
	xio->xio_npages = 0;
    } else {
	vmprot = (flags & XIOF_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	xio->xio_offset = (vm_offset_t)ubase & PAGE_MASK;
	xio->xio_pages = xio->xio_internal_pages;
	if ((n = PAGE_SIZE - xio->xio_offset) > ubytes)
	    n = ubytes;
	m0 = NULL;
	for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	    /*
	     * Fault in and hold the user page, then record it.
	     */
	    m = vm_fault_page_quick(addr, vmprot, &error);
	    if (m == NULL)
		break;
	    xio->xio_pages[i] = m;
	    ubytes -= n;
	    xio->xio_bytes += n;
	    if ((n = ubytes) > PAGE_SIZE)
		n = PAGE_SIZE;
	    addr += PAGE_SIZE;

	    /*
	     * Check linearity, used by syslink to memory map DMA buffers.
	     */
	    if (flags & XIOF_VMLINEAR) {
		if (i == 0) {
		    m0 = m;
		} else
		if (m->object != m0->object || m->pindex != m0->pindex + i) {
		    error = EINVAL;
		    break;
		}
	    }
	}
	xio->xio_npages = i;

	/*
	 * If a failure occurred clean out what we loaded and return EFAULT.
	 * Return 0 on success.  Do not dirty the pages.
	 */
	if (i < XIO_INTERNAL_PAGES && n) {
	    xio->xio_flags &= ~XIOF_WRITE;
	    xio_release(xio);
	    xio->xio_error = EFAULT;
	}
    }
    return(xio->xio_error);
}
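
/*
 * Usage sketch (illustrative only, compiled out): a kernel producer
 * filling a user buffer.  XIOF_WRITE requests write faults up front
 * and causes xio_release() to dirty the pages.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_fill_ubuf(void *ubase, size_t ubytes, void *kdata, int kbytes)
{
    struct xio xio;
    int error;

    error = xio_init_ubuf(&xio, ubase, ubytes, XIOF_WRITE);
    if (error == 0) {
	error = xio_copy_ktox(&xio, 0, kdata, kbytes);
	xio_release(&xio);	/* pages are dirtied on release */
    }
    return(error);
}
#endif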

/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	/*
	 * The buffer must be resident; a failed extraction aborts the
	 * loop and is reported as EFAULT below.
	 */
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
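
/*
 * Usage sketch (illustrative only, compiled out): wrapping a wired
 * kernel buffer so another thread can copy from it without a mapping.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_kbuf(void *kbase, size_t kbytes)
{
    struct xio xio;
    int error;

    error = xio_init_kbuf(&xio, kbase, kbytes);
    if (error == 0) {
	/* queue &xio to a worker; it can use xio_copy_xtok() to read */
	xio_release(&xio);
    }
    return(error);
}
#endif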

/*
 * Initialize an XIO given an array of vm_page pointers.  The caller is
 * responsible for any modified state changes for the pages.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = npages * PAGE_SIZE;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; i < npages; ++i) {
	vm_page_hold(mbase[i]);
	xio->xio_pages[i] = mbase[i];
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    return(0);
}
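
/*
 * Usage sketch (illustrative only, compiled out): building an XIO from
 * pages the caller already manages, e.g. pages of a VM object.  The
 * caller remains responsible for modified-state bookkeeping.
 */
#ifdef XIO_USAGE_EXAMPLES
static void
xio_example_pages(struct vm_page **mlist, int npages)
{
    struct xio xio;

    xio_init_pages(&xio, mlist, npages, 0);
    /* ... use the XIO ... */
    xio_release(&xio);
}
#endif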

/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    lwkt_gettoken(&vm_token);
    crit_enter();
    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	if (xio->xio_flags & XIOF_WRITE)
	    vm_page_dirty(m);
	vm_page_unhold(m);
    }
    crit_exit();
    lwkt_reltoken(&vm_token);
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}

/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
    size_t bytes;
    int error;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
			     bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}
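
/*
 * Usage sketch (illustrative only, compiled out): satisfying a
 * read(2)-style request from an XIO.  With UIO_READ the copy runs
 * xio -> uio and the uio is advanced by the count returned in *sizep.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_uio_read(xio_t xio, struct uio *uio)
{
    size_t done;

    return(xio_uio_copy(xio, 0, uio, &done));
}
#endif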

/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	/*
	 * Map each page transiently with an lwbuf and copy it out.
	 */
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m);
	error = copyout((char *)lwbuf_kva(lwb) + offset, uptr, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
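
/*
 * Usage sketch (illustrative only, compiled out): flushing the entire
 * contents of an XIO to a user address in the current context, using
 * the transient per-page lwbuf mappings shown above.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_xtou(xio_t xio, void *udest)
{
    return(xio_copy_xtou(xio, 0, udest, xio->xio_bytes));
}
#endif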

/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;

    if (bytes + uoffset > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m);
	bcopy((char *)lwbuf_kva(lwb) + offset, kptr, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m);
	error = copyin(uptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (const char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
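
/*
 * Usage sketch (illustrative only, compiled out): pulling user data
 * from the current process context into a kernel-page-backed XIO.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_utox(xio_t xio, const void *usrc, int len)
{
    return(xio_copy_utox(xio, 0, usrc, len));
}
#endif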

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m);
	bcopy(kptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (const char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
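
/*
 * Usage sketch (illustrative only, compiled out): a kernel-to-kernel
 * round trip through an XIO, useful as a sanity check of the copy
 * routines.  kbuf and tmp are hypothetical caller-supplied buffers.
 */
#ifdef XIO_USAGE_EXAMPLES
static int
xio_example_roundtrip(void *kbuf, void *tmp, int len)
{
    struct xio xio;
    int error;

    error = xio_init_kbuf(&xio, kbuf, len);
    if (error == 0) {
	error = xio_copy_ktox(&xio, 0, kbuf, len);
	if (error == 0)
	    error = xio_copy_xtok(&xio, 0, tmp, len);
	xio_release(&xio);
    }
    return(error);
}
#endif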