/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
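
/*
 * Illustrative sketch (not part of the original file): the typical XIO
 * life cycle described in the header comment above -- wrap a kernel
 * buffer, let a (possibly foreign) thread copy from it, then release
 * the page holds.  The function name, the stack buffer, and the
 * abbreviated error handling are all hypothetical.
 */
#if 0
static int
example_xio_lifecycle(void *uptr)
{
    struct xio xio;
    char kbuf[512];
    int error;

    /* Wrap the kernel buffer; its pages are held but not mapped to KVM */
    error = xio_init_kbuf(&xio, kbuf, sizeof(kbuf));
    if (error == 0) {
	/* The XIO may now be handed to another thread and copied from */
	error = xio_copy_xtou(&xio, 0, uptr, sizeof(kbuf));
	/* Drop the page holds regardless of the copy result */
	xio_release(&xio);
    }
    return (error);
}
#endif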

/*
 * Just do basic initialization of an empty XIO
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}

/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 *
 * WARNING! We cannot map user memory directly into an xio unless we also
 *	    make the mapping use managed pages, otherwise modifications to
 *	    the memory will race against pageouts and flushes.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}

/*
 * Initialize an XIO given an array of vm_page pointers.  The caller is
 * responsible for any modified state changes for the pages.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = npages * PAGE_SIZE;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    for (i = 0; i < npages; ++i) {
	vm_page_hold(mbase[i]);
	xio->xio_pages[i] = mbase[i];
    }
    return(0);
}
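
/*
 * Illustrative sketch (not part of the original file): wrapping pages
 * the caller already owns.  Passing XIOF_WRITE makes xio_release()
 * dirty each page, which is appropriate when the XIO consumer writes
 * into them.  The function name and page acquisition are hypothetical.
 */
#if 0
static void
example_xio_wrap_pages(struct vm_page **pages, int npages)
{
    struct xio xio;

    /* Each page gets an additional hold; the caller keeps its own */
    xio_init_pages(&xio, pages, npages, XIOF_WRITE);

    /* ... hand &xio to a consumer that fills the pages ... */

    /* Drops the holds and, because of XIOF_WRITE, dirties each page */
    xio_release(&xio);
}
#endif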

/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	if (xio->xio_flags & XIOF_WRITE)
	    vm_page_dirty(m);
	vm_page_unhold(m);
    }
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}

/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
    size_t bytes;
    int error;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
			     bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}
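
/*
 * Illustrative sketch (not part of the original file): satisfying a
 * read-style request from an XIO through a struct uio, as a VFS read
 * path might.  With UIO_READ the data flows xio -> uio and the uio's
 * resid and offset are advanced; the XIO itself is untouched.  The
 * function name is hypothetical.
 */
#if 0
static int
example_xio_fill_uio(xio_t xio, struct uio *uio)
{
    size_t copied;

    /* Copy starting at abstracted offset 0 within the XIO */
    return (xio_uio_copy(xio, 0, uio, &copied));
}
#endif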

/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyout((char *)lwbuf_kva(lwb) + offset, uptr, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
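
/*
 * Illustrative sketch (not part of the original file): draining an XIO
 * to userspace in chunks via the abstracted uoffset, which counts from
 * 0 regardless of the XIO's physical page offset.  The chunking scheme
 * and names are hypothetical.
 */
#if 0
static int
example_xio_drain_xtou(xio_t xio, void *uptr, int chunk)
{
    int uoffset = 0;
    int error = 0;
    int n;

    while (error == 0 && uoffset < xio->xio_bytes) {
	n = chunk;
	if (uoffset + n > xio->xio_bytes)
	    n = xio->xio_bytes - uoffset;
	error = xio_copy_xtou(xio, uoffset, (char *)uptr + uoffset, n);
	uoffset += n;
    }
    return (error);
}
#endif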

/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (bytes + uoffset > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy((char *)lwbuf_kva(lwb) + offset, kptr, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyin(uptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (const char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}

/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy(kptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (const char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
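
/*
 * Illustrative sketch (not part of the original file): the four copy
 * routines form two symmetric pairs -- copyout/copyin for userspace
 * and bcopy for kernelspace.  This sketch stages a kernel-to-kernel
 * copy through an XIO, as a producer/consumer pipeline between threads
 * might.  The function name and abbreviated error handling are
 * hypothetical.
 */
#if 0
static int
example_xio_kernel_roundtrip(void *src, void *dst, int bytes)
{
    struct xio xio;
    int error;

    /* Build the XIO over the source buffer's pages */
    error = xio_init_kbuf(&xio, src, bytes);
    if (error == 0) {
	/* xtok: xio -> kernel buffer (bcopy-based, cannot fault) */
	error = xio_copy_xtok(&xio, 0, dst, bytes);
	xio_release(&xio);
    }
    return (error);
}
#endif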