/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_xio.c,v 1.16 2008/05/09 07:24:45 dillon Exp $
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
        xio->xio_flags = 0;
        xio->xio_bytes = 0;
        xio->xio_error = 0;
        xio->xio_offset = 0;
        xio->xio_npages = 0;
        xio->xio_pages = xio->xio_internal_pages;
}
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 *
 * WARNING! We cannot map user memory directly into an xio unless we also
 *          make the mapping use managed pages, otherwise modifications to
 *          the memory will race against pageouts and flushes.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
        vm_offset_t addr;
        vm_paddr_t paddr;
        vm_page_t m;
        int i;
        int n;

        addr = trunc_page((vm_offset_t)kbase);
        xio->xio_flags = 0;
        xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
        xio->xio_bytes = 0;
        xio->xio_pages = xio->xio_internal_pages;
        xio->xio_error = 0;
        if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
                n = kbytes;
        for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
                if ((paddr = pmap_kextract(addr)) == 0)
                        break;
                m = PHYS_TO_VM_PAGE(paddr);
                vm_page_hold(m);
                xio->xio_pages[i] = m;
                kbytes -= n;
                xio->xio_bytes += n;
                if ((n = kbytes) > PAGE_SIZE)
                        n = PAGE_SIZE;
                addr += PAGE_SIZE;
        }
        xio->xio_npages = i;

        /*
         * If a failure occurred clean out what we loaded and return EFAULT.
         * Return 0 on success.
         */
        if (i < XIO_INTERNAL_PAGES && n) {
                xio_release(xio);
                xio->xio_error = EFAULT;
        }
        return(xio->xio_error);
}
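/*
 * Example (illustrative sketch, not part of the original file): a typical
 * lifecycle for an XIO wrapping a kernel buffer.  "kbuf", "len", and
 * "udata" are hypothetical names.
 *
 *      struct xio xio;
 *      int error;
 *
 *      error = xio_init_kbuf(&xio, kbuf, len);
 *      if (error == 0) {
 *              error = xio_copy_xtou(&xio, 0, udata, len);
 *              xio_release(&xio);
 *      }
 */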
/*
 * Initialize an XIO given an array of vm_page pointers.  The caller is
 * responsible for any modified state changes for the pages.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
        int i;

        KKASSERT(npages <= XIO_INTERNAL_PAGES);

        xio->xio_flags = xflags;
        xio->xio_offset = 0;
        xio->xio_bytes = npages * PAGE_SIZE;
        xio->xio_pages = xio->xio_internal_pages;
        xio->xio_npages = npages;
        xio->xio_error = 0;
        for (i = 0; i < npages; ++i) {
                vm_page_hold(mbase[i]);
                xio->xio_pages[i] = mbase[i];
        }
        return(0);
}
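/*
 * Example (illustrative sketch): wrapping a hypothetical array of vm_page
 * pointers "pages".  Passing XIOF_WRITE asks xio_release() to dirty the
 * backing pages when the XIO is torn down.
 *
 *      error = xio_init_pages(&xio, pages, npages, XIOF_WRITE);
 */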
/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
        int i;
        vm_page_t m;

        for (i = 0; i < xio->xio_npages; ++i) {
                m = xio->xio_pages[i];
                if (xio->xio_flags & XIOF_WRITE)
                        vm_page_dirty(m);
                vm_page_unhold(m);
        }
        xio->xio_offset = 0;
        xio->xio_npages = 0;
        xio->xio_bytes = 0;
        xio->xio_error = ENOBUFS;
}
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ     xio -> uio
 * UIO_WRITE    uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
        size_t bytes;
        int error;

        bytes = xio->xio_bytes - uoffset;
        if (bytes > uio->uio_resid)
                bytes = uio->uio_resid;
        KKASSERT(bytes >= 0);
        error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
                                 bytes, uio);
        if (error == 0)
                *sizep = bytes;
        else
                *sizep = 0;
        return(error);
}
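/*
 * Example (illustrative sketch): moving data between an XIO and a UIO
 * handed in by a caller, e.g. on a read path.  "copied" receives the
 * number of bytes actually moved; the direction follows uio->uio_rw.
 *
 *      size_t copied;
 *      int error;
 *
 *      error = xio_uio_copy(&xio, 0, uio, &copied);
 */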
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
        int i;
        int n;
        int error;
        int offset;
        vm_page_t m;
        struct lwbuf *lwb;
        struct lwbuf lwb_cache;

        if (uoffset + bytes > xio->xio_bytes)
                return(EFAULT);

        offset = (xio->xio_offset + uoffset) & PAGE_MASK;
        if ((n = PAGE_SIZE - offset) > bytes)
                n = bytes;

        error = 0;
        for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
             i < xio->xio_npages;
             ++i
        ) {
                m = xio->xio_pages[i];
                lwb = lwbuf_alloc(m, &lwb_cache);
                error = copyout((char *)lwbuf_kva(lwb) + offset, uptr, n);
                lwbuf_free(lwb);
                if (error)
                        break;
                bytes -= n;
                uptr = (char *)uptr + n;
                if (bytes == 0)
                        break;
                if ((n = bytes) > PAGE_SIZE)
                        n = PAGE_SIZE;
                offset = 0;
        }
        return(error);
}
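/*
 * Example (illustrative sketch): copying "len" bytes starting at
 * abstracted offset 128 within the XIO out to a hypothetical user pointer
 * "udata".  The uoffset argument is relative to the start of the XIO's
 * data, not to the underlying page offset.
 *
 *      error = xio_copy_xtou(&xio, 128, udata, len);
 */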
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
        int i;
        int n;
        int error;
        int offset;
        vm_page_t m;
        struct lwbuf *lwb;
        struct lwbuf lwb_cache;

        if (bytes + uoffset > xio->xio_bytes)
                return(EFAULT);

        offset = (xio->xio_offset + uoffset) & PAGE_MASK;
        if ((n = PAGE_SIZE - offset) > bytes)
                n = bytes;

        error = 0;
        for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
             i < xio->xio_npages;
             ++i
        ) {
                m = xio->xio_pages[i];
                lwb = lwbuf_alloc(m, &lwb_cache);
                bcopy((char *)lwbuf_kva(lwb) + offset, kptr, n);
                lwbuf_free(lwb);
                bytes -= n;
                kptr = (char *)kptr + n;
                if (bytes == 0)
                        break;
                if ((n = bytes) > PAGE_SIZE)
                        n = PAGE_SIZE;
                offset = 0;
        }
        return(error);
}
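/*
 * Example (illustrative sketch): staging XIO data into a hypothetical
 * kernel bounce buffer "kbuf" before handing it to some other consumer.
 *
 *      error = xio_copy_xtok(&xio, 0, kbuf, len);
 */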
/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
        int i;
        int n;
        int error;
        int offset;
        vm_page_t m;
        struct lwbuf *lwb;
        struct lwbuf lwb_cache;

        if (uoffset + bytes > xio->xio_bytes)
                return(EFAULT);

        offset = (xio->xio_offset + uoffset) & PAGE_MASK;
        if ((n = PAGE_SIZE - offset) > bytes)
                n = bytes;

        error = 0;
        for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
             i < xio->xio_npages;
             ++i
        ) {
                m = xio->xio_pages[i];
                lwb = lwbuf_alloc(m, &lwb_cache);
                error = copyin(uptr, (char *)lwbuf_kva(lwb) + offset, n);
                lwbuf_free(lwb);
                if (error)
                        break;
                bytes -= n;
                uptr = (const char *)uptr + n;
                if (bytes == 0)
                        break;
                if ((n = bytes) > PAGE_SIZE)
                        n = PAGE_SIZE;
                offset = 0;
        }
        return(error);
}
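/*
 * Example (illustrative sketch): filling the XIO from a hypothetical user
 * pointer "udata", e.g. on the write side of an I/O path.  Since the
 * backing pages are modified, the XIO may want to be initialized with
 * XIOF_WRITE so they are dirtied on release (see xio_release()).
 *
 *      error = xio_copy_utox(&xio, 0, udata, len);
 */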
/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
        int i;
        int n;
        int error;
        int offset;
        vm_page_t m;
        struct lwbuf *lwb;
        struct lwbuf lwb_cache;

        if (uoffset + bytes > xio->xio_bytes)
                return(EFAULT);

        offset = (xio->xio_offset + uoffset) & PAGE_MASK;
        if ((n = PAGE_SIZE - offset) > bytes)
                n = bytes;

        error = 0;
        for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
             i < xio->xio_npages;
             ++i
        ) {
                m = xio->xio_pages[i];
                lwb = lwbuf_alloc(m, &lwb_cache);
                bcopy(kptr, (char *)lwbuf_kva(lwb) + offset, n);
                lwbuf_free(lwb);
                bytes -= n;
                kptr = (const char *)kptr + n;
                if (bytes == 0)
                        break;
                if ((n = bytes) > PAGE_SIZE)
                        n = PAGE_SIZE;
                offset = 0;
        }
        return(error);
}
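/*
 * Example (illustrative sketch): filling the XIO's backing pages from a
 * hypothetical kernel buffer "kbuf", the kernel-source counterpart of
 * xio_copy_utox() above.
 *
 *      error = xio_copy_ktox(&xio, 0, kbuf, len);
 */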