/* sys/kern/kern_xio.c */
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Kernel XIO interface.  An initialized XIO is basically a collection of
 * appropriately held vm_page_t's.  XIO buffers are vmspace agnostic and
 * can represent userspace or kernelspace buffers, and can be passed to
 * foreign threads outside of the originating vmspace.  XIO buffers are
 * not mapped into KVM and thus can be manipulated and passed around with
 * very low overheads.
 *
 * The intent is for XIO to be used in the I/O path, VFS, CAPS, and other
 * places that need to pass (possibly userspace) data between threads.
 *
 * TODO: check for busy page when modifying, check writeable.
 */
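/*
 * Usage sketch (editorial illustration, not part of the original file):
 * because an XIO is just a set of held pages, a producer can wrap a
 * buffer and hand the XIO to a consumer in a different vmspace; no KVM
 * mapping is needed until data is actually copied:
 *
 *	struct xio xio;
 *	char buf[256];
 *
 *	if (xio_init_kbuf(&xio, buf, sizeof(buf)) == 0) {
 *		... pass &xio to another thread, which may call
 *		    xio_copy_xtok() or xio_copy_xtou() on it ...
 *		xio_release(&xio);
 *	}
 */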
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/xio.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_page2.h>
/*
 * Just do basic initialization of an empty XIO.
 */
void
xio_init(xio_t xio)
{
    xio->xio_flags = 0;
    xio->xio_bytes = 0;
    xio->xio_error = 0;
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_pages = xio->xio_internal_pages;
}
/*
 * Initialize an XIO given a kernelspace buffer.  0 is returned on success,
 * an error code on failure.  The actual number of bytes that could be
 * accommodated in the XIO will be stored in xio_bytes and the page offset
 * will be stored in xio_offset.
 *
 * WARNING! We cannot map user memory directly into an xio unless we also
 *	    make the mapping use managed pages, otherwise modifications to
 *	    the memory will race against pageouts and flushes.
 */
int
xio_init_kbuf(xio_t xio, void *kbase, size_t kbytes)
{
    vm_offset_t addr;
    vm_paddr_t paddr;
    vm_page_t m;
    int i;
    int n;

    addr = trunc_page((vm_offset_t)kbase);
    xio->xio_flags = 0;
    xio->xio_offset = (vm_offset_t)kbase & PAGE_MASK;
    xio->xio_bytes = 0;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_error = 0;
    if ((n = PAGE_SIZE - xio->xio_offset) > kbytes)
	n = kbytes;
    for (i = 0; n && i < XIO_INTERNAL_PAGES; ++i) {
	if ((paddr = pmap_kextract(addr)) == 0)
	    break;
	m = PHYS_TO_VM_PAGE(paddr);
	vm_page_hold(m);
	xio->xio_pages[i] = m;
	kbytes -= n;
	xio->xio_bytes += n;
	if ((n = kbytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	addr += PAGE_SIZE;
    }
    xio->xio_npages = i;

    /*
     * If a failure occurred clean out what we loaded and return EFAULT.
     * Return 0 on success.
     */
    if (i < XIO_INTERNAL_PAGES && n) {
	xio_release(xio);
	xio->xio_error = EFAULT;
    }
    return(xio->xio_error);
}
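/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a buffer larger than XIO_INTERNAL_PAGES pages can succeed with only a
 * prefix wired, so callers should check xio_bytes rather than assume the
 * full length was accommodated:
 *
 *	struct xio xio;
 *
 *	if (xio_init_kbuf(&xio, kbuf, len) == 0) {
 *		if (xio.xio_bytes < len)
 *			... only a prefix of kbuf is covered ...
 *		xio_release(&xio);
 *	}
 */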
/*
 * Initialize an XIO given an array of vm_page pointers.  The caller is
 * responsible for any modified state changes for the pages.
 */
int
xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags)
{
    int i;

    KKASSERT(npages <= XIO_INTERNAL_PAGES);

    xio->xio_flags = xflags;
    xio->xio_offset = 0;
    xio->xio_bytes = npages * PAGE_SIZE;
    xio->xio_pages = xio->xio_internal_pages;
    xio->xio_npages = npages;
    xio->xio_error = 0;
    for (i = 0; i < npages; ++i) {
	vm_page_hold(mbase[i]);
	xio->xio_pages[i] = mbase[i];
    }
    return(0);
}
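/*
 * Usage sketch (editorial illustration, not part of the original file):
 * wrapping pages the caller already holds.  Passing XIOF_WRITE tells
 * xio_release() to dirty the pages when the XIO is torn down:
 *
 *	struct xio xio;
 *	vm_page_t pages[2];	... caller-supplied, already looked up ...
 *
 *	xio_init_pages(&xio, pages, 2, XIOF_WRITE);
 *	... fill the pages via xio_copy_utox() or xio_copy_ktox() ...
 *	xio_release(&xio);
 */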
/*
 * Cleanup an XIO so it can be destroyed.  The pages associated with the
 * XIO are released.
 */
void
xio_release(xio_t xio)
{
    int i;
    vm_page_t m;

    for (i = 0; i < xio->xio_npages; ++i) {
	m = xio->xio_pages[i];
	if (xio->xio_flags & XIOF_WRITE)
	    vm_page_dirty(m);
	vm_page_unhold(m);
    }
    xio->xio_offset = 0;
    xio->xio_npages = 0;
    xio->xio_bytes = 0;
    xio->xio_error = ENOBUFS;
}
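/*
 * Note (editorial addition, not in the original file): xio_release()
 * deliberately leaves xio_error set to ENOBUFS; a reasonable reading is
 * that accidental reuse of a torn-down XIO is then detectable, e.g.:
 *
 *	xio_release(&xio);
 *	KKASSERT(xio.xio_error == ENOBUFS);	... stale-use guard ...
 */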
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
    size_t bytes;
    int error;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset,
			     bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}
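/*
 * Usage sketch (editorial illustration, not part of the original file):
 * serving a read(2)-style request from an XIO that already holds the
 * data; with uio_rw == UIO_READ the copy runs xio -> uio:
 *
 *	size_t copied;
 *	int error;
 *
 *	error = xio_uio_copy(xio, 0, uio, &copied);
 *	if (error == 0)
 *		... 'copied' bytes were moved from the XIO to the uio ...
 */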
/*
 * Copy the specified number of bytes from the xio to a userland
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtou(xio_t xio, int uoffset, void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyout((char *)lwbuf_kva(lwb) + offset, uptr, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
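/*
 * Usage sketch (editorial illustration, not part of the original file):
 * completing a request by copying XIO data out to a user pointer, e.g.
 * from a syscall path where 'uptr' is a current-process address:
 *
 *	if ((error = xio_copy_xtou(xio, 0, uptr, len)) != 0)
 *		return (error);	... EFAULT on overrun or copyout failure ...
 */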
/*
 * Copy the specified number of bytes from the xio to a kernel
 * buffer.  Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.
 */
int
xio_copy_xtok(xio_t xio, int uoffset, void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (bytes + uoffset > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy((char *)lwbuf_kva(lwb) + offset, kptr, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
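/*
 * Usage sketch (editorial illustration, not part of the original file):
 * pulling a small header out of an XIO into a kernel-stack structure;
 * 'struct somehdr' is a hypothetical type:
 *
 *	struct somehdr hdr;
 *
 *	if (xio_copy_xtok(xio, 0, &hdr, sizeof(hdr)) != 0)
 *		... the XIO did not contain sizeof(hdr) bytes ...
 */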
/*
 * Copy the specified number of bytes from userland to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_utox(xio_t xio, int uoffset, const void *uptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	error = copyin(uptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	if (error)
	    break;
	bytes -= n;
	uptr = (const char *)uptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
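/*
 * Usage sketch (editorial illustration, not part of the original file):
 * filling an XIO from a user buffer on the write side; the XIO should
 * have been created with XIOF_WRITE so xio_release() dirties the pages:
 *
 *	if ((error = xio_copy_utox(xio, 0, uptr, len)) != 0)
 *		return (error);
 */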
/*
 * Copy the specified number of bytes from the kernel to the xio.
 * Return an error code or 0 on success.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * Data in pages backing the XIO will be modified.
 */
int
xio_copy_ktox(xio_t xio, int uoffset, const void *kptr, int bytes)
{
    int i;
    int n;
    int error;
    int offset;
    vm_page_t m;
    struct lwbuf *lwb;
    struct lwbuf lwb_cache;

    if (uoffset + bytes > xio->xio_bytes)
	return(EFAULT);

    offset = (xio->xio_offset + uoffset) & PAGE_MASK;
    if ((n = PAGE_SIZE - offset) > bytes)
	n = bytes;

    error = 0;
    for (i = (xio->xio_offset + uoffset) >> PAGE_SHIFT;
	 i < xio->xio_npages;
	 ++i
    ) {
	m = xio->xio_pages[i];
	lwb = lwbuf_alloc(m, &lwb_cache);
	bcopy(kptr, (char *)lwbuf_kva(lwb) + offset, n);
	lwbuf_free(lwb);
	bytes -= n;
	kptr = (const char *)kptr + n;
	if (bytes == 0)
	    break;
	if ((n = bytes) > PAGE_SIZE)
	    n = PAGE_SIZE;
	offset = 0;
    }
    return(error);
}
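/*
 * Usage sketch (editorial illustration, not part of the original file):
 * the kernel-side mirror of xio_copy_utox(), e.g. stamping a
 * kernel-generated header ('hdr' is hypothetical) into the front of the
 * XIO's backing pages:
 *
 *	if (xio_copy_ktox(xio, 0, &hdr, sizeof(hdr)) != 0)
 *		... the XIO is smaller than sizeof(hdr) ...
 */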