/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com> and Matthew Dillon
 * <dillon@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The MSF_BUF API was augmented from the SFBUF API:
 *	Copyright (c) 1998 David Greenman.  All rights reserved.
 *	src/sys/kern/kern_sfbuf.c,v 1.7 2004/05/13 19:46:18 dillon
 *
 * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.13 2005/04/20 10:24:48 hmp Exp $
 */
/*
 * MSFBUFs cache linear multi-page ephemeral mappings and operate similarly
 * to SFBUFs.  MSFBUFs use XIO's internally to hold the page list and can
 * be considered to be a KVA wrapper around an XIO.
 *
 * Like the SFBUF subsystem, the locking and validation of the page array
 * is the responsibility of the caller.  Also like the SFBUF subsystem,
 * MSFBUFs are SMP-friendly, cache the mappings, and will avoid unnecessary
 * page invalidations when possible.
 *
 * MSFBUFs are primarily designed to be used in subsystems that manipulate
 * XIOs.  The DEV and BUF subsystems are a good example.  A usage sketch
 * follows this comment.
 *
 * TODO LIST:
 *	- Implement the FREEQ optimization that exists in the SFBUF code.
 *	- Allow allocation (aka mapping) based on an XIO instead of a pglist.
 *	- Overload XIOs representative of smaller chunks of memory onto the
 *	  same KVA space to efficiently cache smaller mappings (filesystem
 *	  blocks / buffer cache related).
 */
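/*
 * Usage sketch (kept under #if 0, illustrative only): map an
 * already-validated page list, access the pages through the linear KVA
 * window, then release the mapping.  The helper example_zero_pages and its
 * 'list'/'npages' arguments are hypothetical; msf_map_pagelist(),
 * msf_buf_kva() and msf_buf_free() are the interfaces defined in this file.
 */
#if 0
static int
example_zero_pages(vm_page_t *list, int npages)
{
	struct msf_buf *msf;
	int error;

	/* the caller is responsible for locking/validating the pages */
	error = msf_map_pagelist(&msf, list, npages, 0);
	if (error)
		return (error);

	/* pages are now contiguous in KVA starting at msf_buf_kva(msf) */
	bzero(msf_buf_kva(msf), npages * PAGE_SIZE);

	msf_buf_free(msf);	/* return the mapping to the freelist */
	return (0);
}
#endif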
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/globaldata.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sfbuf.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/xio.h>
#include <sys/msfbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
MALLOC_DEFINE(M_MSFBUF, "MSFBUF", "direct-copy buffers");

/* lists and queues associated with msf_bufs */
LIST_HEAD(msf_buf_list, msf_buf);

TAILQ_HEAD(, msf_buf) msf_buf_freelist;

/* indicate shortage of available msf_bufs */
static u_int msf_buf_alloc_want;

/* base of the msf_buf map */
static vm_offset_t msf_base;
static struct msf_buf *msf_bufs;
static int msf_buf_hits;
static int msf_buf_misses;

static int msf_buf_count = 256; /* magic value */

SYSCTL_INT(_kern_ipc, OID_AUTO, msf_bufs, CTLFLAG_RD, &msf_buf_count,
	0, "number of direct-copy buffers available");
SYSCTL_INT(_kern_ipc, OID_AUTO, msf_hits, CTLFLAG_RD, &msf_buf_hits,
	0, "number of direct-copy buffer mapping cache hits");
SYSCTL_INT(_kern_ipc, OID_AUTO, msf_misses, CTLFLAG_RD, &msf_buf_misses,
	0, "number of direct-copy buffer mapping cache misses");
static void
msf_buf_init(void *__dummy)
{
	struct msf_buf *msf;
	int i;

	msf_buf_alloc_want = 0;
	TUNABLE_INT_FETCH("kern.ipc.msfbufs", &msf_buf_count);

	TAILQ_INIT(&msf_buf_freelist);

	msf_base = kmem_alloc_nofault(kernel_map,
				      msf_buf_count * XIO_INTERNAL_SIZE);

	msf_bufs = malloc(msf_buf_count * sizeof(struct msf_buf), M_MSFBUF,
			M_WAITOK|M_ZERO);

	/* Initialize the free list with necessary information. */
	for (i = 0; i < msf_buf_count; i++) {
		msf = &msf_bufs[i];
		msf->ms_kva = msf_base + i * XIO_INTERNAL_SIZE;
		msf->ms_flags = MSF_ONFREEQ;
		msf->ms_type = MSF_TYPE_UNKNOWN;
		msf->ms_xio = &msf->ms_internal_xio;
		xio_init(&msf->ms_internal_xio);
		TAILQ_INSERT_TAIL(&msf_buf_freelist, &msf_bufs[i], free_list);
	}
}

SYSINIT(msf_buf, SI_SUB_MBUF, SI_ORDER_ANY, msf_buf_init, NULL);
/*
 * Get an msf_buf from the freelist; if none are available the caller
 * will block until one is freed.
 *
 * If SFB_CATCH was specified in 'flags' then the sleep is interruptible
 * by signals etc; this flag is normally used for system calls (see the
 * sketch after this function).
 */
static struct msf_buf *
msf_alloc(vm_page_t firstpage, int flags)
{
	struct msf_buf *msf;
	int pflags;
	int error;

	crit_enter();
	if (firstpage && (msf = firstpage->msf_hint) != NULL &&
	    (msf->ms_flags & MSF_ONFREEQ)
	) {
		KKASSERT(msf->ms_refcnt == 0);
		msf->ms_flags &= ~MSF_ONFREEQ;
		msf->ms_refcnt = 1;
		TAILQ_REMOVE(&msf_buf_freelist, msf, free_list);
		--msf_buf_count;
		++msf_buf_hits;
	} else {
		/*
		 * Get a buffer off the freelist.  If the freelist is empty, we
		 * block until something becomes available; this happens quite
		 * quickly anyway because MSFBUFs are supposed to be temporary
		 * mappings.
		 *
		 * If the SFB_CATCH flag was provided, then we allow the sleep
		 * to be interruptible.
		 */
		for (;;) {
			if ((msf = TAILQ_FIRST(&msf_buf_freelist)) != NULL) {
				KKASSERT(msf->ms_refcnt == 0);
				--msf_buf_count;
				TAILQ_REMOVE(&msf_buf_freelist, msf, free_list);
				msf->ms_flags &= ~MSF_ONFREEQ;
				msf->ms_refcnt = 1;
				if (firstpage)
					firstpage->msf_hint = msf;
				break;
			}
			pflags = (flags & SFB_CATCH) ? PCATCH : 0;
			++msf_buf_alloc_want;
			error = tsleep(&msf_buf_freelist, pflags, "msfbuf", 0);
			--msf_buf_alloc_want;
			if (error)
				break;
		}
		++msf_buf_misses;
	}
	crit_exit();
	return (msf);
}
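/*
 * Usage sketch (kept under #if 0, illustrative only): SFB_CATCH is passed
 * down through the public mapping functions to msf_alloc(), making the
 * freelist sleep breakable by a signal.  The helper example_copyin_mapped
 * and its arguments are hypothetical; msf_map_ubuf(), msf_buf_kva(),
 * msf_buf_free() and SFB_CATCH come from this subsystem.
 */
#if 0
static int
example_copyin_mapped(void *ubase, size_t nbytes)
{
	struct msf_buf *msf;
	int error;

	/* SFB_CATCH allows the freelist sleep to be interrupted */
	error = msf_map_ubuf(&msf, ubase, nbytes, SFB_CATCH);
	if (error)
		return (error);	/* ERANGE if too large, ENOENT if no buffer */

	/* ... access the mapped user pages through msf_buf_kva(msf) ... */

	msf_buf_free(msf);
	return (0);
}
#endif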
static
void
msf_map_msf(struct msf_buf *msf, int flags)
{
#ifdef SMP
	if (flags & SFB_CPUPRIVATE) {
		pmap_qenter2(msf->ms_kva, msf->ms_xio->xio_pages,
			     msf->ms_xio->xio_npages, &msf->ms_cpumask);
	} else {
		pmap_qenter(msf->ms_kva, msf->ms_xio->xio_pages,
			    msf->ms_xio->xio_npages);
		msf->ms_cpumask = (cpumask_t)-1;
	}
#else
	pmap_qenter2(msf->ms_kva, msf->ms_xio->xio_pages,
		     msf->ms_xio->xio_npages, &msf->ms_cpumask);
#endif
}
int
msf_map_pagelist(struct msf_buf **msfp, vm_page_t *list, int npages, int flags)
{
	struct msf_buf *msf;
	int i;

	KKASSERT(npages != 0 && npages <= XIO_INTERNAL_PAGES);

	if ((msf = msf_alloc(list[0], flags)) != NULL) {
		KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
		for (i = 0; i < npages; ++i)
			msf->ms_internal_xio.xio_pages[i] = list[i];
		msf->ms_internal_xio.xio_offset = 0;
		msf->ms_internal_xio.xio_npages = npages;
		msf->ms_internal_xio.xio_bytes = npages << PAGE_SHIFT;
		msf->ms_type = MSF_TYPE_PGLIST;
		msf_map_msf(msf, flags);
		*msfp = msf;
		return (0);
	} else {
		*msfp = NULL;
		return (ENOMEM);
	}
}
int
msf_map_xio(struct msf_buf **msfp, struct xio *xio, int flags)
{
	struct msf_buf *msf;

	KKASSERT(xio != NULL && xio->xio_npages > 0);
	KKASSERT(xio->xio_npages <= XIO_INTERNAL_PAGES);

	if ((msf = msf_alloc(xio->xio_pages[0], flags)) != NULL) {
		msf->ms_type = MSF_TYPE_XIO;
		msf->ms_xio = xio;
		msf_map_msf(msf, flags);
		*msfp = msf;
		return(0);
	} else {
		*msfp = NULL;
		return(ENOMEM);
	}
}
int
msf_map_ubuf(struct msf_buf **msfp, void *base, size_t nbytes, int flags)
{
	struct msf_buf *msf;
	vm_paddr_t paddr;
	int error;

	if (((int)(intptr_t)base & PAGE_MASK) + nbytes > XIO_INTERNAL_SIZE) {
		*msfp = NULL;
		return (ERANGE);
	}

	if ((paddr = pmap_kextract((vm_offset_t)base)) != 0)
		msf = msf_alloc(PHYS_TO_VM_PAGE(paddr), flags);
	else
		msf = msf_alloc(NULL, flags);

	if (msf == NULL) {
		error = ENOENT;
	} else {
		error = xio_init_ubuf(&msf->ms_internal_xio, base, nbytes, 0);
		if (error == 0) {
			KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
			msf_map_msf(msf, flags);
			msf->ms_type = MSF_TYPE_UBUF;
		} else {
			msf_buf_free(msf);
			msf = NULL;
		}
	}
	*msfp = msf;
	return (error);
}
int
msf_map_kbuf(struct msf_buf **msfp, void *base, size_t nbytes, int flags)
{
	struct msf_buf *msf;
	vm_paddr_t paddr;
	int error;

	if (((int)(intptr_t)base & PAGE_MASK) + nbytes > XIO_INTERNAL_SIZE) {
		*msfp = NULL;
		return (ERANGE);
	}

	if ((paddr = pmap_kextract((vm_offset_t)base)) != 0)
		msf = msf_alloc(PHYS_TO_VM_PAGE(paddr), flags);
	else
		msf = msf_alloc(NULL, flags);

	if (msf == NULL) {
		error = ENOENT;
	} else {
		error = xio_init_kbuf(&msf->ms_internal_xio, base, nbytes);
		if (error == 0) {
			KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
			msf_map_msf(msf, flags);
			msf->ms_type = MSF_TYPE_KBUF;
		} else {
			msf_buf_free(msf);
			msf = NULL;
		}
	}
	*msfp = msf;
	return (error);
}
/*
 * Iterate through the specified uio, calling the function with a kernel
 * buffer containing the data until the uio has been exhausted.  If the uio
 * represents system space no mapping occurs.  If the uio represents user
 * space the data is mapped into system space in chunks.  This function does
 * not guarantee any particular alignment or minimum chunk size; it will
 * depend on the limitations of MSF buffers and the breakdown of the UIO's
 * elements.  A minimal callback sketch follows the function below.
 */
int
msf_uio_iterate(struct uio *uio,
		int (*callback)(void *info, char *buf, int bytes), void *info)
{
	struct msf_buf *msf;
	struct iovec *iov;
	size_t offset;
	size_t bytes;
	size_t pgoff;
	int error;
	int i;

	switch (uio->uio_segflg) {
	case UIO_USERSPACE:
		error = 0;
		for (i = 0; i < uio->uio_iovcnt && error == 0; ++i) {
			iov = &uio->uio_iov[i];
			offset = 0;
			pgoff = (int)(intptr_t)iov->iov_base & PAGE_MASK;
			while (offset < iov->iov_len) {
				bytes = iov->iov_len - offset;
				if (bytes + pgoff > XIO_INTERNAL_SIZE)
					bytes = XIO_INTERNAL_SIZE - pgoff;
				error = msf_map_ubuf(&msf, iov->iov_base + offset, bytes, 0);
				if (error)
					break;
				error = callback(info, msf_buf_kva(msf), bytes);
				msf_buf_free(msf);
				if (error)
					break;
				pgoff = 0;
				offset += bytes;
			}
		}
		break;
	case UIO_SYSSPACE:
		error = 0;
		for (i = 0; i < uio->uio_iovcnt; ++i) {
			iov = &uio->uio_iov[i];
			if (iov->iov_len == 0)
				continue;
			error = callback(info, iov->iov_base, iov->iov_len);
			if (error)
				break;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
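/*
 * Callback sketch (kept under #if 0, illustrative only): a minimal
 * msf_uio_iterate() consumer.  The callback matches the
 * int (*)(void *info, char *buf, int bytes) signature expected above;
 * struct example_sum, example_sum_cb and example_count_uio are
 * hypothetical names.
 */
#if 0
struct example_sum {
	size_t total;
};

static int
example_sum_cb(void *info, char *buf, int bytes)
{
	struct example_sum *sum = info;

	/* 'buf' is always a kernel-addressable window onto the uio data */
	sum->total += bytes;
	return (0);		/* a non-zero return aborts the iteration */
}

static int
example_count_uio(struct uio *uio)
{
	struct example_sum sum = { 0 };

	return (msf_uio_iterate(uio, example_sum_cb, &sum));
}
#endif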
#if 0
/*
 * Add a reference to a buffer (currently unused)
 */
void
msf_buf_ref(struct msf_buf *msf)
{
	if (msf->ms_refcnt == 0)
		panic("msf_buf_ref: referencing a free msf_buf");
	crit_enter();
	++msf->ms_refcnt;
	crit_exit();
}
#endif
/*
 * Lose a reference to an msf_buf.  When none are left, detach the mapped
 * pages and release resources back to the system.  Note that the msf_buf's
 * removal from the freelist is delayed, so it may in fact already be
 * on the free list.  This is the optimal (and most likely) scenario.
 *
 * Must be called at splimp.
 */
void
msf_buf_free(struct msf_buf *msf)
{
	KKASSERT(msf->ms_refcnt > 0);

	crit_enter();
	if (--msf->ms_refcnt == 0) {
		KKASSERT((msf->ms_flags & MSF_ONFREEQ) == 0);

		if (msf->ms_type == MSF_TYPE_UBUF || msf->ms_type == MSF_TYPE_KBUF)
			xio_release(msf->ms_xio);

		msf->ms_type = MSF_TYPE_UNKNOWN;
		msf->ms_flags |= MSF_ONFREEQ;
		msf->ms_xio = &msf->ms_internal_xio;
		TAILQ_INSERT_TAIL(&msf_buf_freelist, msf, free_list);
		++msf_buf_count;
		if (msf_buf_alloc_want > 0)
			wakeup_one(&msf_buf_freelist);
	}
	crit_exit();
}