/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com> and Matthew Dillon
 * <dillon@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The MSF_BUF API was augmented from the SFBUF API:
 *	Copyright (c) 1998 David Greenman.  All rights reserved.
 *	src/sys/kern/kern_sfbuf.c,v 1.7 2004/05/13 19:46:18 dillon
 *
 * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.13 2005/04/20 10:24:48 hmp Exp $
 */

/*
 * MSFBUFs cache linear multi-page ephemeral mappings and operate similarly
 * to SFBUFs.  MSFBUFs use XIOs internally to hold the page list and can
 * be considered to be a KVA wrapper around an XIO.
 *
 * Like the SFBUF subsystem, the locking and validation of the page array
 * is the responsibility of the caller.  Also like the SFBUF subsystem,
 * MSFBUFs are SMP-friendly, cache the mappings, and will avoid unnecessary
 * page invalidations when possible.
 *
 * MSFBUFs are primarily designed to be used in subsystems that manipulate
 * XIOs.  The DEV and BUF subsystems are good examples.  A usage sketch
 * follows this comment.
 *
 * TODO LIST:
 *	- Implement the FREEQ optimization that exists in the SFBUF code.
 *	- Allow allocation (aka mapping) based on an XIO instead of a pglist.
 *	- Overload XIOs representative of smaller chunks of memory onto the
 *	  same KVA space to efficiently cache smaller mappings (filesystem
 *	  blocks / buffer cache related).
 */
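
/*
 * Usage sketch (hypothetical caller, not part of this file): a subsystem
 * holding a validated, busied page list maps it linearly into KVA via an
 * MSFBUF, operates on the mapping, then releases the buffer.  Assumes
 * msf_buf_kva() from <sys/msfbuf.h>.
 *
 *	static int
 *	example_zero_pages(vm_page_t *pages, int npages)
 *	{
 *		struct msf_buf *msf;
 *		int error;
 *
 *		error = msf_map_pagelist(&msf, pages, npages, 0);
 *		if (error == 0) {
 *			bzero(msf_buf_kva(msf), npages * PAGE_SIZE);
 *			msf_buf_free(msf);
 *		}
 *		return (error);
 *	}
 */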

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/globaldata.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sfbuf.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/xio.h>
#include <sys/msfbuf.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

MALLOC_DEFINE(M_MSFBUF, "MSFBUF", "direct-copy buffers");

/* lists and queues associated with msf_bufs */
LIST_HEAD(msf_buf_list, msf_buf);

TAILQ_HEAD(, msf_buf) msf_buf_freelist;

/* indicate shortage of available msf_bufs */
static u_int msf_buf_alloc_want;

/* base of the msf_buf map */
static vm_offset_t msf_base;
static struct msf_buf *msf_bufs;
static int msf_buf_hits;
static int msf_buf_misses;

static int msf_buf_count = 256; /* magic value */
SYSCTL_INT(_kern_ipc, OID_AUTO, msf_bufs, CTLFLAG_RD, &msf_buf_count,
	0, "number of direct-copy buffers available");
SYSCTL_INT(_kern_ipc, OID_AUTO, msf_hits, CTLFLAG_RD, &msf_buf_hits,
	0, "number of direct-copy buffer mapping hits");
SYSCTL_INT(_kern_ipc, OID_AUTO, msf_misses, CTLFLAG_RD, &msf_buf_misses,
	0, "number of direct-copy buffer mapping misses");

static void
msf_buf_init(void *__dummy)
{
	struct msf_buf *msf;
	int i;

	msf_buf_alloc_want = 0;
	TUNABLE_INT_FETCH("kern.ipc.msfbufs", &msf_buf_count);

	TAILQ_INIT(&msf_buf_freelist);

	msf_base = kmem_alloc_nofault(kernel_map,
				      msf_buf_count * XIO_INTERNAL_SIZE);

	msf_bufs = malloc(msf_buf_count * sizeof(struct msf_buf), M_MSFBUF,
			  M_WAITOK|M_ZERO);

	/* Initialize the free list with necessary information. */
	for (i = 0; i < msf_buf_count; i++) {
		msf = &msf_bufs[i];
		msf->ms_kva = msf_base + i * XIO_INTERNAL_SIZE;
		msf->ms_flags = MSF_ONFREEQ;
		msf->ms_type = MSF_TYPE_UNKNOWN;
		msf->ms_xio = &msf->ms_internal_xio;
		xio_init(&msf->ms_internal_xio);
		TAILQ_INSERT_TAIL(&msf_buf_freelist, &msf_bufs[i], free_list);
	}
}

SYSINIT(msf_buf, SI_SUB_MBUF, SI_ORDER_ANY, msf_buf_init, NULL);
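
/*
 * The pool size can be tuned at boot (a sketch; the tunable name is taken
 * from the TUNABLE_INT_FETCH above, the loader.conf mechanism is assumed):
 *
 *	# /boot/loader.conf
 *	kern.ipc.msfbufs="512"
 */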

/*
 * Get an msf_buf from the freelist; if none are available
 * then it will block.
 *
 * If SFB_CATCH was specified in 'flags' then the sleep is
 * interruptible by signals etc; this flag is normally used
 * for system calls.
 */
static struct msf_buf *
msf_alloc(vm_page_t firstpage, int flags)
{
	struct msf_buf *msf;
	int pflags;
	int error;

	crit_enter();
	if (firstpage && (msf = firstpage->msf_hint) != NULL &&
	    (msf->ms_flags & MSF_ONFREEQ)) {
		KKASSERT(msf->ms_refcnt == 0);
		msf->ms_flags &= ~MSF_ONFREEQ;
		msf->ms_refcnt = 1;
		TAILQ_REMOVE(&msf_buf_freelist, msf, free_list);
		--msf_buf_count;
		++msf_buf_hits;
	} else {
		/*
		 * Get a buffer off the freelist.  If the freelist is empty, we
		 * block until something becomes available; this happens quite
		 * quickly anyway because MSFBUFs are supposed to be temporary
		 * mappings.
		 *
		 * If the SFB_CATCH flag was provided, then we allow the sleep
		 * to be interruptible.
		 */
		for (;;) {
			if ((msf = TAILQ_FIRST(&msf_buf_freelist)) != NULL) {
				KKASSERT(msf->ms_refcnt == 0);
				--msf_buf_count;
				TAILQ_REMOVE(&msf_buf_freelist, msf, free_list);
				msf->ms_flags &= ~MSF_ONFREEQ;
				msf->ms_refcnt = 1;
				if (firstpage)
					firstpage->msf_hint = msf;
				break;
			}
			pflags = (flags & SFB_CATCH) ? PCATCH : 0;
			++msf_buf_alloc_want;
			error = tsleep(&msf_buf_freelist, pflags, "msfbuf", 0);
			--msf_buf_alloc_want;
			if (error)
				break;
		}
		++msf_buf_misses;
	}
	crit_exit();
	return (msf);
}

/*
 * Map the msf_buf's XIO page list into its KVA window.  With
 * SFB_CPUPRIVATE the mapping is only made valid on the current cpu.
 */
static void
msf_map_msf(struct msf_buf *msf, int flags)
{
#ifdef SMP
	if (flags & SFB_CPUPRIVATE) {
		pmap_qenter2(msf->ms_kva, msf->ms_xio->xio_pages,
			     msf->ms_xio->xio_npages, &msf->ms_cpumask);
	} else {
		pmap_qenter(msf->ms_kva, msf->ms_xio->xio_pages,
			    msf->ms_xio->xio_npages);
		msf->ms_cpumask = (cpumask_t)-1;
	}
#else
	pmap_qenter2(msf->ms_kva, msf->ms_xio->xio_pages,
		     msf->ms_xio->xio_npages, &msf->ms_cpumask);
#endif
}
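
/*
 * Note for callers (illustrative sketch; 'msf', 'kptr' and 'klen' are
 * hypothetical): when the mapping will only be touched from the current
 * cpu, passing SFB_CPUPRIVATE to the msf_map_*() functions avoids a
 * system-wide TLB invalidation:
 *
 *	error = msf_map_kbuf(&msf, kptr, klen, SFB_CPUPRIVATE);
 */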

/*
 * Map a vm_page_t array into an msf_buf using its internal XIO.
 */
int
msf_map_pagelist(struct msf_buf **msfp, vm_page_t *list, int npages, int flags)
{
	struct msf_buf *msf;
	int i;

	KKASSERT(npages != 0 && npages <= XIO_INTERNAL_PAGES);

	if ((msf = msf_alloc(list[0], flags)) != NULL) {
		KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
		for (i = 0; i < npages; ++i)
			msf->ms_internal_xio.xio_pages[i] = list[i];
		msf->ms_internal_xio.xio_offset = 0;
		msf->ms_internal_xio.xio_npages = npages;
		msf->ms_internal_xio.xio_bytes = npages << PAGE_SHIFT;
		msf->ms_type = MSF_TYPE_PGLIST;
		msf_map_msf(msf, flags);
		*msfp = msf;
		return (0);
	} else {
		*msfp = NULL;
		return (ENOMEM);
	}
}

/*
 * Map a pre-constructed XIO into an msf_buf; the XIO's own page list is
 * used directly rather than the internal XIO.
 */
int
msf_map_xio(struct msf_buf **msfp, struct xio *xio, int flags)
{
	struct msf_buf *msf;

	KKASSERT(xio != NULL && xio->xio_npages > 0);
	KKASSERT(xio->xio_npages <= XIO_INTERNAL_PAGES);

	if ((msf = msf_alloc(xio->xio_pages[0], flags)) != NULL) {
		msf->ms_type = MSF_TYPE_XIO;
		msf->ms_xio = xio;
		msf_map_msf(msf, flags);
		*msfp = msf;
		return (0);
	} else {
		*msfp = NULL;
		return (ENOMEM);
	}
}
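
/*
 * Example (sketch; 'xio' is assumed to have been populated elsewhere,
 * e.g. with xio_init_kbuf()): wrap an existing XIO in a KVA window.
 *
 *	struct msf_buf *msf;
 *
 *	if (msf_map_xio(&msf, xio, 0) == 0) {
 *		...operate on msf_buf_kva(msf)...
 *		msf_buf_free(msf);
 *	}
 */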

/*
 * Map a user buffer into an msf_buf.  The buffer, including its page
 * offset, must fit within XIO_INTERNAL_SIZE.
 */
int
msf_map_ubuf(struct msf_buf **msfp, void *base, size_t nbytes, int flags)
{
	struct msf_buf *msf;
	vm_paddr_t paddr;
	int error;

	if (((int)(intptr_t)base & PAGE_MASK) + nbytes > XIO_INTERNAL_SIZE) {
		*msfp = NULL;
		return (ERANGE);
	}

	if ((paddr = pmap_kextract((vm_offset_t)base)) != 0)
		msf = msf_alloc(PHYS_TO_VM_PAGE(paddr), flags);
	else
		msf = msf_alloc(NULL, flags);

	if (msf == NULL) {
		error = ENOMEM;
	} else {
		error = xio_init_ubuf(&msf->ms_internal_xio, base, nbytes, 0);
		if (error == 0) {
			KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
			msf_map_msf(msf, flags);
			msf->ms_type = MSF_TYPE_UBUF;
		} else {
			msf_buf_free(msf);
			msf = NULL;
		}
	}
	*msfp = msf;
	return (error);
}

/*
 * Map a kernel buffer into an msf_buf.  The buffer, including its page
 * offset, must fit within XIO_INTERNAL_SIZE.
 */
int
msf_map_kbuf(struct msf_buf **msfp, void *base, size_t nbytes, int flags)
{
	struct msf_buf *msf;
	vm_paddr_t paddr;
	int error;

	if (((int)(intptr_t)base & PAGE_MASK) + nbytes > XIO_INTERNAL_SIZE) {
		*msfp = NULL;
		return (ERANGE);
	}

	if ((paddr = pmap_kextract((vm_offset_t)base)) != 0)
		msf = msf_alloc(PHYS_TO_VM_PAGE(paddr), flags);
	else
		msf = msf_alloc(NULL, flags);

	if (msf == NULL) {
		error = ENOMEM;
	} else {
		error = xio_init_kbuf(&msf->ms_internal_xio, base, nbytes);
		if (error == 0) {
			KKASSERT(msf->ms_xio == &msf->ms_internal_xio);
			msf_map_msf(msf, flags);
			msf->ms_type = MSF_TYPE_KBUF;
		} else {
			msf_buf_free(msf);
			msf = NULL;
		}
	}
	*msfp = msf;
	return (error);
}
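
/*
 * Example (sketch; 'kptr' and 'klen' are hypothetical): make a possibly
 * unaligned kernel buffer addressable through one linear KVA window.
 *
 *	struct msf_buf *msf;
 *
 *	if (msf_map_kbuf(&msf, kptr, klen, 0) == 0) {
 *		...use msf_buf_kva(msf) for klen bytes...
 *		msf_buf_free(msf);
 *	}
 */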

/*
 * Iterate through the specified uio calling the function with a kernel
 * buffer containing the data until the uio has been exhausted.  If the
 * uio represents system space no mapping occurs.  If the uio represents
 * user space the data is mapped into system space in chunks.  This
 * function does not guarantee any particular alignment or minimum chunk
 * size; these depend on the limitations of MSF buffers and the breakdown
 * of the UIO's elements.
 */
int
msf_uio_iterate(struct uio *uio,
		int (*callback)(void *info, char *buf, int bytes), void *info)
{
	struct msf_buf *msf;
	struct iovec *iov;
	size_t offset;
	size_t bytes;
	size_t pgoff;
	int error;
	int i;

	switch (uio->uio_segflg) {
	case UIO_USERSPACE:
		error = 0;
		for (i = 0; i < uio->uio_iovcnt && error == 0; ++i) {
			iov = &uio->uio_iov[i];
			offset = 0;
			pgoff = (int)(intptr_t)iov->iov_base & PAGE_MASK;
			while (offset < iov->iov_len) {
				bytes = iov->iov_len - offset;
				if (bytes + pgoff > XIO_INTERNAL_SIZE)
					bytes = XIO_INTERNAL_SIZE - pgoff;
				error = msf_map_ubuf(&msf,
						iov->iov_base + offset,
						bytes, 0);
				if (error)
					break;
				error = callback(info, msf_buf_kva(msf), bytes);
				msf_buf_free(msf);
				if (error)
					break;
				pgoff = 0;
				offset += bytes;
			}
		}
		break;
	case UIO_SYSSPACE:
		error = 0;
		for (i = 0; i < uio->uio_iovcnt; ++i) {
			iov = &uio->uio_iov[i];
			if (iov->iov_len == 0)
				continue;
			error = callback(info, iov->iov_base, iov->iov_len);
			if (error)
				break;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
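
/*
 * Example callback (hypothetical): sum the bytes described by a uio
 * without caring whether the uio is user or system space.
 *
 *	static int
 *	example_sum_cb(void *info, char *buf, int bytes)
 *	{
 *		u_int32_t *sum = info;
 *		int i;
 *
 *		for (i = 0; i < bytes; ++i)
 *			*sum += (u_int8_t)buf[i];
 *		return (0);
 *	}
 *
 * invoked as:	error = msf_uio_iterate(uio, example_sum_cb, &sum);
 */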

/*
 * Add a reference to a buffer (currently unused).
 */
void
msf_buf_ref(struct msf_buf *msf)
{
	if (msf->ms_refcnt == 0)
		panic("msf_buf_ref: referencing a free msf_buf");
	crit_enter();
	++msf->ms_refcnt;
	crit_exit();
}

/*
 * Lose a reference to an msf_buf.  When none are left, detach the mapped
 * pages and release resources back to the system.  Note that the msf_buf's
 * removal from the freelist is delayed, so it may in fact already be
 * on the free list.  This is the optimal (and most likely) scenario.
 */
void
msf_buf_free(struct msf_buf *msf)
{
	KKASSERT(msf->ms_refcnt > 0);

	crit_enter();
	if (--msf->ms_refcnt == 0) {
		KKASSERT((msf->ms_flags & MSF_ONFREEQ) == 0);

		if (msf->ms_type == MSF_TYPE_UBUF || msf->ms_type == MSF_TYPE_KBUF)
			xio_release(msf->ms_xio);

		msf->ms_type = MSF_TYPE_UNKNOWN;
		msf->ms_flags |= MSF_ONFREEQ;
		msf->ms_xio = &msf->ms_internal_xio;
		TAILQ_INSERT_TAIL(&msf_buf_freelist, msf, free_list);
		++msf_buf_count;
		if (msf_buf_alloc_want > 0)
			wakeup_one(&msf_buf_freelist);
	}
	crit_exit();
}