Merge commit '7e934d3acc051b7ee3ef0d11571fd1225800a607'
[unleashed.git] / kernel / os / bp_map.c
blob2c5888072fdc15f985dc88caff864bcb6b80374e
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
26 #pragma ident "%Z%%M% %I% %E% SMI"
28 #include <sys/types.h>
29 #include <sys/sysmacros.h>
30 #include <sys/systm.h>
31 #include <sys/mman.h>
32 #include <sys/buf.h>
33 #include <sys/vmem.h>
34 #include <sys/cmn_err.h>
35 #include <sys/debug.h>
36 #include <sys/machparam.h>
37 #include <vm/page.h>
38 #include <vm/seg_kmem.h>
39 #include <vm/seg_kpm.h>
41 #define BP_FLUSH(addr, size)
43 int bp_force_copy = 0;
44 typedef enum {
45 BP_COPYIN = 0,
46 BP_COPYOUT = 1
47 } bp_copydir_t;
48 static int bp_copy_common(bp_copydir_t dir, struct buf *bp, void *driverbuf,
49 offset_t offset, size_t size);
51 static vmem_t *bp_map_arena;
52 static size_t bp_align;
53 static uint_t bp_devload_flags = PROT_READ | PROT_WRITE | HAT_NOSYNC;
54 int bp_max_cache = 1 << 17; /* 128K default; tunable */
55 int bp_mapin_kpm_enable = 1; /* enable default; tunable */
57 static void *
58 bp_vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
60 return (vmem_xalloc(vmp, size, bp_align, 0, 0, NULL, NULL, vmflag));
63 void
64 bp_init(size_t align, uint_t devload_flags)
66 bp_align = MAX(align, PAGESIZE);
67 bp_devload_flags |= devload_flags;
69 if (bp_align <= bp_max_cache)
70 bp_map_arena = vmem_create("bp_map", NULL, 0, bp_align,
71 bp_vmem_alloc, vmem_free, heap_arena,
72 MIN(8 * bp_align, bp_max_cache), VM_SLEEP);
76 * common routine so can be called with/without VM_SLEEP
78 void *
79 bp_mapin_common(struct buf *bp, int flag)
81 struct as *as;
82 pfn_t pfnum;
83 page_t *pp;
84 page_t **pplist;
85 caddr_t kaddr;
86 caddr_t addr;
87 uintptr_t off;
88 size_t size;
89 pgcnt_t npages;
90 int color;
92 /* return if already mapped in, no pageio/physio, or physio to kas */
93 if ((bp->b_flags & B_REMAPPED) ||
94 !(bp->b_flags & (B_PAGEIO | B_PHYS)) ||
95 (((bp->b_flags & (B_PAGEIO | B_PHYS)) == B_PHYS) &&
96 ((bp->b_proc == NULL) || (bp->b_proc->p_as == &kas))))
97 return (bp->b_un.b_addr);
99 ASSERT((bp->b_flags & (B_PAGEIO | B_PHYS)) != (B_PAGEIO | B_PHYS));
101 addr = (caddr_t)bp->b_un.b_addr;
102 off = (uintptr_t)addr & PAGEOFFSET;
103 size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
104 npages = btop(size);
106 /* Fastpath single page IO to locked memory by using kpm. */
107 if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
108 kpm_enable && bp_mapin_kpm_enable) {
109 if (bp->b_flags & B_SHADOW)
110 pp = *bp->b_shadow;
111 else
112 pp = bp->b_pages;
113 kaddr = hat_kpm_mapin(pp, NULL);
114 bp->b_un.b_addr = kaddr + off;
115 bp->b_flags |= B_REMAPPED;
116 return (bp->b_un.b_addr);
120 * Allocate kernel virtual space for remapping.
122 color = bp_color(bp);
123 ASSERT(color < bp_align);
125 if (bp_map_arena != NULL) {
126 kaddr = (caddr_t)vmem_alloc(bp_map_arena,
127 P2ROUNDUP(color + size, bp_align), flag);
128 if (kaddr == NULL)
129 return (NULL);
130 kaddr += color;
131 } else {
132 kaddr = vmem_xalloc(heap_arena, size, bp_align, color,
133 0, NULL, NULL, flag);
134 if (kaddr == NULL)
135 return (NULL);
138 ASSERT(P2PHASE((uintptr_t)kaddr, bp_align) == color);
141 * Map bp into the virtual space we just allocated.
143 if (bp->b_flags & B_PAGEIO) {
144 pp = bp->b_pages;
145 pplist = NULL;
146 } else if (bp->b_flags & B_SHADOW) {
147 pp = NULL;
148 pplist = bp->b_shadow;
149 } else {
150 pp = NULL;
151 pplist = NULL;
152 if (bp->b_proc == NULL || (as = bp->b_proc->p_as) == NULL)
153 as = &kas;
156 bp->b_flags |= B_REMAPPED;
157 bp->b_un.b_addr = kaddr + off;
159 while (npages-- != 0) {
160 if (pp) {
161 pfnum = pp->p_pagenum;
162 pp = pp->p_next;
163 } else if (pplist == NULL) {
164 pfnum = hat_getpfnum(as->a_hat,
165 (caddr_t)((uintptr_t)addr & MMU_PAGEMASK));
166 if (pfnum == PFN_INVALID)
167 panic("bp_mapin_common: hat_getpfnum for"
168 " addr %p failed\n", (void *)addr);
169 addr += PAGESIZE;
170 } else {
171 pfnum = (*pplist)->p_pagenum;
172 pplist++;
175 hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
176 bp_devload_flags, HAT_LOAD_LOCK);
178 kaddr += PAGESIZE;
180 return (bp->b_un.b_addr);
184 * Convert bp for pageio/physio to a kernel addressable location.
186 void
187 bp_mapin(struct buf *bp)
189 (void) bp_mapin_common(bp, VM_SLEEP);
193 * Release all the resources associated with a previous bp_mapin() call.
195 void
196 bp_mapout(struct buf *bp)
198 caddr_t addr;
199 uintptr_t off;
200 uintptr_t base;
201 uintptr_t color;
202 size_t size;
203 pgcnt_t npages;
204 page_t *pp;
206 if ((bp->b_flags & B_REMAPPED) == 0)
207 return;
209 addr = bp->b_un.b_addr;
210 off = (uintptr_t)addr & PAGEOFFSET;
211 size = P2ROUNDUP(bp->b_bcount + off, PAGESIZE);
212 npages = btop(size);
214 bp->b_un.b_addr = (caddr_t)off; /* debugging aid */
216 if ((bp->b_flags & (B_SHADOW | B_PAGEIO)) && (npages == 1) &&
217 kpm_enable && bp_mapin_kpm_enable) {
218 if (bp->b_flags & B_SHADOW)
219 pp = *bp->b_shadow;
220 else
221 pp = bp->b_pages;
222 addr = (caddr_t)((uintptr_t)addr & MMU_PAGEMASK);
223 hat_kpm_mapout(pp, NULL, addr);
224 bp->b_flags &= ~B_REMAPPED;
225 return;
228 base = (uintptr_t)addr & MMU_PAGEMASK;
229 BP_FLUSH(base, size);
230 hat_unload(kas.a_hat, (void *)base, size,
231 HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK);
232 if (bp_map_arena != NULL) {
233 color = P2PHASE(base, bp_align);
234 vmem_free(bp_map_arena, (void *)(base - color),
235 P2ROUNDUP(color + size, bp_align));
236 } else
237 vmem_free(heap_arena, (void *)base, size);
238 bp->b_flags &= ~B_REMAPPED;
242 * copy data from a KVA into a buf_t which may not be mapped in. offset
243 * is relative to the buf_t only.
246 bp_copyout(void *driverbuf, struct buf *bp, offset_t offset, size_t size)
248 return (bp_copy_common(BP_COPYOUT, bp, driverbuf, offset, size));
252 * copy data from a buf_t which may not be mapped in, into a KVA.. offset
253 * is relative to the buf_t only.
256 bp_copyin(struct buf *bp, void *driverbuf, offset_t offset, size_t size)
258 return (bp_copy_common(BP_COPYIN, bp, driverbuf, offset, size));
/*
 * Select the bcopy() direction for bp_copy_common(): BP_COPYIN moves
 * data from the buf address into the driver buffer, BP_COPYOUT the
 * reverse.  Exactly one bcopy() is evaluated per expansion.
 */
#define	BP_COPY(dir, driverbuf, baddr, sz)	\
	(dir == BP_COPYIN) ? \
	bcopy(baddr, driverbuf, sz) : bcopy(driverbuf, baddr, sz)
266 static int
267 bp_copy_common(bp_copydir_t dir, struct buf *bp, void *driverbuf,
268 offset_t offset, size_t size)
270 page_t **pplist;
271 uintptr_t poff;
272 uintptr_t voff;
273 struct as *as;
274 caddr_t kaddr;
275 caddr_t addr;
276 page_t *page;
277 size_t psize;
278 page_t *pp;
279 pfn_t pfn;
282 ASSERT((offset + size) <= bp->b_bcount);
284 /* if the buf_t already has a KVA, just do a bcopy */
285 if (!(bp->b_flags & (B_PHYS | B_PAGEIO))) {
286 BP_COPY(dir, driverbuf, bp->b_un.b_addr + offset, size);
287 return (0);
290 /* if we don't have kpm enabled, we need to do the slow path */
291 if (!kpm_enable || bp_force_copy) {
292 bp_mapin(bp);
293 BP_COPY(dir, driverbuf, bp->b_un.b_addr + offset, size);
294 bp_mapout(bp);
295 return (0);
299 * kpm is enabled, and we need to map in the buf_t for the copy
302 /* setup pp, plist, and make sure 'as' is right */
303 if (bp->b_flags & B_PAGEIO) {
304 pp = bp->b_pages;
305 pplist = NULL;
306 } else if (bp->b_flags & B_SHADOW) {
307 pp = NULL;
308 pplist = bp->b_shadow;
309 } else {
310 pp = NULL;
311 pplist = NULL;
312 if (bp->b_proc == NULL || (as = bp->b_proc->p_as) == NULL) {
313 as = &kas;
318 * locals for the address, the offset into the first page, and the
319 * size of the first page we are going to copy.
321 addr = (caddr_t)bp->b_un.b_addr;
322 poff = (uintptr_t)addr & PAGEOFFSET;
323 psize = MIN(PAGESIZE - poff, size);
326 * we always start with a 0 offset into the driverbuf provided. The
327 * offset passed in only applies to the buf_t.
329 voff = 0;
331 /* Loop until we've copied al the data */
332 while (size > 0) {
335 * for a pp or pplist, get the pfn, then go to the next page_t
336 * for the next time around the loop.
338 if (pp) {
339 page = pp;
340 pp = pp->p_next;
341 } else if (pplist != NULL) {
342 page = (*pplist);
343 pplist++;
346 * We have a user VA. If we are going to copy this page, (e.g.
347 * the offset into the buf_t where we start to copy is
348 * within this page), get the pfn. Don't waste the cycles
349 * getting the pfn if we're not copying this page.
351 } else if (offset < psize) {
352 pfn = hat_getpfnum(as->a_hat,
353 (caddr_t)((uintptr_t)addr & PAGEMASK));
354 if (pfn == PFN_INVALID) {
355 return (-1);
357 page = page_numtopp_nolock(pfn);
358 addr += psize - offset;
359 } else {
360 addr += psize;
364 * if we have an initial offset into the buf_t passed in,
365 * and it falls within the current page, account for it in
366 * the page size (how much we will copy) and the offset into the
367 * page (where we'll start copying from).
369 if ((offset > 0) && (offset < psize)) {
370 psize -= offset;
371 poff += offset;
372 offset = 0;
375 * if we have an initial offset into the buf_t passed in,
376 * and it's not within the current page, skip this page.
377 * We don't have to worry about the first page offset and size
378 * anymore. psize will normally be PAGESIZE now unless we are
379 * on the last page.
381 } else if (offset >= psize) {
382 offset -= psize;
383 psize = MIN(PAGESIZE, size);
384 poff = 0;
385 continue;
389 * get a kpm mapping to the page, them copy in/out of the
390 * page. update size left and offset into the driverbuf passed
391 * in for the next time around the loop.
393 kaddr = hat_kpm_mapin(page, NULL) + poff;
394 BP_COPY(dir, (void *)((uintptr_t)driverbuf + voff), kaddr,
395 psize);
396 hat_kpm_mapout(page, NULL, kaddr - poff);
398 size -= psize;
399 voff += psize;
401 poff = 0;
402 psize = MIN(PAGESIZE, size);
405 return (0);