/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <vm/vm_domain.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

#ifdef VM_NUMA_ALLOC
struct mem_affinity *mem_affinity;
int *mem_locality;
#endif

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);

RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);

struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};

RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);

static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

#ifdef VM_NUMA_ALLOC
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
#endif

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

/*
 * Default to first-touch + round-robin.
 */
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
    MTX_DEF);
#ifdef VM_NUMA_ALLOC
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
	char policy_name[32];
	int error;

	mtx_lock(&vm_default_policy_mtx);

	/* Map policy to output string */
	switch (vm_default_policy.p.policy) {
	case VM_POLICY_FIRST_TOUCH:
		strcpy(policy_name, "first-touch");
		break;
	case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
		strcpy(policy_name, "first-touch-rr");
		break;
	case VM_POLICY_ROUND_ROBIN:
	default:
		strcpy(policy_name, "rr");
		break;
	}
	mtx_unlock(&vm_default_policy_mtx);

	error = sysctl_handle_string(oidp, &policy_name[0],
	    sizeof(policy_name), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vm_default_policy_mtx);
	/* Set: match on the subset of policies that make sense as a default */
	if (strcmp("first-touch-rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
	} else if (strcmp("first-touch", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH, 0);
	} else if (strcmp("rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_ROUND_ROBIN, 0);
	} else {
		error = EINVAL;
		goto finish;
	}

	error = 0;
finish:
	mtx_unlock(&vm_default_policy_mtx);
	return (error);
}

SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_vm_default_policy, "A",
    "Default policy (rr, first-touch, first-touch-rr)");

/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}

static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}

static __inline int
vm_rr_selectdomain(void)
{
#ifdef VM_NUMA_ALLOC
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

/*
 * Initialise a VM domain iterator.
 *
 * Check the thread policy, then the proc policy,
 * then default to the system policy.
 *
 * Later on the various layers will have this logic
 * plumbed into them and the phys code will be explicitly
 * handed a VM domain policy to use.
 */
static void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#ifdef VM_NUMA_ALLOC
	struct vm_domain_policy lcl;
#endif

	vm_domain_iterator_init(vi);

#ifdef VM_NUMA_ALLOC
	/* Copy out the thread policy */
	vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Thread policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}

	vm_domain_policy_localcopy(&lcl,
	    &curthread->td_proc->p_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Process policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}
#endif
	/* Use system default policy */
	vm_domain_iterator_set_policy(vi, &vm_default_policy);
}

static void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{

	vm_domain_iterator_cleanup(vi);
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

#ifdef VM_NUMA_ALLOC
	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
#else
	return (-1);
#endif
}

#ifdef VM_NUMA_ALLOC
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
#endif

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
#ifdef VM_NUMA_ALLOC
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
#else
	_vm_phys_create_seg(start, end, 0);
#endif
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
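
/*
 * Worked example (illustrative): suppose three VM_FREELIST_* constants exist
 * in the order DEFAULT (0), DMA32 (1), LOWMEM (2), and only DEFAULT and
 * LOWMEM are flagged with 1 above.  The running totals become 1, 1, 2, and
 * after the final decrement DEFAULT maps to flind 0 and LOWMEM to flind 1,
 * with vm_nfreelists = 2; the unflagged DMA32 entry is never looked up.
 */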

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	vm_cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int domain, flind;
	struct vm_domain_iterator vi;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	struct vm_domain_iterator vi;
	int domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}
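
/*
 * Worked example (illustrative): a request for order 2 (four pages) from a
 * pool whose queues are empty may instead find an order-4 (16-page) block in
 * another pool.  That block is removed from its queue, retagged to the
 * requested pool by vm_phys_set_pool(), and vm_phys_split_pages() returns
 * its order-3 and order-2 buddies to the free lists, leaving the caller with
 * the remaining order-2 block.
 */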

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}

static inline void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	} else {
alloc:
#endif
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
	}
#endif
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif
	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
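
/*
 * Worked example (illustrative, assuming 4KB pages): freeing an order-0 page
 * at physical address 0x2000 checks its buddy at 0x2000 ^ 0x1000 = 0x3000.
 * If that page is free at order 0, the pair is merged and the check repeats
 * at order 1 with the buddy at 0x0000, and so on until the buddy lies
 * outside the segment or is not free at the matching order, at which point
 * the coalesced block is queued on the tail of its free list.
 */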

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
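
/*
 * Worked example (illustrative): freeing a run of 11 pages whose first page
 * frame number is 4 proceeds as 4 + 4 + 2 + 1 pages.  The first loop frees an
 * order-2 block at frame 4; the next frame, 8, would permit order 3, but only
 * 7 pages remain, so the second loop frees blocks of order 2, 1, and 0, each
 * matching the largest size that still fits the remaining run.
 */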

/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
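
/*
 * Worked example (illustrative): if "m" is the third page (index 2) of a
 * free order-3 (eight-page) block, the whole block is removed from its free
 * queue and then split: the upper order-2 half (pages 4-7) is returned to
 * the free lists, then the order-1 pair not containing "m" (pages 0-1), and
 * finally the order-0 buddy (page 3), leaving exactly "m" unfree.
 */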

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_domain_iterator vi;
	struct vm_phys_seg *seg;
	int domain, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	vm_policy_iterator_init(&vi);
restartdom:
	if (vm_domain_iterator_run(&vi, &domain) != 0) {
		vm_policy_iterator_finish(&vi);
		return (NULL);
	}
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL && !vm_domain_iterator_isdone(&vi))
		goto restartdom;
	vm_policy_iterator_finish(&vi);
	return (m_run);
}

/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}
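
/*
 * Worked example (illustrative): a request for npages = 5 computes order = 3,
 * the smallest power of two not less than 5.  If an order-3 (eight-page)
 * block passes the range, alignment, and boundary checks, npages_end becomes
 * roundup2(5, 8) = 8, so the trailing three pages are handed back to
 * vm_phys_free_contig().
 */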

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif