/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

#include <vm/vm_domain.h>
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");
struct mem_affinity *mem_affinity;
int *mem_locality;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;
struct vm_phys_fictitious_seg;
static int vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *,
    struct vm_phys_fictitious_seg *);
RB_HEAD(fict_tree, vm_phys_fictitious_seg) vm_phys_fictitious_tree =
    RB_INITIALIZER(_vm_phys_fictitious_tree);
struct vm_phys_fictitious_seg {
	RB_ENTRY(vm_phys_fictitious_seg) node;
	/* Memory region data */
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
};
RB_GENERATE_STATIC(fict_tree, vm_phys_fictitious_seg, node,
    vm_phys_fictitious_cmp);
static struct rwlock vm_phys_fictitious_reg_lock;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");
static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
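/*
 * The free queues are indexed as
 * vm_phys_free_queues[domain][flind][pool][order].  For example (an
 * illustrative reading, assuming a typical single-domain x86
 * configuration with 4KB pages),
 * &vm_phys_free_queues[0][0][VM_FREEPOOL_DEFAULT][9] heads the list of
 * free order-9 (2MB) blocks in the default pool of domain 0's first
 * free list.
 */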
static int vm_nfreelists;
/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];
CTASSERT(VM_FREELIST_DEFAULT == 0);
#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif
/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif
static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");
static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");
static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");
static int sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_locality, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_locality, "A", "Phys Locality Info");
SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");
/*
 * Default to first-touch + round-robin.
 */
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
    MTX_DEF);
#if MAXMEMDOM > 1
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
    VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif
static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);
static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
	char policy_name[32];
	int error;

	mtx_lock(&vm_default_policy_mtx);

	/* Map policy to output string */
	switch (vm_default_policy.p.policy) {
	case VM_POLICY_FIRST_TOUCH:
		strcpy(policy_name, "first-touch");
		break;
	case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
		strcpy(policy_name, "first-touch-rr");
		break;
	case VM_POLICY_ROUND_ROBIN:
	default:
		strcpy(policy_name, "rr");
		break;
	}
	mtx_unlock(&vm_default_policy_mtx);

	error = sysctl_handle_string(oidp, &policy_name[0],
	    sizeof(policy_name), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	mtx_lock(&vm_default_policy_mtx);
	/* Set: match on the subset of policies that make sense as a default */
	if (strcmp("first-touch-rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
	} else if (strcmp("first-touch", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_FIRST_TOUCH, 0);
	} else if (strcmp("rr", policy_name) == 0) {
		vm_domain_policy_set(&vm_default_policy,
		    VM_POLICY_ROUND_ROBIN, 0);
	} else {
		error = EINVAL;
		goto finish;
	}

	error = 0;
finish:
	mtx_unlock(&vm_default_policy_mtx);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
    0, 0, sysctl_vm_default_policy, "A",
    "Default policy (rr, first-touch, first-touch-rr)");
/*
 * Red-black tree helpers for vm fictitious range management.
 */
static inline int
vm_phys_fictitious_in_range(struct vm_phys_fictitious_seg *p,
    struct vm_phys_fictitious_seg *range)
{

	KASSERT(range->start != 0 && range->end != 0,
	    ("Invalid range passed on search for vm_fictitious page"));
	if (p->start >= range->end)
		return (1);
	if (p->start < range->start)
		return (-1);

	return (0);
}
static int
vm_phys_fictitious_cmp(struct vm_phys_fictitious_seg *p1,
    struct vm_phys_fictitious_seg *p2)
{

	/* Check if this is a search for a page */
	if (p1->end == 0)
		return (vm_phys_fictitious_in_range(p1, p2));

	KASSERT(p2->end != 0,
	    ("Invalid range passed as second parameter to vm fictitious comparison"));

	/* Searching to add a new range */
	if (p1->end <= p2->start)
		return (-1);
	if (p1->start >= p2->end)
		return (1);

	panic("Trying to add overlapping vm fictitious ranges:\n"
	    "[%#jx:%#jx] and [%#jx:%#jx]", (uintmax_t)p1->start,
	    (uintmax_t)p1->end, (uintmax_t)p2->start, (uintmax_t)p2->end);
}
static int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}
/*
 * Initialise a VM domain iterator.
 *
 * Check the thread policy, then the proc policy,
 * then default to the system policy.
 *
 * Later on the various layers will have this logic
 * plumbed into them and the phys code will be explicitly
 * handed a VM domain policy to use.
 */
static void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#if MAXMEMDOM > 1
	struct vm_domain_policy lcl;
#endif

	vm_domain_iterator_init(vi);

#if MAXMEMDOM > 1
	/* Copy out the thread policy */
	vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Thread policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}

	vm_domain_policy_localcopy(&lcl,
	    &curthread->td_proc->p_vm_dom_policy);
	if (lcl.p.policy != VM_POLICY_NONE) {
		/* Process policy is present; use it */
		vm_domain_iterator_set_policy(vi, &lcl);
		return;
	}
#endif
	/* Use system default policy */
	vm_domain_iterator_set_policy(vi, &vm_default_policy);
}
static void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{

	vm_domain_iterator_cleanup(vi);
}
int
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (1);
	}
	return (0);
}
/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
/*
 * Return affinity, or -1 if there's no affinity information.
 */
int
vm_phys_mem_affinity(int f, int t)
{

	if (mem_locality == NULL)
		return (-1);
	if (f >= vm_ndomains || t >= vm_ndomains)
		return (-1);
	return (mem_locality[f * vm_ndomains + t]);
}
/*
 * Outputs the VM locality table.
 */
static int
sysctl_vm_phys_locality(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	int error, i, j;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);

	sbuf_printf(&sbuf, "\n");

	for (i = 0; i < vm_ndomains; i++) {
		sbuf_printf(&sbuf, "%d: ", i);
		for (j = 0; j < vm_ndomains; j++) {
			sbuf_printf(&sbuf, "%d ", vm_phys_mem_affinity(i, j));
		}
		sbuf_printf(&sbuf, "\n");
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}
static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}
/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}
/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}

	rw_init(&vm_phys_fictitious_reg_lock, "vmfctr");
}
/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}
/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	vm_cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}
/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int domain, flind;
	struct vm_domain_iterator vi;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}
/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	struct vm_domain_iterator vi;
	int domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));

	vm_policy_iterator_init(&vi);

	while ((vm_domain_iterator_run(&vi, &domain)) == 0) {
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}

	vm_policy_iterator_finish(&vi);
	return (NULL);
}
static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}
/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg tmp, *seg;
	vm_page_t m;

	m = NULL;
	tmp.start = pa;
	tmp.end = 0;

	rw_rlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	rw_runlock(&vm_phys_fictitious_reg_lock);
	if (seg == NULL)
		return (NULL);

	m = &seg->first_page[atop(pa - seg->start)];
	KASSERT((m->flags & PG_FICTITIOUS) != 0, ("%p not fictitious", m));

	return (m);
}
static void
vm_phys_fictitious_init_range(vm_page_t range, vm_paddr_t start,
    long page_count, vm_memattr_t memattr)
{
	long i;

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&range[i], start + PAGE_SIZE * i, memattr);
		range[i].oflags &= ~VPO_UNMANAGED;
		range[i].busy_lock = VPB_UNBUSIED;
	}
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long page_count;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
	long dpage_count;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		fp = &vm_page_array[pi - first_page];
		if ((pe - first_page) > vm_page_array_size) {
			/*
			 * We have a segment that starts inside
			 * of vm_page_array, but ends outside of it.
			 *
			 * Use vm_page_array pages for those that are
			 * inside of the vm_page_array range, and
			 * allocate the remaining ones.
			 */
			dpage_count = vm_page_array_size - (pi - first_page);
			vm_phys_fictitious_init_range(fp, start, dpage_count,
			    memattr);
			page_count -= dpage_count;
			start += ptoa(dpage_count);
			goto alloc;
		}
		/*
		 * We can allocate the full range from vm_page_array,
		 * so there's no need to register the range in the tree.
		 */
		vm_phys_fictitious_init_range(fp, start, page_count, memattr);
		return (0);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		fp = &vm_page_array[0];
		dpage_count = pe - first_page;
		vm_phys_fictitious_init_range(fp, ptoa(first_page), dpage_count,
		    memattr);
		end -= ptoa(dpage_count);
		page_count -= dpage_count;
		goto alloc;
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/*
		 * Trying to register a fictitious range that expands before
		 * and after vm_page_array.
		 */
		return (EINVAL);
	}
alloc:
#endif
	fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
	    M_WAITOK | M_ZERO);
	vm_phys_fictitious_init_range(fp, start, page_count, memattr);

	seg = malloc(sizeof(*seg), M_FICT_PAGES, M_WAITOK | M_ZERO);
	seg->start = start;
	seg->end = end;
	seg->first_page = fp;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	RB_INSERT(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);

	return (0);
}
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg, tmp;
#ifdef VM_PHYSSEG_DENSE
	long pi, pe;
#endif

	KASSERT(start < end,
	    ("Start of segment isn't less than end (start: %jx end: %jx)",
	    (uintmax_t)start, (uintmax_t)end));

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	pe = atop(end);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		if ((pe - first_page) <= vm_page_array_size) {
			/*
			 * This segment was allocated using vm_page_array
			 * only, there's nothing to do since those pages
			 * were never added to the tree.
			 */
			return;
		}
		/*
		 * We have a segment that starts inside
		 * of vm_page_array, but ends outside of it.
		 *
		 * Calculate how many pages were added to the
		 * tree and free them.
		 */
		start = ptoa(first_page + vm_page_array_size);
	} else if (pe > first_page && (pe - first_page) < vm_page_array_size) {
		/*
		 * We have a segment that ends inside of vm_page_array,
		 * but starts outside of it.
		 */
		end = ptoa(first_page);
	} else if (pi < first_page && pe > (first_page + vm_page_array_size)) {
		/* Since it's not possible to register such a range, panic. */
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
#endif

	tmp.start = start;
	tmp.end = 0;

	rw_wlock(&vm_phys_fictitious_reg_lock);
	seg = RB_FIND(fict_tree, &vm_phys_fictitious_tree, &tmp);
	if (seg->start != start || seg->end != end) {
		rw_wunlock(&vm_phys_fictitious_reg_lock);
		panic(
		    "Unregistering not registered fictitious range [%#jx:%#jx]",
		    (uintmax_t)start, (uintmax_t)end);
	}
	RB_REMOVE(fict_tree, &vm_phys_fictitious_tree, seg);
	rw_wunlock(&vm_phys_fictitious_reg_lock);
	free(seg->first_page, M_FICT_PAGES);
	free(seg, M_FICT_PAGES);
}
/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}
/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
/*
 * Scan physical memory between the specified addresses "low" and "high" for a
 * run of contiguous physical pages that satisfy the specified conditions, and
 * return the lowest page in the run.  The specified "alignment" determines
 * the alignment of the lowest physical page in the run.  If the specified
 * "boundary" is non-zero, then the run of physical pages cannot span a
 * physical address that is a multiple of "boundary".
 *
 * "npages" must be greater than zero.  Both "alignment" and "boundary" must
 * be a power of two.
 */
vm_page_t
vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_paddr_t pa_end;
	vm_page_t m_end, m_run, m_start;
	struct vm_phys_seg *seg;
	int segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	if (low >= high)
		return (NULL);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high)
			break;
		if (low >= seg->end)
			continue;
		if (low <= seg->start)
			m_start = seg->first_page;
		else
			m_start = &seg->first_page[atop(low - seg->start)];
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - VM_PAGE_TO_PHYS(m_start) < ptoa(npages))
			continue;
		m_end = &seg->first_page[atop(pa_end - seg->start)];
		m_run = vm_page_scan_contig(npages, m_start, m_end,
		    alignment, boundary, options);
		if (m_run != NULL)
			return (m_run);
	}
	return (NULL);
}
/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}
/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}
/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
				return (FALSE);
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}
/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa_end, pa_start;
	vm_page_t m_run;
	struct vm_domain_iterator vi;
	struct vm_phys_seg *seg;
	int domain, segind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (low >= high)
		return (NULL);
	vm_policy_iterator_init(&vi);
restartdom:
	if (vm_domain_iterator_run(&vi, &domain) != 0) {
		vm_policy_iterator_finish(&vi);
		return (NULL);
	}
	m_run = NULL;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
		if (seg->start >= high || seg->domain != domain)
			continue;
		if (low >= seg->end)
			break;
		if (low <= seg->start)
			pa_start = seg->start;
		else
			pa_start = low;
		if (high < seg->end)
			pa_end = high;
		else
			pa_end = seg->end;
		if (pa_end - pa_start < ptoa(npages))
			continue;
		m_run = vm_phys_alloc_seg_contig(seg, npages, low, high,
		    alignment, boundary);
		if (m_run != NULL)
			break;
	}
	if (m_run == NULL && !vm_domain_iterator_isdone(&vi))
		goto restartdom;
	vm_policy_iterator_finish(&vi);
	return (m_run);
}
/*
 * Allocate a run of contiguous physical pages from the free list for the
 * specified segment.
 */
static vm_page_t
vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	vm_paddr_t pa, pa_end, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int oind, order, pind;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Search for a run satisfying the specified conditions. */
	size = npages << PAGE_SHIFT;
	for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER;
	    oind++) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			fl = (*seg->free_queues)[pind];
			TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
				/*
				 * Is the size of this allocation request
				 * larger than the largest block size?
				 */
				if (order >= VM_NFREEORDER) {
					/*
					 * Determine if a sufficient number of
					 * subsequent blocks to satisfy the
					 * allocation request are free.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					pa_end = pa + size;
					for (;;) {
						pa += 1 << (PAGE_SHIFT +
						    VM_NFREEORDER - 1);
						if (pa >= pa_end ||
						    pa < seg->start ||
						    pa >= seg->end)
							break;
						m = &seg->first_page[atop(pa -
						    seg->start)];
						if (m->order != VM_NFREEORDER -
						    1)
							break;
					}
					/* If not, go to the next block. */
					if (pa < pa_end)
						continue;
				}

				/*
				 * Determine if the blocks are within the
				 * given range, satisfy the given alignment,
				 * and do not cross the given boundary.
				 */
				pa = VM_PAGE_TO_PHYS(m_ret);
				pa_end = pa + size;
				if (pa >= low && pa_end <= high &&
				    (pa & (alignment - 1)) == 0 &&
				    rounddown2(pa ^ (pa_end - 1), boundary) == 0)
					goto done;
			}
		}
	}
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}
#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- -- ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif