/*
** Copyright 2001-2004, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/arch/cpu.h>
#include <kernel/vm.h>
#include <kernel/vm_priv.h>
#include <kernel/vm_page.h>
#include <kernel/vm_cache.h>
#include <kernel/arch/vm_translation_map.h>
#include <kernel/console.h>
#include <kernel/debug.h>
#include <kernel/int.h>
#include <kernel/thread.h>
#include <kernel/smp.h>
#include <kernel/sem.h>
#include <kernel/list.h>
#include <newos/errors.h>
#include <boot/stage2.h>

typedef struct page_queue {
    struct list_node list;
    int count;
} page_queue;

extern bool trimming_cycle;

static page_queue page_free_queue;
static page_queue page_clear_queue;
static page_queue page_active_queue;
static page_queue page_modified_queue;
static page_queue page_modified_temporary_queue;

static vm_page *all_pages;
static addr_t physical_page_offset;
static unsigned int num_pages;

static spinlock_t page_lock;

static sem_id modified_pages_available;

void dump_page_stats(int argc, char **argv);
void dump_free_page_table(int argc, char **argv);
static int vm_page_set_state_nolock(vm_page *page, int page_state);
static void clear_page(addr_t pa);
static int page_scrubber(void *);
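
// Helper routines for the page queues: each queue is a list of vm_page
// structures (linked through vm_page.queue_node) plus a count of entries.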
static vm_page *dequeue_page(page_queue *q)
{
    vm_page *page;

    page = list_remove_head_type(&q->list, vm_page, queue_node);
    if(page != NULL)
        q->count--;

    return page;
}

static void enqueue_page(page_queue *q, vm_page *page)
{
    list_add_head(&q->list, &page->queue_node);
    q->count++;
    if(q == &page_modified_queue || q == &page_modified_temporary_queue) {
        // wake the pageout daemon, but do not force a reschedule
        sem_release_etc(modified_pages_available, 1, SEM_FLAG_NO_RESCHED);
    }
}

static void remove_page_from_queue(page_queue *q, vm_page *page)
{
    q->count--;
    list_delete(&page->queue_node);
}

static void move_page_to_queue(page_queue *from_q, page_queue *to_q, vm_page *page)
{
    remove_page_from_queue(from_q, page);
    enqueue_page(to_q, page);
}
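
// The pageout daemon sleeps on the modified_pages_available semaphore.
// Each time a page lands on a modified queue it wakes up, pulls one page,
// clears the modified bit in every mapping of that page, writes the page
// to its backing store, and then requeues it as active or inactive.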
static int pageout_daemon()
{
    vm_page *page;
    vm_region *region;
    IOVECS(vecs, 1); // stack-allocated iovecs holding a single iovec
    ssize_t err;

    dprintf("pageout daemon starting\n");

#if 1
    // XXX the daemon is disabled for now
    dprintf("pageout daemon disabled, exiting...\n");
    return 0;
#endif

    for(;;) {
        sem_acquire(modified_pages_available, 1);

        // pull the next modified page off the queue and mark it busy
        int_disable_interrupts();
        acquire_spinlock(&page_lock);
        page = dequeue_page(&page_modified_queue);
        page->state = PAGE_STATE_BUSY;
        vm_info.modified_pages--;
        vm_info.busy_pages++;
        vm_cache_acquire_ref(page->cache_ref, true);
        release_spinlock(&page_lock);
        int_restore_interrupts();

        dprintf("got page %p\n", page);

        if(page->cache_ref->cache->temporary && !trimming_cycle) {
            // unless we're in the trimming cycle, don't write out pages
            // that back anonymous stores
            int_disable_interrupts();
            acquire_spinlock(&page_lock);
            enqueue_page(&page_modified_queue, page);
            page->state = PAGE_STATE_MODIFIED;
            vm_info.busy_pages--;
            vm_info.modified_pages++;
            release_spinlock(&page_lock);
            int_restore_interrupts();
            vm_cache_release_ref(page->cache_ref);
            continue;
        }

        /* clear the modified flag on this page in all of its mappings */
        mutex_lock(&page->cache_ref->lock);
        list_for_every_entry(&page->cache_ref->region_list_head, region, vm_region, cache_node) {
            if(page->offset > region->cache_offset
              && page->offset < region->cache_offset + region->size) {
                vm_translation_map *map = &region->aspace->translation_map;
                map->ops->lock(map);
                map->ops->clear_flags(map, page->offset - region->cache_offset + region->base, PAGE_MODIFIED);
                map->ops->unlock(map);
            }
        }
        mutex_unlock(&page->cache_ref->lock);

        /* write the page out to its backing store */
        vecs->total_len = PAGE_SIZE;
        vm_get_physical_page(page->ppn * PAGE_SIZE, (addr_t *)&vecs->vec[0].start, PHYSICAL_PAGE_CAN_WAIT);
        vecs->vec[0].len = PAGE_SIZE;

        err = page->cache_ref->cache->store->ops->write(page->cache_ref->cache->store, page->offset, vecs);

        vm_put_physical_page((addr_t)vecs->vec[0].start);

        int_disable_interrupts();
        acquire_spinlock(&page_lock);
        vm_info.busy_pages--;
        if(page->ref_count > 0) {
            page->state = PAGE_STATE_ACTIVE;
            vm_info.active_pages++;
        } else {
            page->state = PAGE_STATE_INACTIVE;
            vm_info.inactive_pages++;
        }
        enqueue_page(&page_active_queue, page);
        release_spinlock(&page_lock);
        int_restore_interrupts();

        vm_cache_release_ref(page->cache_ref);
    }
}
int vm_page_init(kernel_args *ka)
{
    unsigned int i;

    dprintf("vm_page_init: entry\n");

    // initialize the queue structures
    list_initialize(&page_free_queue.list);
    page_free_queue.count = 0;
    list_initialize(&page_clear_queue.list);
    page_clear_queue.count = 0;
    list_initialize(&page_active_queue.list);
    page_active_queue.count = 0;
    list_initialize(&page_modified_queue.list);
    page_modified_queue.count = 0;
    list_initialize(&page_modified_temporary_queue.list);
    page_modified_temporary_queue.count = 0;

    // calculate the size of memory by looking at the phys_mem_range array
    {
        unsigned int last_phys_page = 0;

        dprintf("physical memory ranges:\n");
        dprintf("count %d\n", ka->num_phys_mem_ranges);
        physical_page_offset = ka->phys_mem_range[0].start / PAGE_SIZE;
        for(i=0; i<ka->num_phys_mem_ranges; i++) {
            dprintf("\tbase 0x%08lx size 0x%08lx\n", ka->phys_mem_range[i].start, ka->phys_mem_range[i].size);
            last_phys_page = (ka->phys_mem_range[i].start + ka->phys_mem_range[i].size) / PAGE_SIZE - 1;
        }
        dprintf("first phys page = 0x%lx, last 0x%x\n", physical_page_offset, last_phys_page);
        num_pages = last_phys_page - physical_page_offset + 1;
    }

    // set up the global info structure about physical memory
    vm_info.physical_page_size = PAGE_SIZE;
    vm_info.physical_pages = num_pages;

    dprintf("vm_page_init: exit\n");

    return 0;
}

int vm_page_init_postheap(kernel_args *ka)
{
    unsigned int i;

    // map in the new free page table
    all_pages = (vm_page *)vm_alloc_from_ka_struct(ka, num_pages * sizeof(vm_page), LOCK_KERNEL|LOCK_RW);

    dprintf("vm_page_init_postheap: putting free_page_table @ %p, # ents %d (size 0x%x)\n",
        all_pages, num_pages, (unsigned int)(num_pages * sizeof(vm_page)));

    // initialize the free page table
    for(i=0; i < num_pages; i++) {
        all_pages[i].magic = VM_PAGE_MAGIC;
        all_pages[i].ppn = physical_page_offset + i;
        all_pages[i].type = PAGE_TYPE_PHYSICAL;
        all_pages[i].state = PAGE_STATE_FREE;
        all_pages[i].ref_count = 0;
        vm_info.free_pages++;
        enqueue_page(&page_free_queue, &all_pages[i]);
    }

    // mark the boot-time allocated page ranges in use
    for(i = 0; i < ka->num_phys_alloc_ranges; i++) {
        vm_mark_page_range_inuse(ka->phys_alloc_range[i].start / PAGE_SIZE,
            ka->phys_alloc_range[i].size / PAGE_SIZE);
    }

    // set the global max_commit variable
    vm_increase_max_commit(num_pages*PAGE_SIZE);

    dprintf("vm_page_init_postheap: exit\n");

    return 0;
}

int vm_page_init2(kernel_args *ka)
{
    void *null;

    // wrap the page structures in a proper region so the vm knows about them
    null = (void *)all_pages;
    vm_create_anonymous_region(vm_get_kernel_aspace_id(), "page_structures", &null, REGION_ADDR_EXACT_ADDRESS,
        PAGE_ALIGN(num_pages * sizeof(vm_page)), REGION_WIRING_WIRED_ALREADY, LOCK_RW|LOCK_KERNEL);

    dbg_add_command(&dump_page_stats, "page_stats", "Dump statistics about page usage");
    dbg_add_command(&dump_free_page_table, "free_pages", "Dump list of free pages");

    return 0;
}

int vm_page_init_postthread(kernel_args *ka)
{
    thread_id tid;

    // create a kernel thread to clear out pages
    tid = thread_create_kernel_thread("page scrubber", &page_scrubber, NULL);
    thread_set_priority(tid, THREAD_LOWEST_PRIORITY);
    thread_resume_thread(tid);

    modified_pages_available = sem_create(0, "modified_pages_avail_sem");

    // create a kernel thread to schedule modified pages to write
    tid = thread_create_kernel_thread("pageout daemon", &pageout_daemon, NULL);
    thread_set_priority(tid, THREAD_MIN_RT_PRIORITY + 1);
    thread_resume_thread(tid);

    return 0;
}
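
// The page scrubber runs at the lowest priority and zeroes free pages in
// batches of SCRUB_SIZE, moving them from the free queue to the clear queue
// so that PAGE_STATE_CLEAR allocations can usually be satisfied without
// having to zero a page on the spot.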
static int page_scrubber(void *unused)
{
#define SCRUB_SIZE 16
    vm_page *page[SCRUB_SIZE];
    int i;
    int scrub_count;

    dprintf("page_scrubber starting...\n");

    for(;;) {
        thread_snooze(100000); // 100ms

        if(page_free_queue.count > 0) {
            int_disable_interrupts();
            acquire_spinlock(&page_lock);

            for(i=0; i<SCRUB_SIZE; i++) {
                page[i] = dequeue_page(&page_free_queue);
                if(page[i] == NULL)
                    break;
                vm_info.free_pages--;
            }
            scrub_count = i;

            release_spinlock(&page_lock);
            int_restore_interrupts();

            for(i=0; i<scrub_count; i++) {
                clear_page(page[i]->ppn * PAGE_SIZE);
            }

            int_disable_interrupts();
            acquire_spinlock(&page_lock);

            for(i=0; i<scrub_count; i++) {
                page[i]->state = PAGE_STATE_CLEAR;
                enqueue_page(&page_clear_queue, page[i]);
                vm_info.clear_pages++;
            }

            release_spinlock(&page_lock);
            int_restore_interrupts();
        }
    }

    return 0;
}

static void clear_page(addr_t pa)
{
    addr_t va;

    vm_get_physical_page(pa, &va, PHYSICAL_PAGE_CAN_WAIT);

    memset((void *)va, 0, PAGE_SIZE);

    vm_put_physical_page(va);
}
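
// Mark pages already handed out by the boot loader / early allocator as in
// use, so they are never given out again. Page numbers are physical page
// indices; they are translated to all_pages indices internally.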
int vm_mark_page_inuse(addr_t page)
{
    return vm_mark_page_range_inuse(page, 1);
}

int vm_mark_page_range_inuse(addr_t start_page, addr_t len)
{
    vm_page *page;
    addr_t i;

    dprintf("vm_mark_page_range_inuse: start 0x%lx, len 0x%lx\n", start_page, len);

    if(physical_page_offset > start_page) {
        dprintf("vm_mark_page_range_inuse: start page %ld is before free list\n", start_page);
        return ERR_INVALID_ARGS;
    }
    start_page -= physical_page_offset;
    if(start_page + len >= num_pages) {
        dprintf("vm_mark_page_range_inuse: range would extend past free list\n");
        return ERR_INVALID_ARGS;
    }

    int_disable_interrupts();
    acquire_spinlock(&page_lock);

    for(i = 0; i < len; i++) {
        page = &all_pages[start_page + i];
        switch(page->state) {
            case PAGE_STATE_FREE:
            case PAGE_STATE_CLEAR:
                vm_page_set_state_nolock(page, PAGE_STATE_UNUSED);
                break;
            case PAGE_STATE_WIRED:
                break;
            case PAGE_STATE_ACTIVE:
            case PAGE_STATE_INACTIVE:
            case PAGE_STATE_BUSY:
            case PAGE_STATE_MODIFIED:
            case PAGE_STATE_MODIFIED_TEMPORARY:
            case PAGE_STATE_UNUSED:
            default:
                dprintf("vm_mark_page_range_inuse: page 0x%lx in non-free state %d!\n", start_page + i, page->state);
        }
    }

    release_spinlock(&page_lock);
    int_restore_interrupts();

    return 0;
}
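
// Allocate a specific physical page if it is currently free, clear, or
// unused. Returns NULL when the page is in any other state. If the caller
// asked for PAGE_STATE_CLEAR and the page was not already clear, it is
// zeroed before being returned.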
vm_page *vm_page_allocate_specific_page(addr_t page_num, int page_state)
{
    vm_page *p;
    int old_page_state = PAGE_STATE_BUSY;

    int_disable_interrupts();
    acquire_spinlock(&page_lock);

    p = vm_lookup_page(page_num);
    if(p == NULL)
        goto out;

    switch(p->state) {
        case PAGE_STATE_FREE:
            remove_page_from_queue(&page_free_queue, p);
            vm_info.free_pages--;
            break;
        case PAGE_STATE_CLEAR:
            remove_page_from_queue(&page_clear_queue, p);
            vm_info.clear_pages--;
            break;
        case PAGE_STATE_UNUSED:
            break;
        default:
            // we can't allocate this page
            p = NULL;
    }
    if(p == NULL)
        goto out;

    old_page_state = p->state;
    p->state = PAGE_STATE_BUSY;
    vm_info.busy_pages++;

    if(old_page_state != PAGE_STATE_UNUSED)
        enqueue_page(&page_active_queue, p);

out:
    release_spinlock(&page_lock);
    int_restore_interrupts();

    if(p != NULL && page_state == PAGE_STATE_CLEAR &&
      (old_page_state == PAGE_STATE_FREE || old_page_state == PAGE_STATE_UNUSED)) {
        clear_page(p->ppn * PAGE_SIZE);
    }

    return p;
}
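
// Allocate any page from the free or clear queue, preferring the queue that
// matches the requested state. Falls back to the other queue, and panics if
// both are empty (there is no page reclamation yet).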
vm_page *vm_page_allocate_page(int page_state)
{
    vm_page *p;
    page_queue *q;
    page_queue *q_other;
    int old_page_state;

    switch(page_state) {
        case PAGE_STATE_FREE:
            q = &page_free_queue;
            q_other = &page_clear_queue;
            break;
        case PAGE_STATE_CLEAR:
            q = &page_clear_queue;
            q_other = &page_free_queue;
            break;
        default:
            return NULL; // invalid
    }

    int_disable_interrupts();
    acquire_spinlock(&page_lock);

    p = dequeue_page(q);
    if(p == NULL) {
        // the clear queue was empty, grab one from the free queue and zero it out
        q = q_other;
        p = dequeue_page(q);
        if(p == NULL)
            panic("vm_allocate_page: out of memory!\n");
    }

    if(q == &page_free_queue)
        vm_info.free_pages--;
    else
        vm_info.clear_pages--;

    old_page_state = p->state;
    p->state = PAGE_STATE_BUSY;
    vm_info.busy_pages++;

    enqueue_page(&page_active_queue, p);

    release_spinlock(&page_lock);
    int_restore_interrupts();

    if(page_state == PAGE_STATE_CLEAR && old_page_state == PAGE_STATE_FREE) {
        clear_page(p->ppn * PAGE_SIZE);
    }

    return p;
}
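
// Allocate a run of 'len' physically contiguous pages by linearly scanning
// the all_pages array for a stretch of FREE or CLEAR pages. Returns the
// first page of the run, or NULL if no run was found.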
vm_page *vm_page_allocate_page_run(int page_state, addr_t len)
{
    unsigned int start;
    unsigned int i;
    vm_page *first_page = NULL;

    start = 0;

    int_disable_interrupts();
    acquire_spinlock(&page_lock);

    for(;;) {
        bool found = true;

        if(start + len >= num_pages)
            break;

        for(i = 0; i < len; i++) {
            if(all_pages[start + i].state != PAGE_STATE_FREE &&
              all_pages[start + i].state != PAGE_STATE_CLEAR) {
                found = false;
                break;
            }
        }
        if(found) {
            // pull the pages out of the appropriate queues
            for(i = 0; i < len; i++)
                vm_page_set_state_nolock(&all_pages[start + i], PAGE_STATE_BUSY);
            first_page = &all_pages[start];
            break;
        }

        start += i + 1;
        if(start >= num_pages) {
            // no more pages to look through
            break;
        }
    }
    release_spinlock(&page_lock);
    int_restore_interrupts();

    return first_page;
}
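
// Translate a physical page number into its vm_page structure, or NULL if
// the page number falls outside the tracked range.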
vm_page *vm_lookup_page(addr_t page_num)
{
    if(page_num < physical_page_offset)
        return NULL;
    page_num -= physical_page_offset;
    if(page_num >= num_pages)
        return NULL;

    VERIFY_VM_PAGE(&all_pages[page_num]);

    return &all_pages[page_num];
}
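
// Move a page between states, updating both the per-state counters in
// vm_info and the queue the page lives on. BUSY, ACTIVE, INACTIVE, WIRED
// and UNUSED pages all share the active queue; the counters are what tell
// them apart. The caller must hold page_lock.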
static int vm_page_set_state_nolock(vm_page *page, int page_state)
{
    page_queue *from_q = NULL;
    page_queue *to_q = NULL;

    switch(page->state) {
        case PAGE_STATE_BUSY:
            vm_info.busy_pages--;
            goto removefromactive;
        case PAGE_STATE_ACTIVE:
            vm_info.active_pages--;
            goto removefromactive;
        case PAGE_STATE_INACTIVE:
            vm_info.inactive_pages--;
            goto removefromactive;
        case PAGE_STATE_WIRED:
            vm_info.wired_pages--;
            goto removefromactive;
        case PAGE_STATE_UNUSED:
            vm_info.unused_pages--;
removefromactive:
            from_q = &page_active_queue;
            break;
        case PAGE_STATE_MODIFIED:
            vm_info.modified_pages--;
            from_q = &page_modified_queue;
            break;
        case PAGE_STATE_MODIFIED_TEMPORARY:
            vm_info.modified_temporary_pages--;
            from_q = &page_modified_temporary_queue;
            break;
        case PAGE_STATE_FREE:
            vm_info.free_pages--;
            from_q = &page_free_queue;
            break;
        case PAGE_STATE_CLEAR:
            vm_info.clear_pages--;
            from_q = &page_clear_queue;
            break;
        default:
            panic("vm_page_set_state: vm_page %p in invalid state %d\n", page, page->state);
    }

    switch(page_state) {
        case PAGE_STATE_BUSY:
            vm_info.busy_pages++;
            goto addtoactive;
        case PAGE_STATE_ACTIVE:
            vm_info.active_pages++;
            goto addtoactive;
        case PAGE_STATE_INACTIVE:
            vm_info.inactive_pages++;
            goto addtoactive;
        case PAGE_STATE_WIRED:
            vm_info.wired_pages++;
            goto addtoactive;
        case PAGE_STATE_UNUSED:
            vm_info.unused_pages++;
addtoactive:
            to_q = &page_active_queue;
            break;
        case PAGE_STATE_MODIFIED:
            vm_info.modified_pages++;
            to_q = &page_modified_queue;
            break;
        case PAGE_STATE_MODIFIED_TEMPORARY:
            vm_info.modified_temporary_pages++;
            to_q = &page_modified_temporary_queue;
            break;
        case PAGE_STATE_FREE:
            vm_info.free_pages++;
            to_q = &page_free_queue;
            break;
        case PAGE_STATE_CLEAR:
            vm_info.clear_pages++;
            to_q = &page_clear_queue;
            break;
        default:
            panic("vm_page_set_state: invalid target state %d\n", page_state);
    }

    move_page_to_queue(from_q, to_q, page);
    page->state = page_state;

    return 0;
}

int vm_page_set_state(vm_page *page, int page_state)
{
    int err;

    VERIFY_VM_PAGE(page);

    int_disable_interrupts();
    acquire_spinlock(&page_lock);

    err = vm_page_set_state_nolock(page, page_state);

    release_spinlock(&page_lock);
    int_restore_interrupts();

    return err;
}

addr_t vm_page_num_pages()
{
    return num_pages;
}

addr_t vm_page_num_free_pages()
{
    return page_free_queue.count + page_clear_queue.count;
}

void dump_free_page_table(int argc, char **argv)
{
    dprintf("not finished\n");
}

void dump_page_stats(int argc, char **argv)
{
    unsigned int page_types[9];
    unsigned int i;

    memset(page_types, 0, sizeof(page_types));

    // count the pages in each state; the states are small integers that index page_types
    for(i=0; i<num_pages; i++) {
        page_types[all_pages[i].state]++;
    }

    dprintf("page stats:\n");
    dprintf("active: %d\ninactive: %d\nbusy: %d\nunused: %d\n",
        page_types[PAGE_STATE_ACTIVE], page_types[PAGE_STATE_INACTIVE], page_types[PAGE_STATE_BUSY], page_types[PAGE_STATE_UNUSED]);
    dprintf("modified: %d\nmodified_temporary %d\nfree: %d\nclear: %d\nwired: %d\n",
        page_types[PAGE_STATE_MODIFIED], page_types[PAGE_STATE_MODIFIED_TEMPORARY], page_types[PAGE_STATE_FREE], page_types[PAGE_STATE_CLEAR], page_types[PAGE_STATE_WIRED]);
}

#if 0
// older implementation, based on the now-removed free_page_table array
static void dump_free_page_table(int argc, char **argv)
{
    unsigned int i = 0;
    unsigned int free_start = END_OF_LIST;
    unsigned int inuse_start = PAGE_INUSE;

    dprintf("dump_free_page_table():\n");
    dprintf("first_free_page_index = %d\n", first_free_page_index);

    while(i < free_page_table_size) {
        if(free_page_table[i] == PAGE_INUSE) {
            if(inuse_start != PAGE_INUSE) {
                i++;
                continue;
            }
            if(free_start != END_OF_LIST) {
                dprintf("free from %d -> %d\n", free_start + free_page_table_base, i-1 + free_page_table_base);
                free_start = END_OF_LIST;
            }
            inuse_start = i;
        } else {
            if(free_start != END_OF_LIST) {
                i++;
                continue;
            }
            if(inuse_start != PAGE_INUSE) {
                dprintf("inuse from %d -> %d\n", inuse_start + free_page_table_base, i-1 + free_page_table_base);
                inuse_start = PAGE_INUSE;
            }
            free_start = i;
        }
        i++;
    }
    if(inuse_start != PAGE_INUSE) {
        dprintf("inuse from %d -> %d\n", inuse_start + free_page_table_base, i-1 + free_page_table_base);
    }
    if(free_start != END_OF_LIST) {
        dprintf("free from %d -> %d\n", free_start + free_page_table_base, i-1 + free_page_table_base);
    }
    for(i=0; i<free_page_table_size; i++) {
        dprintf("%d->%d ", i, free_page_table[i]);
    }
    dprintf("\n");
}
#endif
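
// Carve 'size' bytes of kernel virtual address space out of the boot-time
// virt_alloc_range list: first between existing ranges, then after the last
// one, then before the first one.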
static addr_t vm_alloc_vspace_from_ka_struct(kernel_args *ka, unsigned int size)
{
    addr_t spot = 0;
    unsigned int i;
    int last_valloc_entry = 0;

    size = PAGE_ALIGN(size);
    // find a slot in the virtual allocation addr_t range
    for(i=1; i<ka->num_virt_alloc_ranges; i++) {
        last_valloc_entry = i;
        // check to see if the space between this one and the last is big enough
        if(ka->virt_alloc_range[i].start -
          (ka->virt_alloc_range[i-1].start + ka->virt_alloc_range[i-1].size) >= size) {
            spot = ka->virt_alloc_range[i-1].start + ka->virt_alloc_range[i-1].size;
            ka->virt_alloc_range[i-1].size += size;
            return spot;
        }
    }
    // we didn't find one between allocation ranges. this is ok.
    // see if there's a gap after the last one
    if(ka->virt_alloc_range[last_valloc_entry].start + ka->virt_alloc_range[last_valloc_entry].size + size <=
      KERNEL_BASE + (KERNEL_SIZE - 1)) {
        spot = ka->virt_alloc_range[last_valloc_entry].start + ka->virt_alloc_range[last_valloc_entry].size;
        ka->virt_alloc_range[last_valloc_entry].size += size;
        return spot;
    }
    // see if there's a gap before the first one
    if(ka->virt_alloc_range[0].start > KERNEL_BASE) {
        if(ka->virt_alloc_range[0].start - KERNEL_BASE >= size) {
            ka->virt_alloc_range[0].start -= size;
            spot = ka->virt_alloc_range[0].start;
            return spot;
        }
    }

    return spot;
}

// XXX horrible brute-force method of determining if the page can be allocated
static bool is_page_in_phys_range(kernel_args *ka, addr_t paddr)
{
    unsigned int i;

    for(i=0; i<ka->num_phys_mem_ranges; i++) {
        if(paddr >= ka->phys_mem_range[i].start &&
          paddr < ka->phys_mem_range[i].start + ka->phys_mem_range[i].size) {
            return true;
        }
    }
    return false;
}
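
// Allocate one physical page by extending one of the boot-time
// phys_alloc_range runs, making sure the new page neither collides with the
// next allocated range nor falls outside physical memory. Returns the page
// number, or 0 on failure.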
addr_t vm_alloc_ppage_from_kernel_struct(kernel_args *ka)
{
    unsigned int i;

    for(i=0; i<ka->num_phys_alloc_ranges; i++) {
        addr_t next_page;

        next_page = ka->phys_alloc_range[i].start + ka->phys_alloc_range[i].size;
        // see if the page after the next allocated paddr run can be allocated
        if(i + 1 < ka->num_phys_alloc_ranges && ka->phys_alloc_range[i+1].size != 0) {
            // see if the next page will collide with the next allocated range
            if(next_page >= ka->phys_alloc_range[i+1].start)
                continue;
        }
        // see if the next physical page fits in the memory block
        if(is_page_in_phys_range(ka, next_page)) {
            // it does; claim it by growing this allocation range
            ka->phys_alloc_range[i].size += PAGE_SIZE;
            return ((ka->phys_alloc_range[i].start + ka->phys_alloc_range[i].size - PAGE_SIZE) / PAGE_SIZE);
        }
    }

    return 0; // could not allocate a block
}
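
// Boot-time allocator used before the heap and vm are up: grabs virtual
// space, then backs it one page at a time with physical pages and maps them
// with vm_translation_map_quick_map().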
addr_t vm_alloc_from_ka_struct(kernel_args *ka, unsigned int size, int lock)
{
    addr_t vspot;
    addr_t pspot;
    unsigned int i;

    // find the vaddr to allocate at
    vspot = vm_alloc_vspace_from_ka_struct(ka, size);
//  dprintf("alloc_from_ka_struct: vaddr 0x%x\n", vspot);

    // map the pages
    for(i=0; i<PAGE_ALIGN(size)/PAGE_SIZE; i++) {
        pspot = vm_alloc_ppage_from_kernel_struct(ka);
//      dprintf("alloc_from_ka_struct: paddr 0x%x\n", pspot);
        if(pspot == 0)
            panic("error allocating page from ka_struct!\n");
        vm_translation_map_quick_map(ka, vspot + i*PAGE_SIZE, pspot * PAGE_SIZE, lock, &vm_alloc_ppage_from_kernel_struct);
//      pmap_map_page(pspot, vspot + i*PAGE_SIZE, lock);
    }

    return vspot;
}