/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/e820.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};
static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
#define inc_totalhigh_pages() do {} while (0)
#define dec_totalhigh_pages() do {} while (0)
#endif
/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
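/*
 * Flag rationale (informational): GFP_HIGHUSER lets ballooned-out pages come
 * from highmem, __GFP_NOWARN suppresses allocation-failure warnings,
 * __GFP_NORETRY stops the allocator retrying hard enough to wake the OOM
 * killer, and __GFP_NOMEMALLOC keeps us away from the emergency reserves.
 */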
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}
static void balloon_append(struct page *page)
{
	__balloon_append(page);
	if (PageHighMem(page))
		dec_totalhigh_pages();
	totalram_pages--;
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page)) {
		balloon_stats.balloon_high--;
		inc_totalhigh_pages();
	} else
		balloon_stats.balloon_low--;

	totalram_pages++;

	return page;
}
static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}
static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}
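/*
 * Backoff example (informational): with max_schedule_delay = 32, repeated
 * failures reschedule the worker after 2, 4, 8, 16 and then 32 seconds
 * (schedule_delay doubles and is clamped on each pass), until an operation
 * completes with BP_DONE and the delay is reset back to 1 second.
 */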
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}
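/*
 * Credit example (informational): with current_pages = 800 and a target of
 * 1000 the credit is +200, so balloon_process() tries to repopulate 200
 * frames via increase_reservation(); with a target of 600 the credit is
 * -200 and decrease_reservation() hands 200 frames back to Xen. The target
 * is first clamped to current_pages plus the pages held in the balloon.
 */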
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long pfn, i;
	struct page *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (xen_pv_domain() && !PageHighMem(page)) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long pfn, i;
	struct page *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(gfp)) == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (xen_pv_domain() && !PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}
/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0)
			state = increase_reservation(credit);

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}
/* Sets a new target and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
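/*
 * Usage sketch (informational, not part of this file): the xenstore-driven
 * front end watches the memory/target node, whose value is in KiB, and is
 * expected to convert that value to a page count before calling us, e.g.
 *
 *	balloon_set_new_target(target_kib >> (PAGE_SHIFT - 10));
 *
 * where target_kib holds the parsed memory/target value.
 */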
/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;

	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;
			st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);
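/*
 * Usage sketch (informational): a driver that needs guest frames with no
 * RAM behind them, for example to map another domain's granted pages into,
 * might pair the two helpers like this:
 *
 *	struct page *pages[16];
 *
 *	if (alloc_xenballooned_pages(16, pages) == 0) {
 *		...map foreign frames over pages[0..15] and use them...
 *		free_xenballooned_pages(16, pages);
 *	}
 */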
static int __init balloon_init(void)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	if (!xen_domain())
		return -ENODEV;

	pr_info("xen/balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = xen_pv_domain() ? min(xen_start_info->nr_pages, max_pfn) : max_pfn;
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

	/*
	 * Initialise the balloon with excess memory space. We need
	 * to make sure we don't add memory which doesn't exist or
	 * logically exist. The E820 map can be trimmed to be smaller
	 * than the amount of physical memory due to the mem= command
	 * line parameter. And if this is a 32-bit non-HIGHMEM kernel
	 * on a system with memory which requires highmem to access,
	 * don't try to use it.
	 */
	extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
			    (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
	for (pfn = PFN_UP(xen_extra_mem_start);
	     pfn < extra_pfn_end;
	     pfn++) {
		page = pfn_to_page(pfn);
		/* totalram_pages and totalhigh_pages do not include the boot-time
		   balloon extension, so don't subtract from it. */
		__balloon_append(page);
	}

	return 0;
}

subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");