xen-balloon: Move core balloon functionality out of module
[linux-2.6.git] / drivers/xen/balloon.c
/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/e820.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_HIGHMEM
#define inc_totalhigh_pages() (totalhigh_pages++)
#define dec_totalhigh_pages() (totalhigh_pages--)
#else
#define inc_totalhigh_pages() do {} while (0)
#define dec_totalhigh_pages() do {} while (0)
#endif

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
		dec_totalhigh_pages();
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	totalram_pages--;
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(void)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page)) {
		balloon_stats.balloon_high--;
		inc_totalhigh_pages();
	} else
		balloon_stats.balloon_low--;

	totalram_pages++;

	return page;
}

static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

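/*
 * update_schedule: exponential back-off for the retry path.  A successful
 * pass (BP_DONE) resets the delay and retry count; a failed pass doubles
 * schedule_delay (capped at max_schedule_delay) and gives up with
 * BP_ECANCELED once retry_count exceeds max_retry_count.  With the
 * defaults set in balloon_init() the retry delays run 1, 2, 4, 8, 16,
 * 32, 32, ... seconds.
 */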
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

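/*
 * current_target: clamp the requested target to what is actually
 * reachable right now, i.e. the pages currently populated plus
 * everything sitting in the balloon.
 */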
static unsigned long current_target(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target;
}

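/*
 * increase_reservation: deflate the balloon by nr_pages (at most one
 * frame_list batch).  Ask Xen to populate the PFNs queued on the balloon
 * list, re-establish the P2M entries and (for lowmem) the kernel linear
 * mapping, then release the pages back to the page allocator.
 */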
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long pfn, i;
	struct page *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

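/*
 * decrease_reservation: inflate the balloon by nr_pages (at most one
 * frame_list batch).  Allocate pages, scrub them, tear down any kernel
 * mappings, invalidate their P2M entries, queue them on the balloon
 * list and hand the backing machine frames back to Xen.
 */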
static enum bp_state decrease_reservation(unsigned long nr_pages)
{
	enum bp_state state = BP_DONE;
	unsigned long pfn, i;
	struct page *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_BALLOON);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (!PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_target() - balloon_stats.current_pages;

		if (credit > 0)
			state = increase_reservation(credit);

		if (credit < 0)
			state = decrease_reservation(-credit);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker,
				      balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

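/*
 * Illustrative caller (a minimal sketch, not compiled in this file): the
 * module half of this driver is expected to watch the xenstore
 * "memory/target" node and feed the value to balloon_set_new_target().
 * The callback below assumes the xenbus watch API from
 * include/xen/xenbus.h; the xenstore value is in KiB, hence the shift by
 * PAGE_SHIFT - 10 to convert it to a page count.
 *
 *	static void watch_target(struct xenbus_watch *watch,
 *				 const char **vec, unsigned int len)
 *	{
 *		unsigned long long new_target;
 *
 *		if (xenbus_scanf(XBT_NIL, "memory", "target",
 *				 "%llu", &new_target) != 1)
 *			return;
 *
 *		balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 *	}
 */
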
static int __init balloon_init(void)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen/balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

	/*
	 * Initialise the balloon with excess memory space.  We need
	 * to make sure we don't add memory which doesn't physically
	 * or logically exist.  The E820 map can be trimmed to be
	 * smaller than the amount of physical memory due to the mem=
	 * command line parameter.  And if this is a 32-bit non-HIGHMEM
	 * kernel on a system with memory which requires highmem to
	 * access, don't try to use it.
	 */
	extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
			    (unsigned long)PFN_DOWN(xen_extra_mem_start +
						    xen_extra_mem_size));
	for (pfn = PFN_UP(xen_extra_mem_start);
	     pfn < extra_pfn_end;
	     pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages doesn't include the boot-time balloon
		 * extension, so don't subtract from it.
		 */
		__balloon_append(page);
	}

	return 0;
}

subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");