Merge tag 'mac80211-for-davem-2018-10-04' of git://git.kernel.org/pub/scm/linux/kerne...
[linux-2.6/btrfs-unstable.git] / mm / balloon_compaction.c
blobef858d547e2d7b69676aff410a52c32edc7986f0
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>
14 * balloon_page_alloc - allocates a new page for insertion into the balloon
15 * page list.
17 * Driver must call it to properly allocate a new enlisted balloon page.
18 * Driver must call balloon_page_enqueue before definitively removing it from
19 * the guest system. This function returns the page address for the recently
20 * allocated page or NULL in the case we fail to allocate a new page this turn.
22 struct page *balloon_page_alloc(void)
24 struct page *page = alloc_page(balloon_mapping_gfp_mask() |
25 __GFP_NOMEMALLOC | __GFP_NORETRY);
26 return page;
28 EXPORT_SYMBOL_GPL(balloon_page_alloc);
31 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
32 * page list.
33 * @b_dev_info: balloon device descriptor where we will insert a new page to
34 * @page: new page to enqueue - allocated using balloon_page_alloc.
36 * Driver must call it to properly enqueue a new allocated balloon page
37 * before definitively removing it from the guest system.
38 * This function returns the page address for the recently enqueued page or
39 * NULL in the case we fail to allocate a new page this turn.
41 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
42 struct page *page)
44 unsigned long flags;
47 * Block others from accessing the 'page' when we get around to
48 * establishing additional references. We should be the only one
49 * holding a reference to the 'page' at this point.
51 BUG_ON(!trylock_page(page));
52 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
53 balloon_page_insert(b_dev_info, page);
54 __count_vm_event(BALLOON_INFLATE);
55 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
56 unlock_page(page);
58 EXPORT_SYMBOL_GPL(balloon_page_enqueue);
61 * balloon_page_dequeue - removes a page from balloon's page list and returns
62 * the its address to allow the driver release the page.
63 * @b_dev_info: balloon device decriptor where we will grab a page from.
65 * Driver must call it to properly de-allocate a previous enlisted balloon page
66 * before definetively releasing it back to the guest system.
67 * This function returns the page address for the recently dequeued page or
68 * NULL in the case we find balloon's page list temporarily empty due to
69 * compaction isolated pages.
71 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
73 struct page *page, *tmp;
74 unsigned long flags;
75 bool dequeued_page;
77 dequeued_page = false;
78 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
79 list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
81 * Block others from accessing the 'page' while we get around
82 * establishing additional references and preparing the 'page'
83 * to be released by the balloon driver.
85 if (trylock_page(page)) {
86 #ifdef CONFIG_BALLOON_COMPACTION
87 if (PageIsolated(page)) {
88 /* raced with isolation */
89 unlock_page(page);
90 continue;
92 #endif
93 balloon_page_delete(page);
94 __count_vm_event(BALLOON_DEFLATE);
95 unlock_page(page);
96 dequeued_page = true;
97 break;
100 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
102 if (!dequeued_page) {
104 * If we are unable to dequeue a balloon page because the page
105 * list is empty and there is no isolated pages, then something
106 * went out of track and some balloon pages are lost.
107 * BUG() here, otherwise the balloon driver may get stuck into
108 * an infinite loop while attempting to release all its pages.
110 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
111 if (unlikely(list_empty(&b_dev_info->pages) &&
112 !b_dev_info->isolated_pages))
113 BUG();
114 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
115 page = NULL;
117 return page;
119 EXPORT_SYMBOL_GPL(balloon_page_dequeue);
121 #ifdef CONFIG_BALLOON_COMPACTION
123 bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
126 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
127 unsigned long flags;
129 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
130 list_del(&page->lru);
131 b_dev_info->isolated_pages++;
132 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
134 return true;
137 void balloon_page_putback(struct page *page)
139 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
140 unsigned long flags;
142 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
143 list_add(&page->lru, &b_dev_info->pages);
144 b_dev_info->isolated_pages--;
145 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
149 /* move_to_new_page() counterpart for a ballooned page */
150 int balloon_page_migrate(struct address_space *mapping,
151 struct page *newpage, struct page *page,
152 enum migrate_mode mode)
154 struct balloon_dev_info *balloon = balloon_page_device(page);
157 * We can not easily support the no copy case here so ignore it as it
158 * is unlikely to be use with ballon pages. See include/linux/hmm.h for
159 * user of the MIGRATE_SYNC_NO_COPY mode.
161 if (mode == MIGRATE_SYNC_NO_COPY)
162 return -EINVAL;
164 VM_BUG_ON_PAGE(!PageLocked(page), page);
165 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
167 return balloon->migratepage(balloon, newpage, page, mode);
170 const struct address_space_operations balloon_aops = {
171 .migratepage = balloon_page_migrate,
172 .isolate_page = balloon_page_isolate,
173 .putback_page = balloon_page_putback,
175 EXPORT_SYMBOL_GPL(balloon_aops);
177 #endif /* CONFIG_BALLOON_COMPACTION */