/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>

#include <asm/tlbflush.h>
extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}
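
/*
 * Note on zone_type above: node_zones[] is an array embedded in struct
 * pglist_data, so the pointer difference (zone - pgdat->node_zones)
 * yields the zone index. Illustrative sketch, not part of this file:
 *
 *	struct zone *z = &pgdat->node_zones[ZONE_NORMAL];
 *	int idx = z - pgdat->node_zones;   (idx == ZONE_NORMAL)
 */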
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}
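
/*
 * Illustrative sketch (not part of this file): __add_section() handles
 * exactly one sparsemem section, so callers pass a section-aligned pfn.
 * On x86_64, for instance, a section is 1 << PFN_SECTION_SHIFT pages,
 * i.e. 128MB worth of 4KB pages:
 *
 *	unsigned long pfn = 5UL << PFN_SECTION_SHIFT;   (start of section 5)
 *	err = __add_section(zone, pfn);   (-EEXIST if a mem_map exists here)
 */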
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
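
/*
 * Illustrative sketch of the expected arch-side caller, loosely modeled
 * on x86_64's arch_add_memory(); the body below is an assumption for
 * illustration only and not part of this file. The arch picks the target
 * zone, sets up its page tables, then hands the range to __add_pages().
 */
#if 0	/* example only */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	init_memory_mapping(start, start + size - 1);	/* arch page tables */
	return __add_pages(zone, start_pfn, nr_pages);
}
#endif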
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
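
/*
 * Worked example (illustrative, not from the original source): both span
 * helpers compute the union of the old and hot-added pfn intervals. A
 * zone spanning pfns [0x10000, 0x18000) grown by [0x18000, 0x20000)
 * keeps zone_start_pfn == 0x10000 and ends up with
 * spanned_pages == max(0x18000, 0x20000) - 0x10000 == 0x10000.
 */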
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
			   ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* the pages of this region are not onlined yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	return 0;
}
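
/*
 * Usage note (not part of this file): online_pages() is reached from the
 * memory sysfs driver (drivers/base/memory.c) when userspace onlines a
 * memory block, e.g.:
 *
 *	echo online > /sys/devices/system/memory/memory42/state
 *
 * where memory42 is a made-up block number for illustration.
 */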
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; there are no present pages yet */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}
static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}
/* add this memory range to the iomem resource tree */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		       (unsigned long long)res->start,
		       (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}
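
/*
 * Illustrative failure mode (values are made up): if [start, start + size)
 * overlaps a resource already in the iomem tree, request_resource()
 * fails, NULL is returned, and add_memory() below maps that to -EEXIST:
 *
 *	res = register_memory_resource(0x40000000ULL, 0x8000000ULL);
 *	if (!res)
 *		return -EEXIST;   (range already claimed)
 */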
static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat) {
			ret = -ENOMEM;
			goto error;	/* don't leak the resource claimed above */
		}
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call the arch's memory hot-add hook */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from this point on */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so check it with BUG_ON() to catch it, reluctantly..
		 */
		BUG_ON(ret);
	}

	return ret;

error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
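
/*
 * Illustrative end-to-end caller, loosely modeled on the ACPI memory
 * hotplug driver (drivers/acpi/acpi_memhotplug.c); the helper name and
 * node choice below are assumptions, not part of this file.
 */
#if 0	/* example only */
static int example_hot_add(u64 start, u64 size)	/* hypothetical helper */
{
	int nid = 0;	/* a real driver derives the node from firmware info */
	int ret;

	ret = add_memory(nid, start, size);
	if (ret)
		return ret;
	/* userspace then onlines the new memoryX blocks via sysfs */
	return 0;
}
#endif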