/*
 *  linux/mm/memory_hotplug.c
 */
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
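
/*
 * Core hooks for memory hot-add: __add_pages() wires new sparse sections
 * into a zone at hot-add time, and online_pages() later hands those pages
 * to the page allocator.
 */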
extern void zonetable_add(struct zone *zone, int nid, int zid,
			  unsigned long pfn, unsigned long size);
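
/*
 * Hook a newly added section into @zone: initialize the zone if it was
 * empty, set up the section's struct pages, and register the range in
 * the pfn-to-zone lookup table.
 */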
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;	/* index into node_zones[] */
	if (!populated_zone(zone)) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
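
/*
 * Add one sparse-memory section's worth of pages: allocate the section's
 * mem_map, grow the zone over it, then create the corresponding sysfs
 * memory block.
 */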
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/*
		 * We want to keep adding the rest of the
		 * sections if the first ones already exist.
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
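
/*
 * Illustrative caller, not part of this file: an architecture's
 * arch_add_memory() would pick a target zone and pass the pfn range
 * through, along the lines of
 *
 *	struct zone *zone = NODE_DATA(nid)->node_zones + ZONE_NORMAL;
 *
 *	return __add_pages(zone, start >> PAGE_SHIFT, size >> PAGE_SHIFT);
 */

/*
 * Extend @zone so that it spans the hot-added pfn range; done under the
 * zone's span seqlock so readers see a consistent start/span pair.
 */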
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
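
/*
 * Likewise for the node: widen the pgdat's span to cover the new range.
 * The caller holds the pgdat resize lock, so no extra locking here.
 */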
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}
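
/*
 * Make a hot-added pfn range usable: grow the zone and node spans, hand
 * each page to the buddy allocator, and rebuild the zonelists if this
 * onlined a previously empty zone.
 */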
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after online.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);

		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();

	return 0;
}
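
/*
 * Illustrative trigger, not part of this file: userspace onlines a memory
 * block through the memory sysfs interface, e.g.
 *
 *	echo online > /sys/devices/system/memory/memoryN/state
 *
 * which reaches online_pages() for that block's pfn range while
 * memory_block->state_sem is held (see the comment above pfn_to_page()).
 */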