arch/x86/mm/memblock.c

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/* Check for already reserved areas */
bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
		/* Reservation overlaps the top of the candidate: keep the part below it */
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
		/* Reservation overlaps the bottom: move addr past the reservation */
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
		/* Candidate lies entirely inside the reservation */
		if (last <= (r->base + r->size) && addr >= r->base) {
			*sizep = 0;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

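/*
 * Worked example (added for illustration; the numbers are not from the
 * original file): with a candidate *addrp = 0x1000, *sizep = 0x3000 and a
 * single reserved region [0x2000, 0x2800), the first branch above trims
 * the candidate to [0x1000, 0x2000) and returns true; a second call
 * changes nothing and returns false, which is what terminates the
 * caller's retry loop.
 */
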
/*
 * Find the next free range after start; the size is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = round_up(ei_start, align);
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		/* Shrink the candidate until it avoids all reservations */
		while (memblock_x86_check_reserved_size(&addr, sizep, align))
			;

		if (*sizep)
			return addr;
	}

	return MEMBLOCK_ERROR;
}

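/*
 * Usage sketch (added for illustration, not part of the original file):
 * a boot-time caller could walk every free range by restarting the
 * search just past each hit.  The function name and the pr_info()
 * report are assumptions made for this example.
 */
static void __init __maybe_unused example_walk_free_ranges(void)
{
	u64 size, addr = 0;

	for (;;) {
		addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
		if (addr == MEMBLOCK_ERROR)
			break;
		pr_info("free: [%#018llx-%#018llx]\n", addr, addr + size - 1);
		addr += size;	/* resume scanning above this range */
	}
}
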
static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (mem == MEMBLOCK_ERROR)
		panic("can not find more space for range array");

	/*
	 * This range is temporary, so don't reserve it: it will not be
	 * clobbered because we do not allocate a new buffer before we
	 * discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}

static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself first */
	memblock_free_reserved_regions();

	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		memblock_dbg("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put the region array back */
	memblock_reserve_reserved_regions();
}

struct count_data {
	int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
				unsigned long end_pfn, void *datax)
{
	struct count_data *data = datax;

	data->nr++;

	return 0;
}

static int __init count_early_node_map(int nodeid)
{
	struct count_data data;

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	return data.nr;
}

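/*
 * Sketch (added for illustration, not part of the original file): the
 * same work_with_active_regions() callback protocol can sum a node's
 * pages instead of counting its ranges:
 *
 *	static int __init sum_pages_fn(unsigned long start_pfn,
 *				       unsigned long end_pfn, void *datax)
 *	{
 *		unsigned long *total = datax;
 *
 *		*total += end_pfn - start_pfn;
 *		return 0;
 *	}
 *
 * sum_pages_fn is a hypothetical name; only the callback signature is
 * taken from the code above.
 */
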
int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	int count;
	struct range *range;
	int nr_range;

	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to build the
	 * range array first, then trim it to the requested PFN window.
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
	subtract_range(range, count, 0, start_pfn);
	subtract_range(range, count, end_pfn, -1ULL);

	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}

int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	unsigned long end_pfn = -1UL;

#ifdef CONFIG_X86_32
	end_pfn = max_low_pfn;
#endif
	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
}

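/*
 * Usage sketch (added for illustration, not part of the original file):
 * an early page-allocator path could fetch a node's free ranges and
 * report them.  The function name and the report format are assumptions.
 */
static void __init __maybe_unused example_show_free_ranges(int nodeid)
{
	struct range *range;
	int i, nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);
	for (i = 0; i < nr_range; i++)
		pr_info("node %d free: PFN [%#llx-%#llx)\n",
			nodeid, range[i].start, range[i].end);
}
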
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range, final_start, final_end);
	}
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract memblock.reserved.region in range? */
	if (!get_free)
		goto sort_and_count_them;
	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}

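/*
 * Usage sketch (added for illustration, not part of the original file):
 * comparing the two wrappers above over the low 4 GiB shows how much of
 * that window is RAM and how much of the RAM is still unreserved.  The
 * function name is an assumption.
 */
static void __init __maybe_unused example_report_low_memory(void)
{
	u64 total = memblock_x86_memory_in_range(0, 1ULL << 32);
	u64 free = memblock_x86_free_memory_in_range(0, 1ULL << 32);

	pr_info("low 4G: %llu bytes RAM, %llu bytes free\n", total, free);
}
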
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

	memblock_reserve(start, end - start);
}

void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

	memblock_free(start, end - start);
}

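/*
 * Usage sketch (added for illustration, not part of the original file):
 * early x86 setup code typically pins the kernel image with a call such
 * as
 *
 *	memblock_x86_reserve_range(__pa_symbol(&_text),
 *				   __pa_symbol(&__bss_stop), "TEXT DATA BSS");
 *
 * and returns a range it no longer needs with
 * memblock_x86_free_range(start, end).  The image-bound symbols are an
 * assumption about the caller.
 */
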
/*
 * This must be called after memblock_x86_register_active_regions(), so
 * that early_node_map[] is already filled.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;

	addr = find_memory_core_early(nid, size, align, start, end);
	if (addr != MEMBLOCK_ERROR)
		return addr;

	/* Fallback: start/end should already be within the node's range */
	return memblock_find_in_range(start, end, size, align);
}

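/*
 * Usage sketch (added for illustration, not part of the original file):
 * a NUMA setup path could place a page-sized per-node scratch buffer
 * near its node, falling back transparently when the node has no room.
 * The function name and the search bounds are assumptions.
 */
static u64 __init __maybe_unused example_alloc_near_node(int nid)
{
	return memblock_x86_find_in_range_node(nid, 0, memblock.current_limit,
					       PAGE_SIZE, PAGE_SIZE);
}
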
/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Clamp partial overlaps to the node's bounds */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

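/*
 * Usage sketch (added for illustration, not part of the original file):
 * a flat (non-NUMA) configuration would register all of RAM as node 0,
 * e.g.
 *
 *	memblock_x86_register_active_regions(0, 0, max_pfn);
 *
 * where max_pfn is the usual global limit PFN, assumed from the
 * caller's context.
 */
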
/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}

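/*
 * Usage sketch (added for illustration, not part of the original file):
 * node-sizing code can reject a candidate span that is mostly hole,
 * e.g.
 *
 *	if (memblock_x86_hole_size(start, end) > (end - start) / 2)
 *		return;
 *
 * The one-half threshold is arbitrary and only for illustration.
 */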