/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/sections.h>
#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
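/*
 * Note: the handle this allocator returns to callers is a "disguised"
 * pointer to a struct percpu_data.  For context, the definitions in
 * <linux/percpu.h> of this era look roughly like the sketch below
 * (reproduced here as an assumption, not part of this file):
 *
 *	struct percpu_data {
 *		void *ptrs[1];
 *	};
 *
 *	#define __percpu_disguise(pdata) \
 *		(struct percpu_data *)~(unsigned long)(pdata)
 *
 * The bitwise-complement disguise keeps callers from dereferencing the
 * handle directly; per_cpu_ptr() undoes it to reach ptrs[cpu].
 */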
/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}
/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}
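/*
 * Illustrative hotplug handler sketch, as suggested by the kernel-doc
 * above.  This is an assumption, not part of this file; 'pdata' and
 * 'size' are hypothetical placeholders, and since percpu_populate()/
 * percpu_depopulate() are static here such a handler could only live
 * in this file:
 *
 *	static int cpu_cb(struct notifier_block *nb, unsigned long action,
 *			  void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			if (!percpu_populate(pdata, size, GFP_KERNEL, cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_DEAD:
 *			percpu_depopulate(pdata, cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */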
/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
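/*
 * Failure semantics: __percpu_populate_mask() is all-or-nothing.  It
 * records each successfully populated cpu in the local 'populated'
 * mask and, on the first allocation failure, depopulates exactly that
 * set before returning -ENOMEM, so no partially populated object ever
 * escapes to the caller.
 */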
/**
 * alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to free.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
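/*
 * Usage sketch (illustrative only; relies on the alloc_percpu() and
 * per_cpu_ptr() wrappers from <linux/percpu.h>, which call into
 * __alloc_percpu() and undo the pointer disguise respectively):
 *
 *	long *counters = alloc_percpu(long);	(* zeroed, one per cpu *)
 *
 *	if (!counters)
 *		return -ENOMEM;
 *	(*per_cpu_ptr(counters, get_cpu()))++;	(* bump this cpu's slot *)
 *	put_cpu();
 *	...
 *	free_percpu(counters);
 */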
/*
 * Generic percpu area setup.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
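/*
 * How the offsets above are consumed: a static per-cpu variable lives
 * in the percpu section between __per_cpu_start and __per_cpu_end, and
 * cpu N's copy sits at __per_cpu_offset[N] bytes from the original,
 * which setup_per_cpu_areas() computed as (chunk base for N) -
 * __per_cpu_start.  For context, the generic accessor in
 * <asm-generic/percpu.h> of this era looks roughly like the following
 * (reproduced as an assumption, not part of this file):
 *
 *	#define per_cpu(var, cpu) \
 *		(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
 */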