/* include/linux/percpu.h */
#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>
#include <linux/init.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE		(8 << 10)
#else
#define PERCPU_MODULE_RESERVE		0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM						\
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif
/*
 * Must be an lvalue. Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({				\
	preempt_disable();				\
	&__get_cpu_var(var); }))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var) do {				\
	(void)&(var);					\
	preempt_enable();				\
} while (0)
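
/*
 * Example (illustrative sketch, not part of this header's API; variable
 * name is made up): bumping a statically defined per-cpu counter, with
 * preemption protection provided by the accessor pair:
 *
 *	DEFINE_PER_CPU(unsigned long, my_hits);
 *
 *	static void count_hit(void)
 *	{
 *		get_cpu_var(my_hits)++;		// preemption disabled here
 *		put_cpu_var(my_hits);		// preemption enabled again
 *	}
 */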
#define get_cpu_ptr(var) ({				\
	preempt_disable();				\
	this_cpu_ptr(var); })

#define put_cpu_ptr(var) do {				\
	(void)(var);					\
	preempt_enable();				\
} while (0)
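
/*
 * Example (illustrative sketch; struct and field names are made up):
 * get_cpu_ptr()/put_cpu_ptr() are the pointer-based analogues, typically
 * used with memory obtained from alloc_percpu():
 *
 *	struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
 *
 *	struct my_stats *s = get_cpu_ptr(stats);	// preempt off
 *	s->packets++;
 *	put_cpu_ptr(stats);				// preempt on
 */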
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

/*
 * Percpu allocator can serve percpu allocations before slab is
 * initialized which allows slab to depend on the percpu allocator.
 * The following two parameters decide how much resource to
 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 */
#define PERCPU_DYNAMIC_EARLY_SLOTS	128
#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32.  A more
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif
extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
	int			nr_units;	/* aligned # of units */
	unsigned long		base_offset;	/* base address offset */
	unsigned int		*cpu_map;	/* unit->cpu map, empty
						 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
	size_t			static_size;
	size_t			reserved_size;
	size_t			dyn_size;
	size_t			unit_size;
	size_t			atom_size;
	size_t			alloc_size;
	size_t			__ai_size;	/* internal, don't use */
	int			nr_groups;	/* 0 if grouping unnecessary */
	struct pcpu_group_info	groups[];
};

enum pcpu_fc {
	PCPU_FC_AUTO,
	PCPU_FC_EMBED,
	PCPU_FC_PAGE,

	PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
				     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
							     int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
					 void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
				pcpu_fc_alloc_fn_t alloc_fn,
				pcpu_fc_free_fn_t free_fn,
				pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif
/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#ifdef CONFIG_SMP
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
#else
#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
#endif

extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
extern bool is_kernel_percpu_address(unsigned long addr);

#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);

extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu(type)	\
	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
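
/*
 * Example (illustrative sketch; names are hypothetical): dynamic per-cpu
 * allocation paired with per_cpu_ptr() for a cross-CPU summation:
 *
 *	unsigned long __percpu *cnt = alloc_percpu(unsigned long);
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	...
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(cnt, cpu);
 *	free_percpu(cnt);
 */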
/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal char, int or long. percpu_read() evaluates to an lvalue and
 * all others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var(). Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
#ifndef percpu_read
# define percpu_read(var)						\
  ({									\
	typeof(var) *pr_ptr__ = &(var);					\
	typeof(var) pr_ret__;						\
	pr_ret__ = get_cpu_var(*pr_ptr__);				\
	put_cpu_var(*pr_ptr__);						\
	pr_ret__;							\
  })
#endif
#define __percpu_generic_to_op(var, val, op)				\
do {									\
	typeof(var) *pgto_ptr__ = &(var);				\
	get_cpu_var(*pgto_ptr__) op val;				\
	put_cpu_var(*pgto_ptr__);					\
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
#endif
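
/*
 * Example (illustrative sketch; variable name is hypothetical): the
 * percpu_*() family operates on a statically defined per-cpu variable
 * or a field of one, e.g. with DEFINE_PER_CPU(int, my_state):
 *
 *	percpu_write(my_state, 0);
 *	percpu_add(my_state, 3);
 *	if (percpu_read(my_state) > 3)
 *		...
 *
 * Each call is preemption safe on its own; consecutive calls can still
 * be interleaved by a reschedule or an interrupt.
 */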
/*
 * Branching function to split up a function into a set of functions that
 * are called for different scalar sizes of the objects handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)				\
({	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable);break;			\
	case 2: pscr_ret__ = stem##2(variable);break;			\
	case 4: pscr_ret__ = stem##4(variable);break;			\
	case 8: pscr_ret__ = stem##8(variable);break;			\
	default:							\
		__bad_size_call_parameter();break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

/*
 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
 * percpu variables.  The first has to be aligned to a double word
 * boundary and the second has to follow directly thereafter.
 */
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
({									\
	bool pdcrb_ret__;						\
	__verify_pcpu_ptr(&pcp1);					\
	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));	\
	VM_BUG_ON((unsigned long)(&pcp2) !=				\
		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
	switch(sizeof(pcp1)) {						\
	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pdcrb_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch(sizeof(variable)) {					\
		case 1: stem##1(variable, __VA_ARGS__);break;		\
		case 2: stem##2(variable, __VA_ARGS__);break;		\
		case 4: stem##4(variable, __VA_ARGS__);break;		\
		case 8: stem##8(variable, __VA_ARGS__);break;		\
		default:						\
			__bad_size_call_parameter();break;		\
	}								\
} while (0)
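
/*
 * Illustration (rough sketch of the expansion, not literal preprocessor
 * output): for a 4-byte variable, this_cpu_read(foo) becomes roughly
 *
 *	({ typeof(foo) pscr_ret__;
 *	   switch (sizeof(foo)) {
 *	   ...
 *	   case 4: pscr_ret__ = this_cpu_read_4(foo); break;
 *	   ...
 *	   }
 *	   pscr_ret__; })
 *
 * sizeof() is a compile-time constant, so the compiler drops every case
 * but the matching one and no runtime branch remains.
 */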
/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we know that the context is not preempt
 * safe. Interrupts may occur. If the interrupt modifies the variable
 * too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely. F.e. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes. F.e. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
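
/*
 * Sketch of option 2 above (the arch helper name is hypothetical): an
 * architecture that can add to a per cpu location in a single
 * instruction could define, in its asm/percpu.h,
 *
 *	#define this_cpu_add_4(pcp, val)  my_arch_percpu_add_4(&(pcp), val)
 *
 * and the 4-byte case of this_cpu_add() below would then use it instead
 * of the preempt_disable()-based fallback.
 */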
#define _this_cpu_generic_read(pcp)					\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = *this_cpu_ptr(&(pcp));					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	preempt_disable();						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	preempt_enable();						\
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
#endif
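
/*
 * Example (illustrative sketch; variable name is hypothetical):
 * this_cpu_*() is the preferred way to update per-cpu statistics from
 * contexts where preemption may be enabled:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	this_cpu_inc(nr_events);
 *	this_cpu_add(nr_events, 16);
 *
 * No explicit get_cpu()/put_cpu() is needed; each operation handles
 * preemption internally (or is a single instruction on some archs).
 */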
#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif
#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif
#define _this_cpu_generic_add_return(pcp, val)				\
({									\
	typeof(pcp) ret__;						\
	preempt_disable();						\
	__this_cpu_add(pcp, val);					\
	ret__ = __this_cpu_read(pcp);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_add_return
# ifndef this_cpu_add_return_1
#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_2
#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_4
#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# ifndef this_cpu_add_return_8
#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
# endif
# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#endif

#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
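
/*
 * Example (illustrative sketch; variable name is hypothetical): the
 * *_return variants are handy when the updated value is needed, e.g.
 * for a per-cpu sequence number:
 *
 *	DEFINE_PER_CPU(unsigned int, seq);
 *
 *	unsigned int id = this_cpu_inc_return(seq);
 *
 * The increment and the read back happen without an intervening
 * reschedule, so id is the value this CPU just stored.
 */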
#define _this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_xchg
# ifndef this_cpu_xchg_1
#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_2
#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_4
#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef this_cpu_xchg_8
#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
# endif
# define this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
#endif

#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({	typeof(pcp) ret__;						\
	preempt_disable();						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg
# ifndef this_cpu_cmpxchg_1
#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_2
#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_4
#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef this_cpu_cmpxchg_8
#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#endif
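
/*
 * Example (illustrative sketch; the flag variable is hypothetical): a
 * per-cpu "claim" that only succeeds once per CPU until it is reset:
 *
 *	DEFINE_PER_CPU(int, in_use);
 *
 *	if (this_cpu_cmpxchg(in_use, 0, 1) == 0) {
 *		// this CPU's slot was free and is now claimed
 *	}
 *
 * The returned value is the previous contents, matching the usual
 * cmpxchg() convention.
 */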
/*
 * cmpxchg_double replaces two adjacent scalars at once.  The first
 * two parameters are per cpu variables which have to be of the same
 * size.  A truth value is returned to indicate success or failure
 * (since a double register result is difficult to handle).  There is
 * very limited hardware support for these operations, so only certain
 * sizes may work.
 */
#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	preempt_disable();						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	preempt_enable();						\
	ret__;								\
})

#ifndef this_cpu_cmpxchg_double
# ifndef this_cpu_cmpxchg_double_1
#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_2
#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_4
#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef this_cpu_cmpxchg_double_8
#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
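
/*
 * Example (illustrative sketch; the structure is made up): the two
 * per-cpu words must be the same size and adjacent, with the first
 * double-word aligned, which is typically arranged with a struct layout
 * along the lines of
 *
 *	struct my_pcpu {
 *		void *ptr;
 *		unsigned long seq;
 *	} __aligned(2 * sizeof(void *));
 *	DEFINE_PER_CPU(struct my_pcpu, slot);
 *
 *	ok = this_cpu_cmpxchg_double(slot.ptr, slot.seq,
 *				     old_ptr, old_seq,
 *				     new_ptr, new_seq);
 *
 * A true return means both words were replaced atomically.
 */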
/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues.  Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
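
/*
 * Example (illustrative sketch; the per-cpu counter is hypothetical):
 * __this_cpu_*() is appropriate when the caller already holds off
 * preemption and, if relevant, the interrupts that touch the same data:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	__this_cpu_inc(pending);
 *	local_irq_restore(flags);
 *
 * Using it without such protection makes the read-modify-write racy
 * against a reschedule or interrupt, as described above.
 */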
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)				\
do {									\
	*__this_cpu_ptr(&(pcp)) op val;					\
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif
#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
#endif
#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif
#define __this_cpu_generic_add_return(pcp, val)				\
({									\
	__this_cpu_add(pcp, val);					\
	__this_cpu_read(pcp);						\
})

#ifndef __this_cpu_add_return
# ifndef __this_cpu_add_return_1
#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_2
#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_4
#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# ifndef __this_cpu_add_return_8
#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
# endif
# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
#endif

#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
#define __this_cpu_generic_xchg(pcp, nval)				\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif
#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int __ret = 0;							\
	if (__this_cpu_read(pcp1) == (oval1) &&				\
			__this_cpu_read(pcp2) == (oval2)) {		\
		__this_cpu_write(pcp1, (nval1));			\
		__this_cpu_write(pcp2, (nval2));			\
		__ret = 1;						\
	}								\
	(__ret);							\
})

#ifndef __this_cpu_cmpxchg_double
# ifndef __this_cpu_cmpxchg_double_1
#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_2
#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_4
#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef __this_cpu_cmpxchg_double_8
#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from other
 * processors (which one gets when using regular atomic operations).
 * They are guaranteed to be atomic vs. local interrupts and
 * preemption only.
 */
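
/*
 * Example (illustrative sketch; the counter name is hypothetical): safe
 * to use from process context even when the same variable is also
 * updated from an interrupt handler on the same CPU:
 *
 *	irqsafe_cpu_inc(rx_drops);		// process context
 *	...
 *	irqsafe_cpu_add(rx_drops, n);		// irq handler
 *
 * This does not protect against concurrent updates from *other* CPUs;
 * each CPU still only touches its own copy.
 */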
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
do {									\
	unsigned long flags;						\
	local_irq_save(flags);						\
	*__this_cpu_ptr(&(pcp)) op val;					\
	local_irq_restore(flags);					\
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)		irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)		irqsafe_cpu_sub((pcp), 1)
#endif
#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif
#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif
#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif
#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg
# ifndef irqsafe_cpu_cmpxchg_1
#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_2
#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_4
#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_8
#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif
#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
({									\
	int ret__;							\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
			oval1, oval2, nval1, nval2);			\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg_double
# ifndef irqsafe_cpu_cmpxchg_double_1
#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_2
#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_4
#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# ifndef irqsafe_cpu_cmpxchg_double_8
#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
# endif
# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
#endif
#endif /* __LINUX_PERCPU_H */