#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__
/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)						\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */
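/*
 * Editor's illustration (not part of the original header): with CONFIG_SMP
 * on x86-64, PER_CPU_VAR(var) expands to %gs:var, so assembly code can
 * touch a per-cpu slot in a single instruction, e.g. (variable name assumed):
 *
 *	movq	PER_CPU_VAR(kernel_stack), %rsp
 *
 * PER_CPU(var, reg) instead loads this_cpu_off first and leaves the
 * variable's address in "reg", as in the PER_CPU(cpu_gdt_descr, %ebx)
 * example above.
 */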
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif
#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>
#ifdef CONFIG_SMP
#define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset		percpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define __this_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	__verify_pcpu_ptr(ptr);				\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
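/*
 * Editor's sketch of typical __this_cpu_ptr() usage (the per-cpu variable
 * below is hypothetical):
 *
 *	DEFINE_PER_CPU(struct stats, pkt_stats);
 *
 *	struct stats *s = __this_cpu_ptr(&pkt_stats);
 *	s->rx++;
 *
 * The asm above adds this_cpu_off directly into the pointer, saving the
 * separate load (and temporary register) the generic version would need.
 */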
#else
#define __percpu_arg(x)		"%P" #x
#endif
/*
 * Initialized pointers to per-cpu variables needed by the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be an entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);
#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)
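/*
 * Editor's note/example: the size switch selects the operand suffix, and a
 * size other than 1/2/4/8 leaves behind a call to the never-defined
 * __bad_percpu_size(), turning the mistake into a link-time error.
 * For a hypothetical per-cpu "unsigned long foo" on x86-64,
 *
 *	percpu_to_op("add", foo, 5)
 *
 * generates roughly a single instruction on the local CPU's copy:
 *
 *	addq $5, %gs:foo
 *
 * with no lock prefix and no explicit address calculation.
 */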
/*
 * Generate a percpu add-to-memory instruction, and optimize the code
 * when a constant 1 or -1 is being added (or subtracted).
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ? (val) : 0;	\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
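/*
 * Editor's example: pao_ID__ is a compile-time constant, so gcc drops the
 * dead branches and a constant +1/-1 degenerates into inc/dec.  For a
 * hypothetical per-cpu "int nr_events":
 *
 *	percpu_add_op(nr_events, 1)	->	incl %gs:nr_events
 *	percpu_add_op(nr_events, -1)	->	decl %gs:nr_events
 *	percpu_add_op(nr_events, 7)	->	addl $7, %gs:nr_events
 */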
#define percpu_from_op(op, var, constraint)		\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(1)",%0"		\
		    : "=q" (pfo_ret__) : constraint);	\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__) : constraint);	\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__) : constraint);	\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(1)",%0"		\
		    : "=r" (pfo_ret__) : constraint);	\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})
#define percpu_unary_op(op, var)			\
({							\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(0)		\
		    : "+m" (var));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
})
/*
 * Add return operation: add "val" and return the new value.
 */
#define percpu_add_return_op(var, val)					\
({									\
	typeof(var) paro_ret__ = val;					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("xaddb %0, "__percpu_arg(1)				\
		    : "+q" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 2:								\
		asm("xaddw %0, "__percpu_arg(1)				\
		    : "+r" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 4:								\
		asm("xaddl %0, "__percpu_arg(1)				\
		    : "+r" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 8:								\
		asm("xaddq %0, "__percpu_arg(1)				\
		    : "+re" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	paro_ret__ += val;						\
	paro_ret__;							\
})
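/*
 * Editor's note: xadd leaves the *old* value in paro_ret__, so the macro
 * adds "val" once more to return the new value, e.g. for a hypothetical
 * per-cpu "int seq":
 *
 *	int new = percpu_add_return_op(seq, 1);	   new == previous seq + 1
 */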
/*
 * percpu_xchg_op() is implemented with cmpxchg, without a lock prefix.
 * The xchg instruction itself carries an implied lock prefix, which is
 * expensive and keeps the processor from prefetching cachelines.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "q" (pxo_new__) : "memory");		\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__) : "memory");		\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__) : "memory");		\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
			    : "=&a" (pxo_ret__), "+m" (var)		\
			    : "r" (pxo_new__) : "memory");		\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})
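/*
 * Editor's sketch of the code generated for a 4-byte variable (per-cpu
 * "int state" is hypothetical), showing why no lock prefix is needed --
 * the loop only has to be atomic with respect to the local CPU:
 *
 *	movl %gs:state, %eax
 * 1:	cmpxchgl %ecx, %gs:state	# %ecx holds the new value
 *	jnz 1b
 *
 * If cmpxchg fails, it has already reloaded %eax with the current value,
 * so the loop simply retries.
 */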
/*
 * cmpxchg has no such implied lock semantics; as a result it is much
 * more efficient for cpu-local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)			\
({								\
	typeof(var) pco_ret__;					\
	typeof(var) pco_old__ = (oval);				\
	typeof(var) pco_new__ = (nval);				\
	switch (sizeof(var)) {					\
	case 1:							\
		asm("cmpxchgb %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "q" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 2:							\
		asm("cmpxchgw %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 4:							\
		asm("cmpxchgl %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	case 8:							\
		asm("cmpxchgq %2, "__percpu_arg(1)		\
		    : "=a" (pco_ret__), "+m" (var)		\
		    : "r" (pco_new__), "0" (pco_old__)		\
		    : "memory");				\
		break;						\
	default: __bad_percpu_size();				\
	}							\
	pco_ret__;						\
})
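/*
 * Editor's example (hypothetical per-cpu "unsigned int owner"): a
 * compare-and-swap on the local CPU's copy, to be called with preemption
 * disabled:
 *
 *	old = percpu_cmpxchg_op(owner, 0, new_owner);
 *	if (old == 0)
 *		...we installed new_owner...
 *
 * No lock prefix is emitted because the operation only needs to be atomic
 * against this CPU, not against other CPUs.
 */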
/*
 * percpu_read() makes gcc load the percpu variable every time it is
 * accessed while percpu_read_stable() allows the value to be cached.
 * percpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
#define percpu_read_stable(var)		percpu_from_op("mov", var, "p" (&(var)))
#define percpu_write(var, val)		percpu_to_op("mov", var, val)
#define percpu_add(var, val)		percpu_add_op(var, val)
#define percpu_sub(var, val)		percpu_add_op(var, -(val))
#define percpu_and(var, val)		percpu_to_op("and", var, val)
#define percpu_or(var, val)		percpu_to_op("or", var, val)
#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
#define percpu_inc(var)			percpu_unary_op("inc", var)
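/*
 * Editor's illustration of the difference described above, mirroring how
 * get_current() uses the current_task per-cpu variable (see asm/current.h):
 *
 *	percpu_read(current_task)		reloaded from %gs:current_task
 *						at every use ("m" constraint)
 *	percpu_read_stable(current_task)	may be cached by gcc across
 *						uses ("p" constraint)
 *
 * The stable form is only safe for values that cannot change under the
 * caller, such as per-thread state exposed through a per-cpu variable.
 */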
#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
/*
 * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
 * faster than an xchg with forced lock semantics.
 */
#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
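/*
 * Editor's note: xadd and cmpxchg first appeared on the i486, so the
 * add_return/cmpxchg family below is not provided when building for a
 * plain 386 (CONFIG_M386); the generic fallbacks are used instead.
 */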
#ifndef CONFIG_M386
#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#endif /* !CONFIG_M386 */
/*
 * Per-cpu atomic 64-bit operations are only available under 64-bit.
 * 32-bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define __this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)

#define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#endif /* CONFIG_X86_64 */
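/*
 * Editor's example: on a 32-bit kernel the _8 variants above do not exist,
 * so an 8-byte access such as (names hypothetical)
 *
 *	DEFINE_PER_CPU(u64, byte_count);
 *	this_cpu_add(byte_count, len);
 *
 * falls back to the generic implementation from linux/percpu.h, while on
 * x86-64 it compiles to a single addq against %gs:byte_count.
 */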
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)			\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
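/*
 * Editor's example: btr leaves the old bit in CF and "sbbl %0,%0" turns CF
 * into 0 or -1, so the macro returns non-zero iff the bit was set.  Typical
 * use (bit and variable names hypothetical), with preemption disabled:
 *
 *	if (x86_test_and_clear_bit_percpu(MY_PENDING_BIT, my_pending_flags))
 *		handle_pending_work();
 */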
#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type)  _name##_early_map[]

#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define	early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
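/*
 * Editor's sketch of the intended early-boot flow, using the real
 * x86_cpu_to_apicid user as the example:
 *
 *	DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * Before setup_per_cpu_areas() runs, early_per_cpu(x86_cpu_to_apicid, cpu)
 * reads x86_cpu_to_apicid_early_map[cpu]; once the per-cpu areas exist the
 * _early_ptr is cleared and the same expression resolves to
 * per_cpu(x86_cpu_to_apicid, cpu).
 */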
#else	/* !CONFIG_SMP */
#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define	DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define	early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif	/* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */