#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
/* Per-processor data structure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8 Per cpu data offset from linker
					   address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts -1 */
	unsigned int cpunumber;		/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc ABI: this canary MUST be at
					   offset 40! */
#endif
	short nodenumber;		/* number of current node (32k max) */
	short in_bootmem;		/* pda lives in bootmem */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	struct mm_struct *active_mm;
	unsigned apic_timer_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
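
/*
 * The numeric comments above record each field's byte offset; code that
 * addresses the PDA via %gs can use them directly.  A sketch of what such
 * an access looks like (offsets taken from the comments above):
 *
 *	movq	%gs:16, %rsp		# pda->kernelstack
 *	movl	%gs:36, %eax		# pda->cpunumber
 */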
extern struct x8664_pda **_cpu_pda;
extern void pda_init(int);
#define cpu_pda(i) (_cpu_pda[i])
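
/*
 * Usage sketch (hypothetical caller): the %gs-relative accessors defined
 * below only reach the local CPU's PDA; another CPU's copy is reached
 * through the _cpu_pda[] pointer array via cpu_pda():
 *
 *	int cpu;
 *	for_each_possible_cpu(cpu)
 *		printk(KERN_DEBUG "cpu %d: %u NMIs\n",
 *		       cpu, cpu_pda(cpu)->__nmi_count);
 */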
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));
/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;
#define pda_offset(field) offsetof(struct x8664_pda, field)
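
/*
 * Illustration: pda_offset() is plain offsetof(), so with the layout
 * documented above, pda_offset(oldrsp) is 24 and pda_offset(cpunumber)
 * is 36.
 */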
#define pda_to_op(op, field, val)					\
do {									\
	typedef typeof(_proxy_pda.field) T__;				\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */	\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)(val)), "i" (pda_offset(field)));	\
		break;							\
	case 4:								\
		asm(op "l %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)(val)), "i" (pda_offset(field)));	\
		break;							\
	case 8:								\
		asm(op "q %1,%%gs:%c2" :				\
		    "+m" (_proxy_pda.field) :				\
		    "ri" ((T__)(val)), "i" (pda_offset(field)));	\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
} while (0)
#define pda_from_op(op, field)						\
({									\
	typeof(_proxy_pda.field) ret__;					\
	switch (sizeof(_proxy_pda.field)) {				\
	case 2:								\
		asm(op "w %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)), "m" (_proxy_pda.field));	\
		break;							\
	case 4:								\
		asm(op "l %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)), "m" (_proxy_pda.field));	\
		break;							\
	case 8:								\
		asm(op "q %%gs:%c1,%0" :				\
		    "=r" (ret__) :					\
		    "i" (pda_offset(field)), "m" (_proxy_pda.field));	\
		break;							\
	default:							\
		__bad_pda_field();					\
	}								\
	ret__;								\
})
#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)
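
/*
 * Usage sketch: get_current() on x86-64 is essentially the first line
 * below; the other lines are hypothetical callers shown for illustration:
 *
 *	struct task_struct *t = read_pda(pcurrent);
 *	add_pda(__nmi_count, 1);
 *	or_pda(__softirq_pending, 1U << 0);
 */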
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field)				\
({									\
	int old__;							\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"			\
		     : "=r" (old__), "+m" (_proxy_pda.field)		\
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
	old__;								\
})
#define PDA_STACKOFFSET (5*8)
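
/*
 * 5*8 = 40 bytes.  An assumption worth flagging: this looks like room for
 * the five 8-byte words of the hardware exception frame (ss, rsp, rflags,
 * cs, rip), so kernelstack can point that far below the top of the stack.
 */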