Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/x86
[linux-2.6/linux-loongson.git] / include / asm-x86_64 / smp.h
blob9ccbb2cfd5c093110c3a853d2fe0c1d3e11ea9e3
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

/*
 * We need the APIC definitions automatically as part of 'smp.h'
 */
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
/* Defined in arch code; presumably nonzero when the local APIC is
 * disabled (e.g. on the command line) — confirm against apic.c. */
extern int disable_apic;
#endif

#ifdef CONFIG_X86_LOCAL_APIC
#ifndef __ASSEMBLY__
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif
#include <asm/apic.h>
#include <asm/thread_info.h>
#endif
#endif
#ifdef CONFIG_SMP
#ifndef ASSEMBLY

#include <asm/pda.h>

struct pt_regs;

/* CPU state masks maintained by the boot/hotplug code (defined in
 * arch/x86_64/kernel/smpboot.c — verify against that file). */
extern cpumask_t cpu_present_mask;
extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_initialized;

/*
 * Private routines/data
 */
extern void smp_alloc_memory(void);
extern volatile unsigned long smp_invalidate_needed;
extern int pic_mode;
extern void lock_ipi_call_lock(void);
extern void unlock_ipi_call_lock(void);
extern int smp_num_siblings;
extern void smp_send_reschedule(int cpu);
void smp_stop_cpu(void);
extern int smp_call_function_single(int cpuid, void (*func) (void *info),
				void *info, int retry, int wait);

/* Per-CPU topology maps, indexed by CPU number. */
extern cpumask_t cpu_sibling_map[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];
extern u8 phys_proc_id[NR_CPUS];
extern u8 cpu_core_id[NR_CPUS];
60 #define SMP_TRAMPOLINE_BASE 0x6000
63 * On x86 all CPUs are mapped 1:1 to the APIC space.
64 * This simplifies scheduling and IPI sending and
65 * compresses data structures.
68 static inline int num_booting_cpus(void)
70 return cpus_weight(cpu_callout_map);
73 #define raw_smp_processor_id() read_pda(cpunumber)
75 static inline int hard_smp_processor_id(void)
77 /* we don't want to mark this access volatile - bad code generation */
78 return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
extern int safe_smp_processor_id(void);
/* CPU hotplug hooks — presumably implemented in smpboot.c; verify. */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void prefill_possible_map(void);
extern unsigned num_processors;
extern unsigned disabled_cpus;

#endif /* !ASSEMBLY */

#define NO_PROC_ID 0xFF /* No processor magic marker */

#endif /* CONFIG_SMP */
#ifndef ASSEMBLY
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern u8 x86_cpu_to_apicid[NR_CPUS];	/* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];
103 static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
105 return cpus_addr(cpumask)[0];
108 static inline int cpu_present_to_apicid(int mps_cpu)
110 if (mps_cpu < NR_CPUS)
111 return (int)bios_cpu_apicid[mps_cpu];
112 else
113 return BAD_APICID;
116 #endif /* !ASSEMBLY */
#ifndef CONFIG_SMP
/* UP fallbacks: there is only CPU 0. */
#define stack_smp_processor_id() 0
#define safe_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
/*
 * Derive the CPU number from the thread_info sitting at the base of
 * the current kernel stack: mask %rsp down with CURRENT_MASK and read
 * the ->cpu field.  (As extracted, the statement expression was
 * truncated — the closing "})" below restores it.)
 */
#define stack_smp_processor_id() \
({ 								\
	struct thread_info *ti; 				\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
	ti->cpu;						\
})
#endif
132 #ifndef __ASSEMBLY__
133 static __inline int logical_smp_processor_id(void)
135 /* we don't want to mark this access volatile - bad code generation */
136 return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
138 #endif
#ifdef CONFIG_SMP
/* SMP: look up the physical APIC ID recorded for this CPU number. */
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
#else
/* UP: the only CPU is the boot CPU. */
#define cpu_physical_id(cpu) boot_cpu_id
#endif

#endif /* __ASM_SMP_H */