x86, apic: clean up check_apicid*() callbacks
[linux-2.6/mini2440.git] / arch / x86 / include / asm / es7000 / apic.h
blob cd888daa19308380f8c24d9c9bfe3146e7d0b630
1 #ifndef __ASM_ES7000_APIC_H
2 #define __ASM_ES7000_APIC_H
4 #include <linux/gfp.h>
/*
 * Look up the BIOS-reported APIC id for @cpu from the per-CPU
 * x86_bios_cpu_apicid variable; used below as the logical APIC id
 * when building the LDR value.
 */
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/*
 * The local APIC id is always treated as registered on ES7000;
 * callers get an unconditional "yes".
 */
static inline int es7000_apic_id_registered(void)
{
	return 1;
}
/*
 * Cluster mode: interrupts may target every possible CPU.
 * Returns a pointer to the all-ones cpumask.
 */
static inline const cpumask_t *target_cpus_cluster(void)
{
	return &CPU_MASK_ALL;
}
/*
 * Default (non-cluster) mode: target only the CPU this code is
 * currently running on.
 */
static inline const cpumask_t *es7000_target_cpus(void)
{
	return &cpumask_of_cpu(smp_processor_id());
}
/* Cluster-mode APIC configuration. */
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

/* Flat-mode APIC configuration; IRQ balancing stays enabled. */
#define APIC_DFR_VALUE			(APIC_DFR_FLAT)
#define NO_BALANCE_IRQ			(0)
/*
 * ES7000 never considers an APIC id "used"; both the bitmap and the
 * apicid are ignored and the id is always reported as free.
 */
static inline unsigned long
es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
{
	return 0;
}
/* An APIC id is present iff its bit is set in phys_cpu_present_map. */
static inline unsigned long es7000_check_apicid_present(int bit)
{
	return physid_isset(bit, phys_cpu_present_map);
}
/*
 * Extract the cluster number (the high nibble) of an 8-bit logical
 * APIC id.  The argument is parenthesized so that expressions built
 * from operators with lower precedence than '&' (e.g. '|', '?:')
 * group correctly when passed in.
 */
#define apicid_cluster(apicid) ((apicid) & 0xF0)
/*
 * Build the APIC_LDR register value for @cpu: its BIOS-reported
 * APIC id shifted into the logical-destination field.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;

	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static inline void init_apic_ldr_cluster(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* Destination format (cluster) must be programmed before LDR. */
	apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
/*
 * Flat-mode variant of init_apic_ldr_cluster(): program the flat
 * destination format, then this CPU's logical id into APIC_LDR.
 */
static inline void init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_VALUE);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
extern int apic_version [MAX_APICS];

/*
 * Log the APIC routing mode selected for this platform.  APIC
 * version 0x14 parts are reported as "Physical Cluster", anything
 * else as "Logical Cluster"; also reports the I/O APIC count and
 * the first word of the target-cpus mask.
 */
static inline void setup_apic_routing(void)
{
	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());

	printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
		(apic_version[apic] == 0x14) ?
			"Physical Cluster" : "Logical Cluster",
		nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
}
/*
 * Always reports "no" for (apic, irq): no multi-connected timer
 * handling is needed on this platform.
 */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
/* Every logical APIC id maps to NUMA node 0 here. */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
98 static inline int cpu_present_to_apicid(int mps_cpu)
100 if (!mps_cpu)
101 return boot_cpu_physical_apicid;
102 else if (mps_cpu < nr_cpu_ids)
103 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
104 else
105 return BAD_APICID;
/*
 * Hand out present-map slots sequentially, IGNORING @phys_apicid:
 * each call returns a mask with only the next unused slot set and
 * bumps the function-local static counter.  This assumes it is
 * called exactly once per CPU, in order — NOTE(review): stateful
 * and not idempotent by design.
 */
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
	static int id = 0;
	physid_mask_t mask;

	mask = physid_mask_of_physid(id);
	++id;
	return mask;
}
/* Mapping from cpu number to logical apicid */
extern u8 cpu_2_logical_apicid[];

/*
 * Return the logical APIC id for @cpu.  SMP kernels read the
 * cpu_2_logical_apicid[] table (BAD_APICID for out-of-range cpus);
 * UP kernels just read the current processor's logical id.
 */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
	if (cpu >= nr_cpu_ids)
		return BAD_APICID;
	return (int)cpu_2_logical_apicid[cpu];
#else
	return logical_smp_processor_id();
#endif
}
/*
 * Build the physid map used for I/O APIC id checks.  The incoming
 * @phys_map is ignored and all 8 low ids are claimed.
 */
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
	/* For clustered we don't have a good way to do this yet - hack */
	return physids_promote(0xff);
}
/* No port-I/O remapping is required on this platform; empty stub. */
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;

/*
 * NOTE(review): despite its name, this does not test
 * @cpu_physical_apicid at all — the argument is ignored.  It
 * refreshes boot_cpu_physical_apicid from the hardware APIC id
 * register and unconditionally reports "present".
 */
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}
/*
 * Cluster-mode version: collapse @cpumask into a single logical
 * APIC id.  If the mask covers every CPU, 0xFF (broadcast) is
 * returned.  Otherwise all member CPUs must live in the same APIC
 * cluster (same high nibble); a mixed-cluster mask is rejected with
 * 0xFF as well.  On success the logical id of the last CPU visited
 * is returned.
 */
static inline unsigned int
cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpumask_weight(cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return 0xFF;
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = cpumask_first(cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Walk upward from the first member until all set bits are seen. */
	while (cpus_found < num_bits_set) {
		if (cpumask_test_cpu(cpu, cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk ("%s: Not a valid mask!\n", __func__);
				return 0xFF;
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Flat-mode version: collapse @cpumask into a single logical APIC
 * id.  Masks covering all CPUs, and masks spanning more than one
 * APIC cluster, both fall back to CPU 0's logical id.  Otherwise
 * the logical id of the last CPU visited is returned.  (Uses the
 * older cpus_* mask API, unlike the _cluster variant above.)
 */
static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
{
	int num_bits_set;
	int cpus_found = 0;
	int cpu;
	int apicid;

	num_bits_set = cpus_weight(*cpumask);
	/* Return id to all */
	if (num_bits_set == nr_cpu_ids)
		return cpu_to_logical_apicid(0);
	/*
	 * The cpus in the mask must all be on the apic cluster. If are not
	 * on the same apicid cluster return default value of target_cpus():
	 */
	cpu = first_cpu(*cpumask);
	apicid = cpu_to_logical_apicid(cpu);
	/* Walk upward from the first member until all set bits are seen. */
	while (cpus_found < num_bits_set) {
		if (cpu_isset(cpu, *cpumask)) {
			int new_apicid = cpu_to_logical_apicid(cpu);
			if (apicid_cluster(apicid) !=
					apicid_cluster(new_apicid)) {
				printk ("%s: Not a valid mask!\n", __func__);
				return cpu_to_logical_apicid(0);
			}
			apicid = new_apicid;
			cpus_found++;
		}
		cpu++;
	}
	return apicid;
}
/*
 * Compute the APIC id for the intersection of @inmask, @andmask and
 * the online-CPU mask.  A temporary cpumask is allocated with
 * GFP_ATOMIC (this may run in atomic context); if the allocation
 * fails, CPU 0's logical id is returned as the fallback.
 */
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
						  const struct cpumask *andmask)
{
	int apicid = cpu_to_logical_apicid(0);
	cpumask_var_t cpumask;

	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	cpumask_and(cpumask, inmask, andmask);
	cpumask_and(cpumask, cpumask, cpu_online_mask);
	apicid = cpu_mask_to_apicid(cpumask);

	/* Balance the alloc_cpumask_var() above on every success path. */
	free_cpumask_var(cpumask);
	return apicid;
}
233 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
235 return cpuid_apic >> index_msb;
238 #endif /* __ASM_ES7000_APIC_H */