1 #ifndef __ASM_ES7000_APIC_H
2 #define __ASM_ES7000_APIC_H
6 #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/*
 * APIC id registration check for ES7000.
 * NOTE(review): the body was lost in extraction; "return 1" (always
 * registered) is reconstructed from the upstream header -- verify.
 */
static inline int es7000_apic_id_registered(void)
{
	return 1;
}
13 static inline const cpumask_t
*target_cpus_cluster(void)
18 static inline const cpumask_t
*es7000_target_cpus(void)
20 return &cpumask_of_cpu(smp_processor_id());
/* Clustered-mode APIC parameters. */
#define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE_CLUSTER	(dest_LowestPrio)
#define INT_DEST_MODE_CLUSTER		(1)	/* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ_CLUSTER		(1)

/* Flat-mode APIC parameters. */
#define APIC_DFR_VALUE			(APIC_DFR_FLAT)
#define NO_BALANCE_IRQ			(0)
31 static inline unsigned long
32 es7000_check_apicid_used(physid_mask_t bitmap
, int apicid
)
36 static inline unsigned long es7000_check_apicid_present(int bit
)
38 return physid_isset(bit
, phys_cpu_present_map
);
/* The cluster is the high nibble of the APIC id. */
#define apicid_cluster(apicid) (apicid & 0xF0)
/*
 * Compute the APIC_LDR register value for @cpu from its logical APIC id.
 * NOTE(review): the local declaration was lost in extraction; its type is
 * taken from the function's return type -- verify against upstream.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;

	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
51 * Set up the logical destination ID.
53 * Intel recommends to set DFR, LdR and TPR before enabling
54 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
55 * document number 292116). So here it goes...
57 static inline void init_apic_ldr_cluster(void)
60 int cpu
= smp_processor_id();
62 apic_write(APIC_DFR
, APIC_DFR_VALUE_CLUSTER
);
63 val
= calculate_ldr(cpu
);
64 apic_write(APIC_LDR
, val
);
67 static inline void init_apic_ldr(void)
70 int cpu
= smp_processor_id();
72 apic_write(APIC_DFR
, APIC_DFR_VALUE
);
73 val
= calculate_ldr(cpu
);
74 apic_write(APIC_LDR
, val
);
77 extern int apic_version
[MAX_APICS
];
78 static inline void setup_apic_routing(void)
80 int apic
= per_cpu(x86_bios_cpu_apicid
, smp_processor_id());
81 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
82 (apic_version
[apic
] == 0x14) ?
83 "Physical Cluster" : "Logical Cluster",
84 nr_ioapics
, cpus_addr(*es7000_target_cpus())[0]);
/*
 * NOTE(review): the body was lost in extraction; "return 0" (no
 * multi-timer special-casing) is reconstructed from upstream -- verify.
 */
static inline int multi_timer_check(int apic, int irq)
{
	return 0;
}
/*
 * NOTE(review): the body was lost in extraction; "return 0" (everything
 * on node 0) is reconstructed from upstream -- verify.
 */
static inline int apicid_to_node(int logical_apicid)
{
	return 0;
}
98 static inline int cpu_present_to_apicid(int mps_cpu
)
101 return boot_cpu_physical_apicid
;
102 else if (mps_cpu
< nr_cpu_ids
)
103 return (int) per_cpu(x86_bios_cpu_apicid
, mps_cpu
);
108 static inline physid_mask_t
apicid_to_cpu_present(int phys_apicid
)
112 mask
= physid_mask_of_physid(id
);
117 extern u8 cpu_2_logical_apicid
[];
118 /* Mapping from cpu number to logical apicid */
119 static inline int cpu_to_logical_apicid(int cpu
)
122 if (cpu
>= nr_cpu_ids
)
124 return (int)cpu_2_logical_apicid
[cpu
];
126 return logical_smp_processor_id();
130 static inline physid_mask_t
ioapic_phys_id_map(physid_mask_t phys_map
)
132 /* For clustered we don't have a good way to do this yet - hack */
133 return physids_promote(0xff);
/*
 * NOTE(review): body lost in extraction; upstream has an empty body
 * (no port I/O remapping needed on ES7000) -- verify.
 */
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;

/*
 * Refreshes boot_cpu_physical_apicid from the hardware as a side effect.
 * NOTE(review): the "return (1)" tail was lost in extraction and is
 * reconstructed from upstream (always reports present) -- verify.
 */
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
	boot_cpu_physical_apicid = read_apic_id();
	return (1);
}
148 static inline unsigned int
149 cpu_mask_to_apicid_cluster(const struct cpumask
*cpumask
)
156 num_bits_set
= cpumask_weight(cpumask
);
157 /* Return id to all */
158 if (num_bits_set
== nr_cpu_ids
)
161 * The cpus in the mask must all be on the apic cluster. If are not
162 * on the same apicid cluster return default value of target_cpus():
164 cpu
= cpumask_first(cpumask
);
165 apicid
= cpu_to_logical_apicid(cpu
);
166 while (cpus_found
< num_bits_set
) {
167 if (cpumask_test_cpu(cpu
, cpumask
)) {
168 int new_apicid
= cpu_to_logical_apicid(cpu
);
169 if (apicid_cluster(apicid
) !=
170 apicid_cluster(new_apicid
)){
171 printk ("%s: Not a valid mask!\n", __func__
);
182 static inline unsigned int cpu_mask_to_apicid(const cpumask_t
*cpumask
)
189 num_bits_set
= cpus_weight(*cpumask
);
190 /* Return id to all */
191 if (num_bits_set
== nr_cpu_ids
)
192 return cpu_to_logical_apicid(0);
194 * The cpus in the mask must all be on the apic cluster. If are not
195 * on the same apicid cluster return default value of target_cpus():
197 cpu
= first_cpu(*cpumask
);
198 apicid
= cpu_to_logical_apicid(cpu
);
199 while (cpus_found
< num_bits_set
) {
200 if (cpu_isset(cpu
, *cpumask
)) {
201 int new_apicid
= cpu_to_logical_apicid(cpu
);
202 if (apicid_cluster(apicid
) !=
203 apicid_cluster(new_apicid
)){
204 printk ("%s: Not a valid mask!\n", __func__
);
205 return cpu_to_logical_apicid(0);
216 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask
*inmask
,
217 const struct cpumask
*andmask
)
219 int apicid
= cpu_to_logical_apicid(0);
220 cpumask_var_t cpumask
;
222 if (!alloc_cpumask_var(&cpumask
, GFP_ATOMIC
))
225 cpumask_and(cpumask
, inmask
, andmask
);
226 cpumask_and(cpumask
, cpumask
, cpu_online_mask
);
227 apicid
= cpu_mask_to_apicid(cpumask
);
229 free_cpumask_var(cpumask
);
233 static inline u32
phys_pkg_id(u32 cpuid_apic
, int index_msb
)
235 return cpuid_apic
>> index_msb
;
238 #endif /* __ASM_ES7000_APIC_H */