#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

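/*
 * Per-cpu state for the cluster-mode x2apic driver:
 *  - x86_cpu_to_logical_apicid caches the logical APIC ID (the LDR value)
 *    the hardware assigned to each cpu,
 *  - cpus_in_cluster tracks which online cpus share a cpu's x2apic cluster,
 *  - ipi_mask is scratch space used while fanning out multi-cpu IPIs.
 */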
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

/*
 * Need to use more than cpu 0, because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
        return cpu_online_mask;
}

/*
 * For now each logical cpu is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
        unsigned long cfg;

        cfg = __prepare_ICR(0, vector, dest);

        /* Send the IPI by writing the ICR MSR. */
        native_x2apic_icr_write(cfg, apicid);
}

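/*
 * In cluster mode the 32-bit logical APIC ID is split in two: bits 31:16
 * hold the cluster ID and bits 15:0 hold a one-bit-per-cpu mask within
 * that cluster, so the logical IDs of cluster siblings can be ORed
 * together and targeted with a single IPI.
 */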
static inline u32 x2apic_cluster(int cpu)
{
        return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

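/*
 * Deliver 'vector' to every cpu in 'mask' with at most one ICR write per
 * cluster: copy the mask into per-cpu scratch space, OR together the
 * logical APIC IDs of the requested cpus that share a cluster, send one
 * logical-mode IPI to that combined destination, then drop the whole
 * cluster from the working mask and repeat.
 */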
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        struct cpumask *cpus_in_cluster_ptr;
        struct cpumask *ipi_mask_ptr;
        unsigned int cpu, this_cpu;
        unsigned long flags;
        u32 dest;

        x2apic_wrmsr_fence();

        local_irq_save(flags);

        this_cpu = smp_processor_id();

        /*
         * We are going to modify the mask, so we need our own copy
         * and must be sure it's manipulated with irqs off.
         */
        ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
        cpumask_copy(ipi_mask_ptr, mask);

        /*
         * The idea is to send one IPI per cluster.
         */
        for_each_cpu(cpu, ipi_mask_ptr) {
                unsigned int i;

                cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
                dest = 0;

                /* Collect cpus in cluster. */
                for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
                        if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
                                dest |= per_cpu(x86_cpu_to_logical_apicid, i);
                }

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);

                /*
                 * Cluster sibling cpus should be discarded now so
                 * we do not send an IPI to them a second time.
                 */
                cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
        }

        local_irq_restore(flags);
}

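/*
 * The wrappers below differ only in whether the sending cpu itself may be
 * included in the destination set (APIC_DEST_ALLINC) or must be skipped
 * (APIC_DEST_ALLBUT).
 */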
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static int x2apic_apic_id_registered(void)
{
        return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, so we can only return one
         * logical APIC ID. May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, so we can only return one
         * logical APIC ID. May as well be the first online one.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }

        return per_cpu(x86_cpu_to_logical_apicid, cpu);
}

static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
        /* x2apic IDs are full 32-bit values; no masking needed. */
        return x;
}

static unsigned long set_apic_id(unsigned int id)
{
        /* The 32-bit x2apic ID is used as-is. */
        return id;
}

static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
        return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
{
        apic_write(APIC_SELF_IPI, vector);
}

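/*
 * Called when a cpu sets up its local APIC: record the hardware-assigned
 * LDR value and cross-link the cluster sibling masks between this cpu and
 * every online cpu that lives in the same cluster.
 */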
static void init_x2apic_ldr(void)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

        __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
        for_each_online_cpu(cpu) {
                if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                        continue;
                __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
                __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
        }
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int __cpuinit
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int this_cpu = (unsigned long)hcpu;
        unsigned int cpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
                if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
                                        GFP_KERNEL)) {
                        err = -ENOMEM;
                } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
                                               GFP_KERNEL)) {
                        free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                        err = -ENOMEM;
                }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
                for_each_online_cpu(cpu) {
                        if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                                continue;
                        __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
                        __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
                }
                free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                free_cpumask_var(per_cpu(ipi_mask, this_cpu));
                break;
        }

        return notifier_from_errno(err);
}

static struct notifier_block __refdata x2apic_cpu_notifier = {
        .notifier_call = update_clusterinfo,
};

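/*
 * Runs on the boot cpu from the probe path; the hotplug notifier above only
 * fires for cpus brought up later, so allocate and seed this cpu's masks
 * directly before registering the notifier.
 */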
static int x2apic_init_cpu_notifier(void)
{
        int cpu = smp_processor_id();

        zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

        BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

        __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
        register_hotcpu_notifier(&x2apic_cpu_notifier);
        return 1;
}

static int x2apic_cluster_probe(void)
{
        if (x2apic_mode)
                return x2apic_init_cpu_notifier();
        else
                return 0;
}

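/*
 * The cluster-mode x2apic driver instance: logical destination mode
 * (irq_dest_mode = 1), with all APIC registers accessed via MSRs.
 */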
struct apic apic_x2apic_cluster = {

        .name                           = "cluster x2apic",
        .probe                          = x2apic_cluster_probe,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_registered             = x2apic_apic_id_registered,

        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */

        .target_cpus                    = x2apic_target_cpus,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
        .check_apicid_present           = NULL,

        .vector_allocation_domain       = x2apic_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = x2apic_cluster_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = x2apic_cluster_phys_get_apic_id,
        .set_apic_id                    = set_apic_id,
        .apic_id_mask                   = 0xFFFFFFFFu,

        .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert         = NULL,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};