#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>

static int x2apic_phys;
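
/*
 * Parses the "x2apic_phys" early boot parameter, which asks for
 * physical-destination (rather than cluster) x2APIC mode.
 */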
static int set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);
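
/*
 * Probe hook: this genapic is usable only when the CPU advertises x2APIC
 * and physical mode was explicitly requested on the command line.
 */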
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (cpu_has_x2apic && x2apic_phys)
		return 1;

	return 0;
}

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

static const struct cpumask *x2apic_target_cpus(void)
{
	return cpumask_of(0);
}

static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

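/*
 * Build the ICR value for a fixed-vector, physical-destination IPI and
 * write it in one go; in x2APIC mode the ICR is a single MSR write.
 */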
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
				   unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/* send the IPI */
	x2apic_icr_write(cfg, apicid);
}

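/*
 * Physical destination mode has no multicast: each CPU in the mask gets
 * its own unicast IPI, sent with interrupts disabled so the walk is not
 * preempted.
 */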
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long query_cpu;

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	local_irq_save(flags);
	for_each_online_cpu(query_cpu)
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;
}

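/*
 * In x2APIC mode the APIC ID register already holds the full 32-bit ID,
 * so converting between register value and APIC ID needs no masking or
 * shifting.
 */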
static unsigned int get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

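/* The physical package ID is the initial APIC ID shifted down by index_msb. */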
static unsigned int phys_pkg_id(int index_msb)
{
	return current_cpu_data.initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

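/* Physical destination mode does not use the logical destination register. */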
static void init_x2apic_ldr(void)
{
	return;
}

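/*
 * genapic operations for physical-mode x2APIC, selected through
 * x2apic_acpi_madt_oem_check() above when x2APIC is present and
 * "x2apic_phys" was passed on the command line.
 */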
struct genapic apic_x2apic_phys = {
	.name = "physical x2apic",
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = x2apic_target_cpus,
	.vector_allocation_domain = x2apic_vector_allocation_domain,
	.apic_id_registered = x2apic_apic_id_registered,
	.init_apic_ldr = init_x2apic_ldr,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_self = x2apic_send_IPI_self,
	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
	.phys_pkg_id = phys_pkg_id,
	.get_apic_id = get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = (0xFFFFFFFFu),
};