/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
/* Extract the 8-bit physical APIC ID from bits 31:24 of the APIC_ID register value. */
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}
/*
 * bigsmp uses physical destination mode, so there is no logical-ID
 * registration to verify; always report success.
 * NOTE(review): the body of this function was lost in this copy and has
 * been reconstructed to the upstream bigsmp behavior — confirm against
 * the original source.
 */
static int bigsmp_apic_id_registered(void)
{
	return 1;
}
29 static const struct cpumask
*bigsmp_target_cpus(void)
32 return cpu_online_mask
;
38 static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap
, int apicid
)
/*
 * Report any APIC ID as present; bigsmp does not track a present-map here.
 * NOTE(review): the return statement was lost in this copy and has been
 * reconstructed to the upstream bigsmp behavior (always 1) — confirm.
 */
static unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}
48 static inline unsigned long calculate_ldr(int cpu
)
50 unsigned long val
, id
;
52 val
= apic_read(APIC_LDR
) & ~APIC_LDR_MASK
;
53 id
= per_cpu(x86_bios_cpu_apicid
, cpu
);
54 val
|= SET_APIC_LOGICAL_ID(id
);
60 * Set up the logical destination ID.
62 * Intel recommends to set DFR, LDR and TPR before enabling
63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
64 * document number 292116). So here it goes...
66 static void bigsmp_init_apic_ldr(void)
69 int cpu
= smp_processor_id();
71 apic_write(APIC_DFR
, APIC_DFR_FLAT
);
72 val
= calculate_ldr(cpu
);
73 apic_write(APIC_LDR
, val
);
76 static void bigsmp_setup_apic_routing(void)
79 "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
83 static int bigsmp_apicid_to_node(int logical_apicid
)
85 return apicid_2_node
[hard_smp_processor_id()];
88 static int bigsmp_cpu_present_to_apicid(int mps_cpu
)
90 if (mps_cpu
< nr_cpu_ids
)
91 return (int) per_cpu(x86_bios_cpu_apicid
, mps_cpu
);
96 static physid_mask_t
bigsmp_apicid_to_cpu_present(int phys_apicid
)
98 return physid_mask_of_physid(phys_apicid
);
101 /* Mapping from cpu number to logical apicid */
102 static inline int bigsmp_cpu_to_logical_apicid(int cpu
)
104 if (cpu
>= nr_cpu_ids
)
106 return cpu_physical_id(cpu
);
109 static physid_mask_t
bigsmp_ioapic_phys_id_map(physid_mask_t phys_map
)
111 /* For clustered we don't have a good way to do this yet - hack */
112 return physids_promote(0xFFL
);
/*
 * Treat every physical APIC ID as present.
 * NOTE(review): the body of this function was lost in this copy and has
 * been reconstructed to the upstream bigsmp behavior (always 1) — confirm.
 */
static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int first = cpumask_first(cpumask);

	return bigsmp_cpu_to_logical_apicid(first);
}
126 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask
*cpumask
,
127 const struct cpumask
*andmask
)
132 * We're using fixed IRQ delivery, can only return one phys APIC ID.
133 * May as well be the first.
135 for_each_cpu_and(cpu
, cpumask
, andmask
) {
136 if (cpumask_test_cpu(cpu
, cpu_online_mask
))
139 if (cpu
< nr_cpu_ids
)
140 return bigsmp_cpu_to_logical_apicid(cpu
);
/* Drop the low @index_msb bits of the APIC ID to get the package ID. */
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	int pkg = cpuid_apic >> index_msb;

	return pkg;
}
/* Deliver @vector to each CPU in @mask via sequential physical IPIs. */
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}
155 static void bigsmp_send_IPI_allbutself(int vector
)
157 default_send_IPI_mask_allbutself_phys(cpu_online_mask
, vector
);
160 static void bigsmp_send_IPI_all(int vector
)
162 bigsmp_send_IPI_mask(cpu_online_mask
, vector
);
165 static int dmi_bigsmp
; /* can be set by dmi scanners */
167 static int hp_ht_bigsmp(const struct dmi_system_id
*d
)
169 printk(KERN_NOTICE
"%s detected: force use of apic=bigsmp\n", d
->ident
);
176 static const struct dmi_system_id bigsmp_dmi_table
[] = {
177 { hp_ht_bigsmp
, "HP ProLiant DL760 G2",
178 { DMI_MATCH(DMI_BIOS_VENDOR
, "HP"),
179 DMI_MATCH(DMI_BIOS_VERSION
, "P44-"),
183 { hp_ht_bigsmp
, "HP ProLiant DL740",
184 { DMI_MATCH(DMI_BIOS_VENDOR
, "HP"),
185 DMI_MATCH(DMI_BIOS_VERSION
, "P47-"),
188 { } /* NULL entry stops DMI scanning */
/* Vectors are allocated per CPU here: the domain for @cpu is { cpu }. */
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
197 static int probe_bigsmp(void)
202 dmi_check_system(bigsmp_dmi_table
);
207 struct apic apic_bigsmp
= {
210 .probe
= probe_bigsmp
,
211 .acpi_madt_oem_check
= NULL
,
212 .apic_id_registered
= bigsmp_apic_id_registered
,
214 .irq_delivery_mode
= dest_Fixed
,
215 /* phys delivery to target CPU: */
218 .target_cpus
= bigsmp_target_cpus
,
221 .check_apicid_used
= bigsmp_check_apicid_used
,
222 .check_apicid_present
= bigsmp_check_apicid_present
,
224 .vector_allocation_domain
= bigsmp_vector_allocation_domain
,
225 .init_apic_ldr
= bigsmp_init_apic_ldr
,
227 .ioapic_phys_id_map
= bigsmp_ioapic_phys_id_map
,
228 .setup_apic_routing
= bigsmp_setup_apic_routing
,
229 .multi_timer_check
= NULL
,
230 .apicid_to_node
= bigsmp_apicid_to_node
,
231 .cpu_to_logical_apicid
= bigsmp_cpu_to_logical_apicid
,
232 .cpu_present_to_apicid
= bigsmp_cpu_present_to_apicid
,
233 .apicid_to_cpu_present
= bigsmp_apicid_to_cpu_present
,
234 .setup_portio_remap
= NULL
,
235 .check_phys_apicid_present
= bigsmp_check_phys_apicid_present
,
236 .enable_apic_mode
= NULL
,
237 .phys_pkg_id
= bigsmp_phys_pkg_id
,
238 .mps_oem_check
= NULL
,
240 .get_apic_id
= bigsmp_get_apic_id
,
242 .apic_id_mask
= 0xFF << 24,
244 .cpu_mask_to_apicid
= bigsmp_cpu_mask_to_apicid
,
245 .cpu_mask_to_apicid_and
= bigsmp_cpu_mask_to_apicid_and
,
247 .send_IPI_mask
= bigsmp_send_IPI_mask
,
248 .send_IPI_mask_allbutself
= NULL
,
249 .send_IPI_allbutself
= bigsmp_send_IPI_allbutself
,
250 .send_IPI_all
= bigsmp_send_IPI_all
,
251 .send_IPI_self
= default_send_IPI_self
,
253 .trampoline_phys_low
= DEFAULT_TRAMPOLINE_PHYS_LOW
,
254 .trampoline_phys_high
= DEFAULT_TRAMPOLINE_PHYS_HIGH
,
256 .wait_for_init_deassert
= default_wait_for_init_deassert
,
258 .smp_callin_clear_local_apic
= NULL
,
259 .inquire_remote_apic
= default_inquire_remote_apic
,
261 .read
= native_apic_mem_read
,
262 .write
= native_apic_mem_write
,
263 .icr_read
= native_apic_icr_read
,
264 .icr_write
= native_apic_icr_write
,
265 .wait_icr_idle
= native_apic_wait_icr_idle
,
266 .safe_wait_icr_idle
= native_safe_apic_wait_icr_idle
,