/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/ipi.h>
#include <asm/genapic.h>
#include <asm/pgtable.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;
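
/*
 * ACPI MADT OEM check, called early in boot: the SGI BIOS identifies the
 * platform via the OEM id "SGI", and the OEM table id selects the APIC
 * mode this driver runs in.
 */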
static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (!strcmp(oem_id, "SGI")) {
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			uv_system_type = UV_NON_UNIQUE_APIC;
			return 1;
		}
	}
	return 0;
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

static const struct cpumask *uv_target_cpus(void)
{
	return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
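
/*
 * Wake a secondary cpu. On UV the INIT and STARTUP messages are not put
 * on the APIC bus directly; they are encoded into the target hub's
 * UVH_IPI_INT MMR, which delivers them to that cpu's local APIC.
 */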
int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
{
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	return 0;
}
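
/*
 * Send a single IPI through the hub of the target blade. Only the low
 * 6 bits of the apicid identify the local APIC within a blade; the
 * pnode derived from the apicid routes the MMR write to the right hub.
 */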
static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long val, apicid, lapicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	lapicid = apicid & 0x3f;		/* ZZZ macro needed */
	pnode = uv_apicid_to_pnode(apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (lapicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (vector << UVH_IPI_INT_VECTOR_SHFT);
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();

	for_each_cpu(cpu, mask)
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();

	for_each_online_cpu(cpu)
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
					      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;
}
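
/*
 * On UV_NON_UNIQUE_APIC systems the id read from the local APIC is not
 * system-wide unique; get_apic_id() ORs in the per-cpu x2apic_extra_bits
 * (the pnode, installed by set_x2apic_extra_bits() below) to reconstruct
 * the full id.
 */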
static unsigned int get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __get_cpu_var(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return get_apic_id(apic_read(APIC_ID));
}

static unsigned int phys_pkg_id(int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
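
/*
 * Hook the functions above into the generic x86 APIC machinery.
 */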
struct genapic apic_x2apic_uv_x = {
	.name = "UV large system",
	.acpi_madt_oem_check = uv_acpi_madt_oem_check,
	.int_delivery_mode = dest_Fixed,
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = uv_target_cpus,
	.vector_allocation_domain = uv_vector_allocation_domain,
	.apic_id_registered = uv_apic_id_registered,
	.init_apic_ldr = uv_init_apic_ldr,
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_mask = uv_send_IPI_mask,
	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
	.send_IPI_self = uv_send_IPI_self,
	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
	.phys_pkg_id = phys_pkg_id,
	.get_apic_id = get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = (0xFFFFFFFFu),
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};
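
/*
 * Find the lowmem redirect window: of the three alias overlay registers,
 * pick the one based at address 0 and report its size and the physical
 * base it redirects to.
 */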
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	BUG();
}

static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,
			    int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << shift;
	bytes = (1UL << shift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
	       paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}
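
/*
 * Each helper below reads one overlay config MMR and, if that overlay is
 * enabled, maps its window via map_high(): write-back for the GRU,
 * uncached for CONFIG, MMR and MMIOH space.
 */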
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable)
		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}

static __init void map_config_high(int max_pnode)
{
	union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
	int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

	cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
	if (cfg.s.enable)
		map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}
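
/*
 * Query the BIOS for the RTC base frequency; if the call fails or the
 * value is implausibly low, fall back to a hardcoded guess.
 */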
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
				   &ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
		       "unable to determine platform RTC clock frequency, "
		       "guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}
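
/*
 * Arm the heartbeat timer for a cpu, via add_timer_on() so the SCIR
 * update always runs locally. The trailing check re-arms the boot cpu
 * from later callers, presumably because cpu 0 cannot start its own
 * timer when this is first reached during early boot.
 */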
static void __cpuinit uv_heartbeat_enable(int cpu)
{
	if (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;
	}

	/* check boot cpu */
	if (!uv_cpu_hub_info(0)->scir.enabled)
		uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * ZZZ hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}
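
/*
 * Boot-time setup, run once on the boot cpu: discover the blade topology
 * from the node-present MMRs, allocate the blade/node/cpu lookup tables,
 * fill in each present cpu's hub info, and map the high MMR spaces.
 */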
void __init uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int max_pnode = 0;
	unsigned long mmr_base, present;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
		   ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kmalloc(bytes, GFP_KERNEL);

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			uv_blade_info[blade].pnode = (i * 64 + j);
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			blade++;
		}
	}

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
			    &sn_coherency_id, &sn_region_size);
	uv_rtc_init();

	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
		max_pnode = max(pnode, max_pnode);

		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
			"lcpu %d, blade %d\n",
			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
			lcpu, blade);
	}

	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_config_high(max_pnode);
	map_mmioh_high(max_pnode);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	proc_mkdir("sgi_uv", NULL);
}