/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>
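
/*
 * CPU_BITS and NR_MAG are used throughout this file but their
 * definitions did not survive in this excerpt; the values below are
 * assumptions matching the SYSIB 15.1.2 layout (a 64-bit CPU mask per
 * CPU entry and six magnitude fields in the header).
 */
#define CPU_BITS	64
#define NR_MAG		6

/*
 * Function codes for the perform-topology-function (PTF) instruction:
 * request horizontal or vertical CPU polarization, or check whether a
 * topology change report is pending.
 */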
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
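
/*
 * The structures below mirror the topology information block returned
 * by stsi(15, 1, 2): a header (struct tl_info) followed by a list of
 * container and CPU entries (union tl_entry). Fields that were lost
 * from this excerpt (the pp bit field, mnest) are reconstructed from
 * how the rest of the code uses them.
 */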
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;	/* polarization, copied to smp_cpu_polarization */
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;	/* nesting level: 0 means a CPU entry */
	struct tl_cpu cpu;
	struct tl_container container;
};

struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];
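
/*
 * Return the set of CPUs that share a core with @cpu. When topology
 * support is disabled or not available, fall back to reporting all
 * possible CPUs so that scheduler domain setup still works.
 */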
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!topology_enabled || !machine_has_topology)
		return cpu_possible_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	/* A cpu not found in any core at least shares a core with itself. */
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}
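
/*
 * A topology-list CPU entry covers up to CPU_BITS CPUs starting at
 * ->origin. The hardware numbers the mask from the most significant
 * bit, while find_first_bit()/find_next_bit() count from the least
 * significant one, hence the mirrored index when computing the
 * physical CPU address below.
 */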
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) {
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}

static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	return (union tl_entry *)((struct tl_cpu *)tle + 1);
}
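
/*
 * Rebuild the core list from a topology information block: container
 * entries at nesting level 1 advance to the next core node, CPU entries
 * (level 0) fill in the current core's mask, higher container levels
 * are skipped. Anything unexpected disables topology support.
 */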
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			/* Unknown entry type: give up on topology support. */
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

/* Issue the PTF instruction and return its condition code. */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}
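
/*
 * Re-read the machine topology and propagate the result. This hook is
 * invoked when the scheduler rebuilds its domains; a change uevent is
 * sent for every online cpu sysdev so user space can re-read the
 * topology attributes.
 */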
void arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
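
/*
 * Machines without the topology change interrupt are polled instead:
 * every 60 seconds the timer below asks via PTF_CHECK whether a change
 * report is pending and, if so, schedules a topology update.
 */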
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}
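
/*
 * External interrupt 0x2005 is raised when the machine reports a
 * topology change; the handler just kicks the update work.
 */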
static void topology_interrupt(__u16 code)
{
	schedule_work(&topology_work);
}
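
/*
 * Topology support is off by default and has to be enabled explicitly
 * with "topology=on" on the kernel command line.
 */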
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "on", 2))
		return 0;
	topology_enabled = 1;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	if (machine_has_topology_irq) {
		rc = register_external_interrupt(0x2005, topology_interrupt);
		if (rc)
			goto out;
		/* Enable the external interrupt subclass; CR0 bit assumed. */
		ctl_set_bit(0, 8);
	} else
		set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);
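
/*
 * Early boot setup: probe the configuration-topology facilities via
 * stfle, allocate the SYSIB 15.1.2 buffer and one core_info node per
 * core from bootmem.
 */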
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	if (facility_bits & (1ULL << 51))
		machine_has_topology_irq = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	printk(KERN_INFO "CPU topology:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
	machine_has_topology_irq = 0;
}