/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *              Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *              Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct node *sysfs_nodes;
static struct ia64_cpu *sysfs_cpus;
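
/*
 * Register the sysfs entry for one logical cpu.  On NUMA kernels the
 * cpu device is parented to its home node, so it also shows up under
 * the matching /sys/devices/system/node/nodeN directory.
 */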
int arch_register_cpu(int num)
{
        struct node *parent = NULL;

#ifdef CONFIG_NUMA
        parent = &sysfs_nodes[cpu_to_node(num)];
#endif /* CONFIG_NUMA */

#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
        /*
         * If CPEI cannot be re-targeted and this is the CPEI target,
         * don't create the control file.
         */
        if (!can_cpei_retarget() && is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.no_control = 1;
#endif

        return register_cpu(&sysfs_cpus[num].cpu, num, parent);
}

#ifdef CONFIG_HOTPLUG_CPU
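
/*
 * Unregister the sysfs entry for one logical cpu, detaching it from
 * its parent node on NUMA kernels.
 */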
void arch_unregister_cpu(int num)
{
        struct node *parent = NULL;

#ifdef CONFIG_NUMA
        int node = cpu_to_node(num);
        parent = &sysfs_nodes[node];
#endif /* CONFIG_NUMA */

        return unregister_cpu(&sysfs_cpus[num].cpu, parent);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /*CONFIG_HOTPLUG_CPU*/
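
/*
 * Boot-time initialization: allocate the node and cpu arrays, then
 * register every online node and every present cpu with sysfs.
 */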
static int __init topology_init(void)
{
        int i, err = 0;

        sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
        if (!sysfs_nodes) {
                err = -ENOMEM;
                goto out;
        }

        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_node(&sysfs_nodes[i], i, 0)))
                        goto out;
        }

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus) {
                err = -ENOMEM;
                goto out;
        }

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs
 */

/*
 * A bunch of string arrays for pretty printing
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"               /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",                     /* reserved */
        ""                      /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y) (&all_cpu_cache_info[x].cache_leaves[y])
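
/*
 * Build the mask of cpus sharing this cache leaf.  On a cpu with a
 * single core and a single thread the leaf is private; otherwise PAL
 * is queried once per sharing logical processor, and each reported
 * core/thread pair on the same socket is added to the mask.
 */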
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
        return;
}
#endif
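
/*
 * sysfs "show" handlers: each one formats a single attribute of a
 * cache leaf into buf and returns the number of bytes written.
 */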
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}
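
/*
 * PAL does not report the set count directly; it follows from
 * size = sets * associativity * line size, so divide the cache size
 * by the associativity and the line size (pcci_line_size is the log2
 * of the line size in bytes).
 */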
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;

        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS + 1, shared_cpu_map);
        len += sprintf(buf + len, "\n");
        return len;
}
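
/*
 * PAL reports type 1 (instruction) or 2 (data); a data cache with the
 * unified bit set indexes entry 3 ("Unified") in cache_types[].
 */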
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;

        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};
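
/*
 * Shorthand for declaring a read-only cache attribute named _name
 * whose handler is the matching show_##_name function above.
 */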
#define define_one_ro(_name) \
        static struct cache_attr _name = \
                __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)
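
/*
 * Generic show dispatcher: recover the cache_attr and cache_info from
 * the embedded attribute/kobject and invoke the attribute's handler.
 */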
static ssize_t cache_show(struct kobject *kobj, struct attribute *attr,
                char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
        .show = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};
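
/*
 * Free the cache leaves for one cpu and clear its "cache" kobject so
 * that a later cache_add_dev() starts from a clean state.
 */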
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
        return;
}

static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        s64 status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        /* Query PAL for each level, data (2) before instruction (1). */
        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        /* Already registered for this cpu: nothing to do. */
        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        /* Run on the target cpu, since PAL must be called locally. */
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed(current, oldmask);
        if (unlikely(retval < 0))
                return retval;

        all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
        kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
        all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
        retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
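
        /*
         * Register one "indexN" kobject per cache leaf; on failure,
         * roll back everything registered so far for this cpu.
         */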
        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
                kobject_set_name(&(this_object->kobj), "index%1lu", i);
                this_object->kobj.ktype = &cache_ktype;
                retval = kobject_register(&(this_object->kobj));
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_unregister(
                                        &(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        break;
                }
        }
        return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged or removed, set up or tear down its
 * cache kobjects as necessary.
 */
static int cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};
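
/*
 * Seed the cache sysfs entries for all cpus that are already online,
 * then register the notifier to cover future hotplug events.
 */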
static int __cpuinit cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
                                (void *)(long)i);
        }

        register_cpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);