/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <asm/prom.h>

#include "cacheinfo.h"
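/*
 * Overview (illustrative path, for orientation): for each online cpu
 * this file builds a small kobject tree under the cpu's sysdev
 * directory, e.g. /sys/devices/system/cpu/cpu0/cache/index1/{type,
 * level,size,...}, with one "indexN" subdirectory per cache in the
 * cpu's local hierarchy.
 */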
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
        struct kobject *kobj; /* bare (not embedded) kobject for cache
                               * directory */
        struct cache_index_dir *index; /* list of index objects */
};
/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
        struct kobject kobj;
        struct cache_index_dir *next; /* next index in parent directory */
        struct cache *cache;
};
/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
        const char *name;
        const char *size_prop;

        /* Allow for both [di]-cache-line-size and
         * [di]-cache-block-size properties.  According to the PowerPC
         * Processor binding, -line-size should be provided if it
         * differs from the cache block size (that which is operated
         * on by cache instructions), so we look for -line-size first.
         * See cache_get_line_size(). */

        const char *line_size_props[2];
        const char *nr_sets_prop;
};
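
/* For illustration only -- a device tree cache node might carry
 * properties like the following (hypothetical values; real firmware
 * varies by platform):
 *
 *      l2-cache {
 *              device_type = "cache";
 *              cache-unified;
 *              d-cache-size = <0x80000>;       // 512KB
 *              d-cache-block-size = <0x80>;    // 128-byte blocks
 *              d-cache-sets = <0x200>;
 *      };
 */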
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2
static const struct cache_type_info cache_type_info[] = {
        {
                /* PowerPC Processor binding says the [di]-cache-*
                 * must be equal on unified caches, so just use
                 * d-cache properties. */
                .name            = "Unified",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
        {
                .name            = "Instruction",
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        },
        {
                .name            = "Data",
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
        struct device_node *ofnode;    /* OF node for this cache, may be cpu */
        struct cpumask shared_cpu_map; /* online CPUs using this cache */
        int type;                      /* split cache disambiguation */
        int level;                     /* level not explicit in device tree */
        struct list_head list;         /* global list of cache objects */
        struct cache *next_local;      /* next cache of >= level */
};
static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);
static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
        return container_of(k, struct cache_index_dir, kobj);
}
static const char *cache_type_string(const struct cache *cache)
{
        return cache_type_info[cache->type].name;
}
static void __cpuinit cache_init(struct cache *cache, int type, int level,
                                 struct device_node *ofnode)
{
        cache->type = type;
        cache->level = level;
        cache->ofnode = of_node_get(ofnode);
        INIT_LIST_HEAD(&cache->list);
        list_add(&cache->list, &cache_list);
}
static struct cache *__cpuinit new_cache(int type, int level,
                                         struct device_node *ofnode)
{
        struct cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (cache)
                cache_init(cache, type, level, ofnode);

        return cache;
}
static void release_cache_debugcheck(struct cache *cache)
{
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list)
                WARN_ONCE(iter->next_local == cache,
                          "cache for %s(%s) refers to cache for %s(%s)\n",
                          iter->ofnode->full_name,
                          cache_type_string(iter),
                          cache->ofnode->full_name,
                          cache_type_string(cache));
}
static void release_cache(struct cache *cache)
{
        if (!cache)
                return;

        pr_debug("freeing L%d %s cache for %s\n", cache->level,
                 cache_type_string(cache), cache->ofnode->full_name);

        release_cache_debugcheck(cache);
        list_del(&cache->list);
        of_node_put(cache->ofnode);
        kfree(cache);
}
static void cache_cpu_set(struct cache *cache, int cpu)
{
        struct cache *next = cache;

        while (next) {
                WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
                          "CPU %i already accounted in %s(%s)\n",
                          cpu, next->ofnode->full_name,
                          cache_type_string(next));
                cpumask_set_cpu(cpu, &next->shared_cpu_map);
                next = next->next_local;
        }
}
static int cache_size(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const u32 *cache_size;

        propname = cache_type_info[cache->type].size_prop;

        cache_size = of_get_property(cache->ofnode, propname, NULL);
        if (!cache_size)
                return -ENODEV;

        *ret = *cache_size;
        return 0;
}
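
/* Note: the property values above are raw cells from the device tree;
 * since powerpc is big-endian, matching the device tree encoding, a
 * u32 cell can be dereferenced directly without byte-swapping. */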
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
        unsigned int size;

        if (cache_size(cache, &size))
                return -ENODEV;

        *ret = size / 1024;
        return 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
        const u32 *line_size;
        int i, lim;

        lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[cache->type].line_size_props[i];
                line_size = of_get_property(cache->ofnode, propname, NULL);
                if (line_size)
                        break;
        }

        if (!line_size)
                return -ENODEV;

        *ret = *line_size;
        return 0;
}
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
        const char *propname;
        const u32 *nr_sets;

        propname = cache_type_info[cache->type].nr_sets_prop;

        nr_sets = of_get_property(cache->ofnode, propname, NULL);
        if (!nr_sets)
                return -ENODEV;

        *ret = *nr_sets;
        return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
        unsigned int line_size;
        unsigned int nr_sets;
        unsigned int size;

        if (cache_nr_sets(cache, &nr_sets))
                goto err;

        /* If the cache is fully associative, there is no need to
         * check the other properties.
         */
        if (nr_sets == 1) {
                *ret = 0;
                return 0;
        }

        if (cache_get_line_size(cache, &line_size))
                goto err;
        if (cache_size(cache, &size))
                goto err;

        if (!(nr_sets > 0 && size > 0 && line_size > 0))
                goto err;

        *ret = (size / nr_sets) / line_size;
        return 0;
err:
        return -ENODEV;
}
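
/* Worked example (hypothetical values): a 32KB cache with 128 sets
 * and 64-byte lines yields (32768 / 128) / 64 = 4 ways. */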
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
        struct cache *iter;

        if (cache->type == CACHE_TYPE_UNIFIED)
                return cache;

        list_for_each_entry(iter, &cache_list, list)
                if (iter->ofnode == cache->ofnode && iter->next_local == cache)
                        return iter;

        return cache;
}
/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
        struct cache *cache = NULL;
        struct cache *iter;

        list_for_each_entry(iter, &cache_list, list) {
                if (iter->ofnode != node)
                        continue;
                cache = cache_find_first_sibling(iter);
                break;
        }

        return cache;
}
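
/* The PowerPC Processor binding flags a unified cache with a
 * (typically empty) "cache-unified" property; its mere presence in
 * the node, not its contents, is what the test below relies on. */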
static bool cache_node_is_unified(const struct device_node *np)
{
        return of_get_property(np, "cache-unified", NULL);
}
static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node,
                                                            int level)
{
        struct cache *cache;

        pr_debug("creating L%d ucache for %s\n", level, node->full_name);

        cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

        return cache;
}
static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node,
                                                          int level)
{
        struct cache *dcache, *icache;

        pr_debug("creating L%d dcache and icache for %s\n", level,
                 node->full_name);

        dcache = new_cache(CACHE_TYPE_DATA, level, node);
        icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

        if (!dcache || !icache)
                goto err;

        dcache->next_local = icache;

        return dcache;
err:
        release_cache(dcache);
        release_cache(icache);
        return NULL;
}
static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node,
                                                    int level)
{
        struct cache *cache;

        if (cache_node_is_unified(node))
                cache = cache_do_one_devnode_unified(node, level);
        else
                cache = cache_do_one_devnode_split(node, level);

        return cache;
}
static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node,
                                                           int level)
{
        struct cache *cache;

        cache = cache_lookup_by_node(node);

        WARN_ONCE(cache && cache->level != level,
                  "cache level mismatch on lookup (got %d, expected %d)\n",
                  cache->level, level);

        if (!cache)
                cache = cache_do_one_devnode(node, level);

        return cache;
}
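
/* link_cache_lists() splices "bigger" (the next, higher-level cache)
 * onto the end of "smaller"'s local chain, bailing out early if it is
 * already linked there -- which happens when sibling cpus share a
 * downstream cache. */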
static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
        while (smaller->next_local) {
                if (smaller->next_local == bigger)
                        return; /* already linked */
                smaller = smaller->next_local;
        }

        smaller->next_local = bigger;
}
static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
        WARN_ON_ONCE(cache->level != 1);
        WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}
static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
        struct device_node *subcache_node;
        int level = cache->level;

        do_subsidiary_caches_debugcheck(cache);

        while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
                struct cache *subcache;

                level++;
                subcache = cache_lookup_or_instantiate(subcache_node, level);
                of_node_put(subcache_node);
                if (!subcache)
                        break;

                link_cache_lists(cache, subcache);
                cache = subcache;
        }
}
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cpu_cache = NULL;

        pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                goto out;

        cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
        if (!cpu_cache)
                goto out;

        do_subsidiary_caches(cpu_cache);

        cache_cpu_set(cpu_cache, cpu_id);
out:
        of_node_put(cpu_node);

        return cpu_cache;
}
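
/* Create the per-cpu "cache" kobject/directory under the cpu's sysdev
 * node -- e.g. /sys/devices/system/cpu/cpu0/cache (path shown for
 * illustration) -- under which the index subdirectories are added. */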
static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct sys_device *sysdev;
        struct kobject *kobj = NULL;

        sysdev = get_cpu_sysdev(cpu_id);
        WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
        if (!sysdev)
                goto err;

        kobj = kobject_create_and_add("cache", &sysdev->kobj);
        if (!kobj)
                goto err;

        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto err;

        cache_dir->kobj = kobj;

        WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

        per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

        return cache_dir;
err:
        kobject_put(kobj);
        return NULL;
}
static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);

        pr_debug("freeing index directory for L%d %s cache\n",
                 index->cache->level, cache_type_string(index->cache));

        kfree(index);
}
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);

        return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(k);

        return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int size_kb;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_size_kb(cache, &size_kb))
                return -ENODEV;

        return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
        __ATTR(size, 0444, size_show, NULL);
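
/* Illustrative usage from userspace (hypothetical values):
 *
 *      # cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *      32K
 */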
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int line_size;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_get_line_size(cache, &line_size))
                return -ENODEV;

        return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
        __ATTR(coherency_line_size, 0444, line_size_show, NULL);
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int nr_sets;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_nr_sets(cache, &nr_sets))
                return -ENODEV;

        return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
        __ATTR(number_of_sets, 0444, nr_sets_show, NULL);
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        unsigned int associativity;
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        if (cache_associativity(cache, &associativity))
                return -ENODEV;

        return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
        __ATTR(ways_of_associativity, 0444, associativity_show, NULL);
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache *cache;

        cache = index_kobj_to_cache(k);

        return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
        __ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;

        return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
        __ATTR(level, 0444, level_show, NULL);
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
        struct cache_index_dir *index;
        struct cache *cache;
        int len;
        int n = 0;

        index = kobj_to_cache_index_dir(k);
        cache = index->cache;
        len = PAGE_SIZE - 2;

        if (len > 1) {
                n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_level_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        NULL,
};
/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
        &cache_size_attr,
        &cache_line_size_attr,
        &cache_nr_sets_attr,
        &cache_assoc_attr,
};
static struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .release = cache_index_release,
        .sysfs_ops = &cache_index_ops,
        .default_attrs = cache_index_default_attrs,
};
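
/* Lifetime note: the final kobject_put() on an index kobject lands in
 * cache_index_release() via cache_index_type, which frees the
 * containing cache_index_dir -- see remove_index_dirs() in the
 * hotplug code below. */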
static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
        const char *cache_name;
        const char *cache_type;
        struct cache *cache;
        char *buf;
        int i;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return;

        cache = dir->cache;
        cache_name = cache->ofnode->full_name;
        cache_type = cache_type_string(cache);

        /* We don't want to create an attribute that can't provide a
         * meaningful value.  Check the return value of each optional
         * attribute's ->show method before registering the
         * attribute.
         */
        for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
                struct kobj_attribute *attr;
                ssize_t rc;

                attr = cache_index_opt_attrs[i];

                rc = attr->show(&dir->kobj, attr, buf);
                if (rc <= 0) {
                        pr_debug("not creating %s attribute for "
                                 "%s(%s) (rc = %zd)\n",
                                 attr->attr.name, cache_name,
                                 cache_type, rc);
                        continue;
                }
                if (sysfs_create_file(&dir->kobj, &attr->attr))
                        pr_debug("could not create %s attribute for %s(%s)\n",
                                 attr->attr.name, cache_name, cache_type);
        }

        kfree(buf);
}
static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index,
                                                 struct cache_dir *cache_dir)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                goto err;

        index_dir->cache = cache;

        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc)
                goto err;

        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;

        cacheinfo_create_index_opt_attrs(index_dir);

        return;
err:
        kfree(index_dir);
}
static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id,
                                               struct cache *cache_list)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int index = 0;

        cache_dir = cacheinfo_create_cache_dir(cpu_id);
        if (!cache_dir)
                return;

        cache = cache_list;
        while (cache) {
                cacheinfo_create_index_dir(cache, index, cache_dir);
                index++;
                cache = cache->next_local;
        }
}
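
/* The entry points below are declared in cacheinfo.h and are expected
 * to be called from the arch cpu online/offline paths with cpu
 * hotplug serialization in effect (see the cache_list comment
 * above). */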
void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
        struct cache *cache;

        cache = cache_chain_instantiate(cpu_id);
        if (!cache)
                return;

        cacheinfo_sysfs_populate(cpu_id, cache);
}
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
        struct device_node *cpu_node;
        struct cache *cache;

        cpu_node = of_get_cpu_node(cpu_id, NULL);
        WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
        if (!cpu_node)
                return NULL;

        cache = cache_lookup_by_node(cpu_node);
        of_node_put(cpu_node);

        return cache;
}
static void remove_index_dirs(struct cache_dir *cache_dir)
{
        struct cache_index_dir *index;

        index = cache_dir->index;

        while (index) {
                struct cache_index_dir *next;

                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
}
static void remove_cache_dir(struct cache_dir *cache_dir)
{
        remove_index_dirs(cache_dir);

        kobject_put(cache_dir->kobj);

        kfree(cache_dir);
}
static void cache_cpu_clear(struct cache *cache, int cpu)
{
        while (cache) {
                struct cache *next = cache->next_local;

                WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
                          "CPU %i not accounted in %s(%s)\n",
                          cpu, cache->ofnode->full_name,
                          cache_type_string(cache));

                cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

                /* Release the cache object if all the cpus using it
                 * are offline */
                if (cpumask_empty(&cache->shared_cpu_map))
                        release_cache(cache);

                cache = next;
        }
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
        struct cache_dir *cache_dir;
        struct cache *cache;

        /* Prevent userspace from seeing inconsistent state - remove
         * the sysfs hierarchy first */
        cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

        /* careful, sysfs population may have failed */
        if (cache_dir) {
                remove_cache_dir(cache_dir);

                per_cpu(cache_dir_pcpu, cpu_id) = NULL;
        }

        /* clear the CPU's bit in its cache chain, possibly freeing
         * cache objects */
        cache = cache_lookup_by_cpu(cpu_id);
        if (cache)
                cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */