/* arch/powerpc/kernel/cacheinfo.c */

/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <asm/prom.h>

#include "cacheinfo.h"

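/*
 * Overview (a sketch, not normative): for each online CPU this file
 * builds a tree like the following under sysfs, where index0, index1,
 * ... correspond to the caches in that CPU's local hierarchy:
 *
 *   /sys/devices/system/cpu/cpuN/cache/indexM/
 *       type, level, shared_cpu_map            (always created)
 *       size, coherency_line_size,
 *       number_of_sets,
 *       ways_of_associativity                  (created only if the
 *                                               device tree provides
 *                                               the needed properties)
 */
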
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

36 /* "index" object: each cpu's cache directory has an index
37 * subdirectory corresponding to a cache object associated with the
38 * cpu. This object's lifetime is managed via the embedded kobject.
40 struct cache_index_dir {
41 struct kobject kobj;
42 struct cache_index_dir *next; /* next index in parent directory */
43 struct cache *cache;
/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

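/*
 * For illustration, the properties above come from device tree nodes
 * shaped roughly like this hypothetical fragment (node names, labels
 * and values are examples, not taken from a real machine):
 *
 *   cpu@0 {
 *           device_type = "cpu";
 *           i-cache-size = <0x8000>;
 *           i-cache-block-size = <0x80>;
 *           d-cache-size = <0x8000>;
 *           d-cache-block-size = <0x80>;
 *           l2-cache = <&L2_0>;        // phandle to next cache level
 *   };
 *
 *   L2_0: l2-cache {
 *           device_type = "cache";
 *           cache-unified;             // marks a unified cache
 *           d-cache-size = <0x80000>;
 *           d-cache-block-size = <0x80>;
 *           d-cache-sets = <0x400>;
 *   };
 */
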
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

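/*
 * Worked example for the calculation above, with made-up numbers: a
 * 32KB cache (size = 32768) with 128 sets of 128-byte lines gives
 * (32768 / 128) / 128 = 2, i.e. 2-way set-associative.  A single set
 * (nr_sets == 1) is the conventional encoding for a fully associative
 * cache and is reported as 0 ways.
 */
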
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void __cpuinit do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

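/*
 * Note: of_find_next_cache_node() (in the powerpc OF code of this era)
 * advances to the next cache level by following the node's "l2-cache"
 * (or "next-level-cache") phandle property, so the loop above visits
 * L2, then L3, and so on until no further phandle is present.
 */
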
static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct sys_device *sysdev;
	struct kobject *kobj = NULL;

	sysdev = get_cpu_sysdev(cpu_id);
	WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
	if (!sysdev)
		goto err;

	kobj = kobject_create_and_add("cache", &sysdev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

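/*
 * Illustrative output (not from a real machine): with CPUs 0 and 1
 * online and sharing a cache, cpumask_scnprintf() above renders the
 * shared_cpu_map attribute for that cache as a hex bitmask along the
 * lines of "00000003".
 */
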
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		goto err;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto err;

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);

	return;
err:
	kfree(index_dir);
}

static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

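/*
 * cacheinfo_cpu_online() above and cacheinfo_cpu_offline() below are
 * this file's entry points, declared in cacheinfo.h; the expectation
 * is that the powerpc CPU sysfs/hotplug code (arch/powerpc/kernel/
 * sysfs.c in kernels of this vintage) calls them as CPUs come and go.
 */
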
#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

#endif /* CONFIG_HOTPLUG_CPU */