 * Routines to identify caches on Intel CPU.
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>

	unsigned char descriptor;

/* all the cache descriptor types we care about (no TLB or trace cache entries) */
static struct _cache_table cache_table[] __cpuinitdata =
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
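/*
 * Note: cpuid(2) packs these one-byte descriptors four to a register;
 * init_intel_cacheinfo() below matches each byte against this table and
 * accumulates the per-level cache sizes (in KB).
 */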
	CACHE_TYPE_UNIFIED = 3

union _cpuid4_leaf_eax {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;

union _cpuid4_leaf_ebx {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;

union _cpuid4_leaf_ecx {
		unsigned int		number_of_sets:32;

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long can_disable;
	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
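/*
 * One _cpuid4_info is kept per cache leaf per CPU: the raw cpuid(4) output,
 * the derived cache size, whether the index-disable interface applies
 * (can_disable) and the set of CPUs sharing this cache.
 */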
static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy"). */

	unsigned line_size : 8;
	unsigned lines_per_tag : 8;
	unsigned size_in_kb : 8;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_in_kb : 16;

	unsigned line_size : 8;
	unsigned lines_per_tag : 4;
	unsigned size_encoded : 14;

static unsigned short assocs[] __cpuinitdata = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
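/*
 * Emulated cpuid(4) leaf numbering: leaf 0 = L1 data, 1 = L1 instruction,
 * 2 = L2, 3 = L3, per the levels[] and types[] tables above
 * (type 1 = data, 2 = instruction, 3 = unified).
 */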
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		union _cpuid4_leaf_ebx *ebx,
		union _cpuid4_leaf_ecx *ecx)

	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;

	union l1_cache *l1 = &l1d;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
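	/*
	 * cpuid 0x80000005 reports the L1 data/instruction caches (ECX/EDX),
	 * 0x80000006 the L2 and L3; the l1/l2/l3 unions above provide the
	 * bit-field view of those registers.
	 */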
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;

		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;

		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
		eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
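	/*
	 * Example with hypothetical values: a 512 KB, 16-way cache with
	 * 64-byte lines encodes 512 * 1024 / 64 / 16 - 1 = 511, i.e. 512
	 * sets, matching the "value minus one" convention of real cpuid(4).
	 */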
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)

	this_leaf->can_disable = 1;

__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)

	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets + 1) *
		(ebx.split.coherency_line_size + 1) *
		(ebx.split.physical_line_partition + 1) *
		(ebx.split.ways_of_associativity + 1);
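	/*
	 * Inverse of the set-count encoding above: e.g. (511 + 1) sets *
	 * (63 + 1)-byte lines * 1 partition * (15 + 1) ways = 524288 bytes,
	 * i.e. 512 KB (hypothetical values).
	 */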
static int __cpuinit find_num_cache_leaves(void)

	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;

		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
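	/*
	 * cpuid(4) sub-leaves are probed until the reported cache type is
	 * CACHE_TYPE_NULL; the index reached at that point is the number of
	 * cache leaves.
	 */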
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)

	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;

	unsigned int cpu = c->cpu_index;

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();

		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info this_leaf;

			retval = cpuid4_cache_lookup(i, &this_leaf);

			switch(this_leaf.eax.split.level) {
				if (this_leaf.eax.split.type ==
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type ==
					new_l1i = this_leaf.size/1024;
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid >> index_msb;
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid >> index_msb;
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */

		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		if (num_cache_leaves != 0 && c->x86 == 15)

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < n ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for ( j = 0 ; j < 3 ; j++ ) {
				if (regs[j] & (1 << 31)) regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
						switch (cache_table[k].cache_type) {
							l1i += cache_table[k].size;
							l1d += cache_table[k].size;
							l2 += cache_table[k].size;
							l3 += cache_table[k].size;
							trace += cache_table[k].size;

		per_cpu(cpu_llc_id, cpu) = l2_id;
		per_cpu(cpu_llc_id, cpu) = l3_id;

	printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
	printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
	printk(", L1 D cache: %dK\n", l1d);
	printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
	printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))
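/*
 * cpuid4_info points to a per-CPU array with one entry per cache leaf;
 * CPUID4_INFO_IDX(x, y) yields the entry for cache leaf y on CPU x.
 */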
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)

	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;

	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpu_set(cpu, this_leaf->shared_cpu_map);

	index_msb = get_count_order(num_threads_sharing);

	for_each_online_cpu(i) {
		if (cpu_data(i).apicid >> index_msb ==
		    c->apicid >> index_msb) {
			cpu_set(i, this_leaf->shared_cpu_map);
			if (i != cpu && per_cpu(cpuid4_info, i)) {
				sibling_leaf = CPUID4_INFO_IDX(i, index);
				cpu_set(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)

	struct _cpuid4_info	*this_leaf, *sibling_leaf;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpu_clear(cpu, sibling_leaf->shared_cpu_map);

static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}

static void __cpuinit free_cache_attributes(unsigned int cpu)

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

static int __cpuinit detect_cache_attributes(unsigned int cpu)

	struct _cpuid4_info	*this_leaf;

	if (num_cache_leaves == 0)

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)

	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(retval < 0)) {
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
		cache_shared_cpu_map_setup(cpu, j);
	set_cpus_allowed_ptr(current, &oldmask);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	unsigned short index;

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
	return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
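/*
 * The '+ 1' bias undoes the hardware's "value minus one" encoding of line
 * size, partitions, associativity and set count; 'level' is reported as-is,
 * hence the '+ 0'.
 */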
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)

	return sprintf (buf, "%luK\n", this_leaf->size / 1024);

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,

	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;

		cpumask_t *mask = &this_leaf->shared_cpu_map;

			cpulist_scnprintf(buf, len-2, *mask):
			cpumask_scnprintf(buf, len-2, *mask);

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)

	return show_shared_cpu_map_func(leaf, 0, buf);

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)

	return show_shared_cpu_map_func(leaf, 1, buf);

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf) {
	switch(this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	return sprintf(buf, "Unknown\n");

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

static struct pci_dev *get_k8_northbridge(int node)

	struct pci_dev *dev = NULL;

	for (i = 0; i <= node; i++) {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
	} while (!pci_match_id(&k8_nb_id[0], dev));

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)

	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");

	for (i = 0; i < 2; i++) {
		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		ret += sprintf(buf, "%sEntry: %d\n", buf, i);
		ret += sprintf(buf, "%sReads:  %s\tNew Entries: %s\n",
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
			buf, (reg & 0x30000) >> 16, reg & 0xfff);
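		/*
		 * Each of the two entries is a dword at 0x1BC + i * 4 in the
		 * K8 northbridge's PCI config space: bit 31 marks reads
		 * disabled, bit 30 new entries disabled, bits 17:16 the
		 * subcache and bits 11:0 the cache index.
		 */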
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,

	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)

	if (strlen(buf) > 15)

	ret = sscanf(buf, "%x %x", &index, &val);

	dev = get_k8_northbridge(node);
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");

	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644, show_cache_disable, store_cache_disable);
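/*
 * cache_disable is the only read-write attribute (mode 0644); both handlers
 * bail out early unless this_leaf->can_disable was set, which only happens
 * via amd_check_l3_disable() on AMD family 0x10 and later parts.
 */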
static struct attribute * default_attrs[] = {
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,

static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)

	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static ssize_t store(struct kobject * kobj, struct attribute * attr,
		     const char * buf, size_t count)

	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);

		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),

static struct sysfs_ops sysfs_ops = {

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)

	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)

	if (num_cache_leaves == 0)

	err = detect_cache_attributes(cpu);

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))

	cpuid4_cache_sysfs_exit(cpu);

static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)

	unsigned int cpu = sys_dev->id;
	struct _index_kobject *this_object;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &sys_dev->kobj, "%s", "cache");
		cpuid4_cache_sysfs_exit(cpu);

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      per_cpu(cache_kobject, cpu),
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);

	cpu_set(cpu, cache_dev_map);

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)

	unsigned int cpu = sys_dev->id;

	if (per_cpu(cpuid4_info, cpu) == NULL)
	if (!cpu_isset(cpu, cache_dev_map))
	cpu_clear(cpu, cache_dev_map);

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)

	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
	.notifier_call = cacheinfo_cpu_callback,

static int __cpuinit cache_sysfs_init(void)

	if (num_cache_leaves == 0)

	for_each_online_cpu(i) {
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);

	register_hotcpu_notifier(&cacheinfo_cpu_notifier);

device_initcall(cache_sysfs_init);