x86, cpu: Make init_scattered_cpuid_features() consider cpuid subleaves
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / arch/x86/kernel/cpu/addon_cpuid_features.c
blob 03cf24a3d933cd23281004cf8c9fc9561870eff5
/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <asm/apic.h>
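
/*
 * A scattered feature is described by the CPUID leaf (level) and
 * sub-leaf to query, the output register (reg) and bit within it to
 * test, and the X86_FEATURE_* flag to set when that bit is found.
 */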
struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
	u32 sub_leaf;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;
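
	/*
	 * Leaf 0x00000006 is the thermal and power management leaf,
	 * 0x80000007 the advanced power management (APM) leaf and
	 * 0x8000000a the SVM feature identification leaf; every entry
	 * here uses sub-leaf 0.
	 */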
	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
		{ X86_FEATURE_IDA,        CR_EAX, 1, 0x00000006, 0 },
		{ X86_FEATURE_ARAT,       CR_EAX, 2, 0x00000006, 0 },
		{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
		{ X86_FEATURE_EPB,        CR_ECX, 3, 0x00000006, 0 },
		{ X86_FEATURE_CPB,        CR_EDX, 9, 0x80000007, 0 },
		{ X86_FEATURE_NPT,        CR_EDX, 0, 0x8000000a, 0 },
		{ X86_FEATURE_LBRV,       CR_EDX, 1, 0x8000000a, 0 },
		{ X86_FEATURE_SVML,       CR_EDX, 2, 0x8000000a, 0 },
		{ X86_FEATURE_NRIPS,      CR_EDX, 3, 0x8000000a, 0 },
		{ 0, 0, 0, 0, 0 }
	};

	for (cb = cpuid_bits; cb->feature; cb++) {

		/* Verify that the level is valid */
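		/*
		 * cpuid_eax(level & 0xffff0000) returns the highest
		 * implemented leaf in the same range (basic or extended);
		 * a value outside [level, level | 0xffff] means this leaf
		 * is not present on the CPU.
		 */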
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
			    &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}
}
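
/*
 * The scattered bits set above behave like any other X86_FEATURE_*
 * flag and can be tested later with cpu_has()/boot_cpu_has().
 */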

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2

#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)
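
/*
 * In leaf 0xb output, ECX[15:8] gives the type of the queried
 * sub-leaf (SMT, Core or invalid), EAX[4:0] the number of APIC id
 * bits to shift right to reach the next topology level, and
 * EBX[15:0] the number of logical processors at this level.
 */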

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;
	static bool printed;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * Check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
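
	/*
	 * At the SMT level, smp_num_siblings is the number of logical
	 * CPUs per core and ht_mask_width the number of low APIC id
	 * bits that distinguish them; the core-level values start out
	 * identical and are overridden below if a Core-type sub-leaf
	 * is found.
	 */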
	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);
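
	/*
	 * core_select_mask keeps only the APIC id bits between the SMT
	 * and core levels; e.g. with ht_mask_width == 1 and
	 * core_plus_mask_width == 5 it is 0x1f >> 1 == 0xf, so APIC id
	 * bits [4:1] select the core within a package.
	 */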
	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
						& core_select_mask;
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
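
	/*
	 * core_level_siblings counts the logical CPUs in the package
	 * and smp_num_siblings the logical CPUs per core, so their
	 * quotient is the number of cores per package.
	 */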
	c->x86_max_cores = (core_level_siblings / smp_num_siblings);

	if (!printed) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
		printed = 1;
	}
	return;
#endif
}