x86: Unify APERF/MPERF support
[wandboard.git] / arch/x86/kernel/cpu/addon_cpuid_features.c
/*
 * Routines to identify additional cpu features that are scattered in
 * cpuid space.
 */
#include <linux/cpu.h>

#include <asm/pat.h>
#include <asm/processor.h>

#include <asm/apic.h>
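
/*
 * Each entry below maps one scattered CPUID bit, identified by its leaf
 * ("level"), output register and bit position, to an X86_FEATURE_* flag.
 */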
struct cpuid_bit {
	u16 feature;
	u8 reg;
	u8 bit;
	u32 level;
};

enum cpuid_regs {
	CR_EAX = 0,
	CR_ECX,
	CR_EDX,
	CR_EBX
};

void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{
	u32 max_level;
	u32 regs[4];
	const struct cpuid_bit *cb;

	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
		{ X86_FEATURE_IDA,   CR_EAX, 1, 0x00000006 },
		{ X86_FEATURE_ARAT,  CR_EAX, 2, 0x00000006 },
		{ X86_FEATURE_CPB,   CR_EDX, 9, 0x80000007 },
		{ X86_FEATURE_NPT,   CR_EDX, 0, 0x8000000a },
		{ X86_FEATURE_LBRV,  CR_EDX, 1, 0x8000000a },
		{ X86_FEATURE_SVML,  CR_EDX, 2, 0x8000000a },
		{ X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a },
		{ 0, 0, 0, 0 }
	};

	for (cb = cpuid_bits; cb->feature; cb++) {

		/* Verify that the level is valid */
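		/*
		 * cpuid_eax(level & 0xffff0000) returns the highest leaf
		 * implemented in that CPUID range (basic or extended), so a
		 * result outside [level, level | 0xffff] means the wanted
		 * leaf is not present on this CPU.
		 */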
		max_level = cpuid_eax(cb->level & 0xffff0000);
		if (max_level < cb->level ||
		    max_level > (cb->level | 0xffff))
			continue;

		cpuid(cb->level, &regs[CR_EAX], &regs[CR_EBX],
		      &regs[CR_ECX], &regs[CR_EDX]);

		if (regs[cb->reg] & (1 << cb->bit))
			set_cpu_cap(c, cb->feature);
	}

	/*
	 * common AMD/Intel features
	 */
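	/* CPUID.06H:ECX[0]: the IA32_APERF/IA32_MPERF MSR pair is available. */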
	if (c->cpuid_level >= 6) {
		if (cpuid_ecx(6) & 0x1)
			set_cpu_cap(c, X86_FEATURE_APERFMPERF);
	}
}

/* leaf 0xb SMT level */
#define SMT_LEVEL	0

/* leaf 0xb sub-leaf types */
#define INVALID_TYPE	0
#define SMT_TYPE	1
#define CORE_TYPE	2
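
/*
 * Leaf 0xb reports, per sub-leaf: the level type in ECX[15:8], the number
 * of APIC-id bits to shift out to reach the next level in EAX[4:0], and
 * the number of logical processors at this level in EBX[15:0].
 */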
#define LEAFB_SUBTYPE(ecx)		(((ecx) >> 8) & 0xff)
#define BITS_SHIFT_NEXT_LEVEL(eax)	((eax) & 0x1f)
#define LEVEL_MAX_SIBLINGS(ebx)		((ebx) & 0xffff)

/*
 * Check for extended topology enumeration cpuid leaf 0xb and if it
 * exists, use it for populating initial_apicid and cpu topology
 * detection.
 */
void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int eax, ebx, ecx, edx, sub_index;
	unsigned int ht_mask_width, core_plus_mask_width;
	unsigned int core_select_mask, core_level_siblings;
	static bool printed;

	if (c->cpuid_level < 0xb)
		return;

	cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);

	/*
	 * check if the cpuid leaf 0xb is actually implemented.
	 */
	if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
		return;

	set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);

	/*
	 * initial apic id, which also represents 32-bit extended x2apic id.
	 */
	c->initial_apicid = edx;

	/*
	 * Populate HT related information from sub-leaf level 0.
	 */
	core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
	core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
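	/* These SMT-level values stand unless a CORE-type sub-leaf is found below. */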

	sub_index = 1;
	do {
		cpuid_count(0xb, sub_index, &eax, &ebx, &ecx, &edx);

		/*
		 * Check for the Core type in the implemented sub leaves.
		 */
		if (LEAFB_SUBTYPE(ecx) == CORE_TYPE) {
			core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
			core_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
			break;
		}

		sub_index++;
	} while (LEAFB_SUBTYPE(ecx) != INVALID_TYPE);

	core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;

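	/*
	 * core_select_mask keeps the APIC-id bits between the SMT bits and
	 * the package bits, i.e. the core number within the package.
	 */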
	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
						 & core_select_mask;
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
	/*
	 * Reinit the apicid, now that we have extended initial_apicid.
	 */
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);

	c->x86_max_cores = (core_level_siblings / smp_num_siblings);

	if (!printed) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
		printed = 1;
	}

	return;
#endif
}