#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */
struct intel_shared_regs {
	struct er_account	regs[EXTRA_REG_MAX];
	int			refcnt;		/* per-core: #HT threads */
	unsigned		core_id;	/* per-core: core id */
};
/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
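/*
 * Note: the raw values above use the architectural PERFEVTSEL encoding
 * (unit mask << 8) | event select. For example, 0x412e selects event 0x2e
 * (LONGEST_LAT_CACHE) with unit mask 0x41, i.e. last-level cache misses,
 * while 0x4f2e uses unit mask 0x4f to count all LLC references.
 */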
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};
static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};
static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_ALL_DRAM		(NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
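/*
 * Example expansion of the composite masks above:
 * NHM_DMND_READ|NHM_L3_ACCESS = 0x0001 | (0x0700 | 0xf000) = 0xf701, and
 * NHM_DMND_READ|NHM_L3_MISS   = 0x0001 | 0xf000            = 0xf001.
 * These are the bit patterns used for the LL read access/miss entries in
 * the extra-regs table below.
 */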
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
};
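/*
 * Note on usage: the 0x01b7 (OFFCORE_RESPONSE_0) entries in the cache-event
 * tables above and below only select the event; the request/response bits
 * come from this extra-regs table and are routed to MSR_OFFCORE_RSP_0 via
 * the shared-regs/extra_reg machinery further down in this file.
 */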
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	/* the magic events named in the errata note above, 0x4300B1 twice */
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the steps below:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
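/*
 * For reference, each magic value above decomposes as
 * ARCH_PERFMON_EVENTSEL_ENABLE (bit 22) | EVENTSEL_OS (bit 17) |
 * EVENTSEL_USR (bit 16) | event code, e.g. 0x4300B5 = 0x430000 | event 0xB5
 * with a zero unit mask.
 */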
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
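/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL:
 * bit 0 enables ring-0 counting, bit 1 ring-3 counting, bit 2 the ANY-thread
 * qualifier (v3+), bit 3 PMI on overflow. E.g. counting user+kernel with PMI
 * on fixed counter 1 yields bits = 0xb, placed at bits 7:4 of the MSR.
 */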
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler. As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;

	/* already allocated shared msr */
	if (reg->alloc || !cpuc->shared_regs)
		return &unconstrained;

	era = &cpuc->shared_regs->regs[reg->idx];

	raw_spin_lock(&era->lock);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/* no need to reallocate during incremental event scheduling */
		reg->alloc = 1;

		/*
		 * All events using extra_reg are unconstrained.
		 * Avoids calling x86_get_event_constraints()
		 *
		 * Must revisit if extra_reg controlling events
		 * ever have constraints. Worst case we go through
		 * the regular event constraint table.
		 */
		c = &unconstrained;
	}

	raw_spin_unlock(&era->lock);

	return c;
}
static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * only put the constraint if the extra reg was actually
	 * allocated. Also takes care of events which do
	 * not use an extra shared reg.
	 */
	if (!reg->alloc)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}
static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL;
	struct hw_perf_event_extra *xreg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE)
		c = __intel_shared_reg_get_constraints(cpuc, xreg);

	return c;
}
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retire
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or fewer instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}
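	/*
	 * The 0x108000c0 encoding above decodes as event 0xc0
	 * (INST_RETIRED.ANY_P) with CMASK = 0x10 in bits 31:24 and the INV
	 * bit (bit 23) set: count cycles retiring fewer than 16 instructions,
	 * which is every cycle.
	 */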
	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}

	return regs;
}
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!x86_pmu.extra_regs)
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	if (!cpuc->shared_regs)
		return;

	for_each_cpu(i, topology_thread_cpumask(cpu)) {
		struct intel_shared_regs *pc;

		pc = per_cpu(cpu_hw_events, i).shared_regs;
		if (pc && pc->core_id == core_id) {
			kfree(cpuc->shared_regs);
			cpuc->shared_regs = pc;
			break;
		}
	}

	cpuc->shared_regs->core_id = core_id;
	cpuc->shared_regs->refcnt++;
}
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *  AJ67  - PEBS may experience CPL leaks
	 *  AJ68  - PEBS PMI may be delayed by one event
	 *  AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *  AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		default:
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		if (ebx & 0x40) {
			/*
			 * Erratum AAJ80 detected, we work it around by using
			 * the BR_MISP_EXEC.ANY event. This will over-count
			 * branch-misses, but it's still much better than the
			 * architectural event which is often completely bogus:
			 */
			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;

			pr_cont("erratum AAJ80 worked around, ");
		}
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x1803fb1;

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x18001b1;

		pr_cont("SandyBridge events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
		break;
	}
	return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */