/*
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
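/*
 * The raw values above follow the PERFEVTSEL layout: bits 0-7 are the event
 * select and bits 8-15 the unit mask, e.g. 0x412e is event 0x2e with umask
 * 0x41 (LLC misses).
 */
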
static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	EVENT_EXTRA_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
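/*
 * The hw_cache_event_ids tables below are indexed as
 * [cache level][operation][result], i.e. C(L1D)/C(OP_READ)/C(RESULT_MISS)
 * and friends. A 0 entry means the generic event is not supported on this
 * CPU, while -1 marks combinations that make no sense for that cache.
 */
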
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
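/*
 * The extra-reg values in the table below are composed from these bits: a
 * request-type part (NHM_DMND_*/NHM_PF_*) ORed with a response part
 * (NHM_L3_*, NHM_LOCAL/NHM_REMOTE). For example, an LL read access is
 * NHM_DMND_READ|NHM_L3_ACCESS, i.e. demand data reads that hit or miss
 * anywhere in the L3 response domain.
 */
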
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};
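/*
 * For the 0x01b7/0x01bb OFFCORE_RESPONSE entries in the hw_cache_event_ids
 * tables, the actual access/miss qualification lives in the extra register;
 * the table above supplies the MSR_OFFCORE_RSP_* value paired with each
 * generic cache event.
 */
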
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The Errata requires below steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
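/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter (0x1 ring-0, 0x2 ring-3, 0x4 AnyThread, 0x8 PMI), which is why the
 * disable/enable helpers above and below mask with 0xf << (idx * 4).
 */
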
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * must disable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	/*
	 * must enable before any actual event
	 * because any event may be combined with LBR
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
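/*
 * I.e. a fixed-period branch-instructions event with a period of 1 is
 * steered to the BTS "counter" instead of a generic PMC; frequency-based
 * sampling never qualifies because its period is adjusted at runtime.
 */
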
static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return false;

	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	}

	if (event->hw.extra_reg.idx == orig_idx)
		return false;

	return true;
}
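/*
 * 0x01b7 and 0x01bb are the OFFCORE_RESPONSE_0/1 event encodings; when the
 * first MSR_OFFCORE_RSP_* register is already taken with a conflicting
 * value, the event is rewritten to use the sibling register instead.
 */
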
/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int orig_idx = reg->idx;

	/* already allocated shared msr */
	if (reg->alloc)
		return NULL; /* call x86_get_event_constraint() */

again:
	era = &cpuc->shared_regs->regs[reg->idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/* no need to reallocate during incremental event scheduling */
		reg->alloc = 1;

		/*
		 * need to call x86_get_event_constraint()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else if (intel_try_alt_er(event, orig_idx)) {
		raw_spin_unlock_irqrestore(&era->lock, flags);
		goto again;
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * only put constraint if extra reg was actually
	 * allocated. Also takes care of event which do
	 * not use an extra shared reg
	 */
	if (!reg->alloc)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	struct event_constraint *c = NULL, *d;
	struct hw_perf_event_extra *xreg, *breg;

	xreg = &event->hw.extra_reg;
	if (xreg->idx != EXTRA_REG_NONE) {
		c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
		if (c == &emptyconstraint)
			return c;
	}
	breg = &event->hw.branch_reg;
	if (breg->idx != EXTRA_REG_NONE) {
		d = __intel_shared_reg_get_constraints(cpuc, event, breg);
		if (d == &emptyconstraint) {
			__intel_shared_reg_put_constraints(cpuc, xreg);
			c = d;
		}
	}
	return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_shared_regs_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct hw_perf_event_extra *reg;

	reg = &event->hw.extra_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);

	reg = &event->hw.branch_reg;
	if (reg->idx != EXTRA_REG_NONE)
		__intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	intel_put_shared_regs_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (intel_pmu_needs_lbr_smpl(event)) {
		ret = intel_pmu_setup_lbr_filter(event);
		if (ret)
			return ret;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
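/*
 * The alt_config trick above relies on X86_CONFIG building a raw PERFEVTSEL
 * value from named fields: X86_CONFIG(.event=0xc0, .inv=1, .cmask=16) sets
 * event select 0xc0 with the invert flag and a counter mask of 16.
 */
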
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	if (x86_pmu.guest_get_msrs)
		return x86_pmu.guest_get_msrs(nr);
	*nr = 0;
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;

	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;

	*nr = 1;
	return arr;
}
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];

		arr[idx].msr = x86_pmu_config_addr(idx);
		arr[idx].host = arr[idx].guest = 0;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		arr[idx].host = arr[idx].guest =
			event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

		if (event->attr.exclude_host)
			arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		else if (event->attr.exclude_guest)
			arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	}

	*nr = x86_pmu.num_counters;
	return arr;
}

static void core_pmu_enable_event(struct perf_event *event)
{
	if (!event->attr.exclude_host)
		x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask) ||
				cpuc->events[idx]->attr.exclude_host)
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

PMU_FORMAT_ATTR(event,	"config:0-7");
PMU_FORMAT_ATTR(umask,	"config:8-15");
PMU_FORMAT_ATTR(edge,	"config:18");
PMU_FORMAT_ATTR(pc,	"config:19");
PMU_FORMAT_ATTR(any,	"config:21"); /* v3 + */
PMU_FORMAT_ATTR(inv,	"config:23");
PMU_FORMAT_ATTR(cmask,	"config:24-31");

static struct attribute *intel_arch_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,
	NULL,
};
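/*
 * These sysfs format attributes are what lets tools like perf parse raw
 * event syntax such as "perf stat -e cpu/event=0x3c,umask=0x00/" into the
 * corresponding PERFEVTSEL bit fields.
 */
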
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= core_pmu_enable_all,
	.enable			= core_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
	.guest_get_msrs		= core_guest_get_msrs,
	.format_attrs		= intel_arch_formats_attr,
};

struct intel_shared_regs *allocate_shared_regs(int cpu)
{
	struct intel_shared_regs *regs;
	int i;

	regs = kzalloc_node(sizeof(struct intel_shared_regs),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (regs) {
		/*
		 * initialize the locks to keep lockdep happy
		 */
		for (i = 0; i < EXTRA_REG_MAX; i++)
			raw_spin_lock_init(&regs->regs[i].lock);

		regs->core_id = -1;
	}

	return regs;
}

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
		return NOTIFY_OK;

	cpuc->shared_regs = allocate_shared_regs(cpu);
	if (!cpuc->shared_regs)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	cpuc->lbr_sel = NULL;

	if (!cpuc->shared_regs)
		return;

	if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
		for_each_cpu(i, topology_thread_cpumask(cpu)) {
			struct intel_shared_regs *pc;

			pc = per_cpu(cpu_hw_events, i).shared_regs;
			if (pc && pc->core_id == core_id) {
				cpuc->kfree_on_online = cpuc->shared_regs;
				cpuc->shared_regs = pc;
				break;
			}
		}
		cpuc->shared_regs->core_id = core_id;
		cpuc->shared_regs->refcnt++;
	}

	if (x86_pmu.lbr_sel_map)
		cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
}
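/*
 * I.e. unless ERF_NO_HT_SHARING is set, HT siblings on the same core end up
 * pointing at a single intel_shared_regs instance (matched via core_id), so
 * the offcore response MSRs are arbitrated across both hardware threads.
 */
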
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_shared_regs *pc;

	pc = cpuc->shared_regs;
	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->shared_regs = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}

static void intel_pmu_flush_branch_stack(void)
{
	/*
	 * Intel LBR does not tag entries with the
	 * PID of the current task, then we need to
	 * flush it on ctxsw.
	 * For now, we simply reset it
	 */
	intel_pmu_lbr_reset();
}

PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");

static struct attribute *intel_arch3_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_pc.attr,
	&format_attr_any.attr,
	&format_attr_inv.attr,
	&format_attr_cmask.attr,

	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
	NULL,
};

static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,

	.format_attrs		= intel_arch3_formats_attr,

	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
	.guest_get_msrs		= intel_guest_get_msrs,
	.flush_branch_stack	= intel_pmu_flush_branch_stack,
};

static __init void intel_clovertown_quirk(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs_constraints = NULL;
}

static __init void intel_sandybridge_quirk(void)
{
	pr_warn("PEBS disabled due to CPU errata\n");
	x86_pmu.pebs_constraints = NULL;
}

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

	/* disable events that CPUID reports as not present */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}

static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected, we work it around by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}

__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	unsigned int unused;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl		= ebx.full;
	x86_pmu.events_mask_len		= eax.split.mask_length;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
	case 47: /* 32 nm Xeon E7 */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.er_flags |= ERF_HAS_RSP_1;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		x86_add_quirk(intel_sandybridge_quirk);
	case 45: /* SandyBridge, "Romley-EP" */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.er_flags |= ERF_HAS_RSP_1;
		x86_pmu.er_flags |= ERF_NO_HT_SHARING;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");