#ifdef CONFIG_CPU_SUP_INTEL

#define MAX_EXTRA_REGS 2

/*
 * Per register state.
 */
struct er_account {
	int			ref;		/* reference count */
	unsigned int		extra_reg;	/* extra MSR number */
	u64			extra_config;	/* extra MSR config */
};

/*
 * Per core state.
 * This is used to coordinate shared registers between HT threads.
 */
struct intel_percore {
	raw_spinlock_t		lock;		/* protect structure */
	struct er_account	regs[MAX_EXTRA_REGS];
	int			refcnt;		/* number of threads */
	unsigned		core_id;
};
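/*
 * The OFFCORE_RSP MSRs are per physical core while the rest of the PMU
 * state is per logical CPU, so HT siblings must agree on their contents.
 * Each sibling points cpu_hw_events::per_core at the same intel_percore,
 * and er_account records, per extra MSR, the config that currently owns
 * it and how many events reference it.
 */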
/*
 * Intel PerfMon, used on Core and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};
static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
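/*
 * INTEL_EVENT_CONSTRAINT(event, cntmask) restricts an event to the
 * general-purpose counters set in cntmask: 0x1 means PMC0 only, 0x2
 * PMC1 only, 0x3 either of PMC0/PMC1.  FIXED_EVENT_CONSTRAINT(event, n)
 * ties an event to fixed-purpose counter n.
 */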
static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_nehalem_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	EXTRA_REG_END
};
static struct event_constraint intel_nehalem_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_snb_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};
static struct extra_reg intel_westmere_extra_regs[] =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff),
	EXTRA_REG_END
};
static struct event_constraint intel_westmere_percore_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xb7, 0),
	INTEL_EVENT_CONSTRAINT(0xbb, 0),
	EVENT_CONSTRAINT_END
};
static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};
static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}
static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	/*
	 * TBD: Need Off-core Response Performance Monitoring support
	 */
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01bb,
		/* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01bb,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
/*
 * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3
 */

#define DMND_DATA_RD	(1 << 0)
#define DMND_RFO	(1 << 1)
#define DMND_WB		(1 << 3)
#define PF_DATA_RD	(1 << 4)
#define PF_DATA_RFO	(1 << 5)
#define RESP_UNCORE_HIT	(1 << 8)
#define RESP_MISS	(0xf600) /* non uncore hit */
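/*
 * An OFFCORE_RSP mask is the OR of one or more request-type bits with
 * one or more response-type bits; DMND_DATA_RD|RESP_MISS, for example,
 * selects demand data reads that were not satisfied by an uncore hit.
 */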
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_DATA_RD|RESP_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = DMND_RFO|DMND_WB|RESP_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
		[ C(RESULT_MISS)   ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
	},
 },
};
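/*
 * These values are not event codes: when a generic cache event maps to
 * the OFFCORE_RESPONSE event (0x01b7, see nehalem_hw_cache_event_ids
 * below), the matching entry here supplies the request/response mask
 * that gets written into the OFFCORE_RSP MSR for that event.
 */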
static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};
static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}
static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}
/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}
static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
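/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per
 * fixed counter (enable-OS, enable-USR, ANY, PMI), which is why both
 * intel_pmu_disable_fixed() above and intel_pmu_enable_fixed() below
 * mask a 0xf field shifted by idx * 4.
 */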
static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}
static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}
/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}
static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}
/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}
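/*
 * Note that intel_pmu_handle_irq() runs with the PMU globally disabled:
 * counters are frozen by intel_pmu_disable_all() on entry and re-enabled
 * only after the overflow status reads back clear, so overflows arriving
 * mid-handler are caught by the "again" loop rather than lost.
 */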
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}
static struct event_constraint *
intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
	struct event_constraint *c;
	struct intel_percore *pc;
	struct er_account *era;
	int i;
	int free_slot;
	int found;

	if (!x86_pmu.percore_constraints || hwc->extra_alloc)
		return NULL;

	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
		if (e != c->code)
			continue;

		/*
		 * Allocate resource per core.
		 */
		pc = cpuc->per_core;
		if (!pc)
			break;
		c = &emptyconstraint;
		raw_spin_lock(&pc->lock);
		free_slot = -1;
		found = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
				/* Allow sharing same config */
				if (hwc->extra_config == era->extra_config) {
					era->ref++;
					cpuc->percore_used = 1;
					hwc->extra_alloc = 1;
					c = NULL;
				}
				/* else conflict */
				found = 1;
				break;
			} else if (era->ref == 0 && free_slot == -1)
				free_slot = i;
		}
		if (!found && free_slot != -1) {
			era = &pc->regs[free_slot];
			era->ref = 1;
			era->extra_reg = hwc->extra_reg;
			era->extra_config = hwc->extra_config;
			cpuc->percore_used = 1;
			hwc->extra_alloc = 1;
			c = NULL;
		}
		raw_spin_unlock(&pc->lock);
		return c;
	}

	return NULL;
}
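/*
 * Three outcomes are possible above: the event claimed a free slot or
 * joined an identical existing config (c = NULL, so the caller falls
 * through to the generic constraints), a sibling owns the MSR with a
 * different config (c = &emptyconstraint, the event cannot be scheduled
 * for now), or no percore rule matched at all (NULL again).
 */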
static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	c = intel_percore_constraints(cpuc, event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}
static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
					struct perf_event *event)
{
	struct extra_reg *er;
	struct intel_percore *pc;
	struct er_account *era;
	struct hw_perf_event *hwc = &event->hw;
	int i, allref;

	if (!cpuc->percore_used)
		return;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (hwc->config & er->config_mask))
			continue;

		pc = cpuc->per_core;
		raw_spin_lock(&pc->lock);
		for (i = 0; i < MAX_EXTRA_REGS; i++) {
			era = &pc->regs[i];
			if (era->ref > 0 &&
			    era->extra_config == hwc->extra_config &&
			    era->extra_reg == er->msr) {
				era->ref--;
				hwc->extra_alloc = 0;
				break;
			}
		}
		allref = 0;
		for (i = 0; i < MAX_EXTRA_REGS; i++)
			allref += pc->regs[i].ref;
		if (allref == 0)
			cpuc->percore_used = 0;
		raw_spin_unlock(&pc->lock);
		break;
	}
}
static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.precise_ip &&
	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
		/*
		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
		 * (0x003c) so that we can use it with PEBS.
		 *
		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
		 * PEBS capable. However we can use INST_RETIRED.ANY_P
		 * (0x00c0), which is a PEBS capable event, to get the same
		 * count.
		 *
		 * INST_RETIRED.ANY_P counts the number of cycles that retires
		 * CNTMASK instructions. By setting CNTMASK to a value (16)
		 * larger than the maximum number of instructions that can be
		 * retired per cycle (4) and then inverting the condition, we
		 * count all cycles that retire 16 or less instructions, which
		 * is every cycle.
		 *
		 * Thereby we gain a PEBS capable cycle counter.
		 */
		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */

		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
		event->hw.config = alt_config;
	}

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}
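/*
 * Decoding alt_config 0x108000c0 above: event select 0xc0
 * (INST_RETIRED.ANY_P), INV (bit 23) set, and a counter mask of 0x10
 * (16) in bits 31:24, i.e. count cycles retiring fewer than 16
 * instructions, which is every cycle.
 */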
static __initconst const struct x86_pmu core_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};
static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	if (!cpu_has_ht_siblings())
		return NOTIFY_OK;

	cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
				      GFP_KERNEL, cpu_to_node(cpu));
	if (!cpuc->per_core)
		return NOTIFY_BAD;

	raw_spin_lock_init(&cpuc->per_core->lock);
	cpuc->per_core->core_id = -1;
	return NOTIFY_OK;
}
static void intel_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	int core_id = topology_core_id(cpu);
	int i;

	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();

	if (!cpu_has_ht_siblings())
		return;

	for_each_cpu(i, topology_thread_cpumask(cpu)) {
		struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;

		if (pc && pc->core_id == core_id) {
			kfree(cpuc->per_core);
			cpuc->per_core = pc;
			break;
		}
	}

	cpuc->per_core->core_id = core_id;
	cpuc->per_core->refcnt++;
}
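/*
 * The first thread of a core to come up keeps its own allocation and
 * stamps it with the core id; later siblings find that allocation in
 * the loop above, free their own copy, and bump the shared refcnt.
 */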
static void intel_pmu_cpu_dying(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct intel_percore *pc = cpuc->per_core;

	if (pc) {
		if (pc->core_id == -1 || --pc->refcnt == 0)
			kfree(pc);
		cpuc->per_core = NULL;
	}

	fini_debug_store_on_cpu(cpu);
}
static __initconst const struct x86_pmu intel_pmu = {
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.put_event_constraints	= intel_put_event_constraints,
	.cpu_prepare		= intel_pmu_cpu_prepare,
	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};
static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67  could be worked around by restricting the OS/USR flags.
	 * AJ69  could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}
static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		default:
			return -ENODEV;
		}
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();
	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.percore_constraints = intel_westmere_percore_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		pr_cont("Westmere events, ");
		break;

	case 42: /* SandyBridge */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_events;
		pr_cont("SandyBridge events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}
#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */