/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 * Copyright (C) 2007-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
 */
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kernel.h>

#include <asm/ds.h>

#include "ds_selftest.h"
/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the name of the configuration */
	const char		*name;
	/* the size of one pointer-typed field in the DS structure and
	   in the BTS and PEBS buffers in bytes;
	   this covers the first 8 DS fields related to buffer management. */
	unsigned char		sizeof_ptr_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char		sizeof_rec[2];
	/* a series of bit-masks to control various features indexed
	 * by enum ds_feature */
	unsigned long		ctl[dsf_ctl_max];
};
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);

#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())
#define MAX_SIZEOF_DS	(12 * 8)	/* maximal size of a DS configuration */
#define MAX_SIZEOF_BTS	(3 * 8)		/* maximal size of a BTS record */
#define DS_ALIGNMENT	(1 << 3)	/* BTS and PEBS buffer alignment */
#define BTS_CONTROL \
	(ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | ds_cfg.ctl[dsf_bts_user] |\
	 ds_cfg.ctl[dsf_bts_overflow])
/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
struct ds_tracer {
	/* the DS context (partially) owned by this tracer */
	struct ds_context	*context;
	/* the buffer provided on ds_request() and its size in bytes */
	void			*buffer;
	size_t			size;
};

struct bts_tracer {
	/* the common DS part */
	struct ds_tracer	ds;
	/* the trace including the DS configuration */
	struct bts_trace	trace;
	/* buffer overflow notification function */
	bts_ovfl_callback_t	ovfl;
};

struct pebs_tracer {
	/* the common DS part */
	struct ds_tracer	ds;
	/* the trace including the DS configuration */
	struct pebs_trace	trace;
	/* buffer overflow notification function */
	pebs_ovfl_callback_t	ovfl;
};
/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 *
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};
static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}
static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_ptr_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}
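/*
 * A minimal sketch, not part of the original interface: the accessors
 * above treat the first 8 DS fields as a 4-field BTS region followed
 * by a 4-field PEBS region.  With 64bit fields (sizeof_ptr_field == 8)
 * the PEBS index, for example, sits at 8 * (ds_index + 4 * ds_pebs) ==
 * 8 * 5 == 40 bytes into the DS save area.  A hypothetical helper
 * making the offset computation explicit:
 */
static inline unsigned long ds_field_offset(enum ds_qualifier qual,
					    enum ds_field field)
{
	/* illustrative only; each region holds 4 pointer-sized fields */
	return ds_cfg.sizeof_ptr_field * (field + (4 * qual));
}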
/*
 * Locking is done only for allocating BTS or PEBS resources.
 */
static DEFINE_SPINLOCK(ds_lock);
/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * The tracers count essentially gives the number of ds contexts for a
 * certain type of allocation.
 */
static atomic_t tracers = ATOMIC_INIT(0);
static inline void get_tracer(struct task_struct *task)
{
	if (task)
		atomic_inc(&tracers);
	else
		atomic_dec(&tracers);
}

static inline void put_tracer(struct task_struct *task)
{
	if (task)
		atomic_dec(&tracers);
	else
		atomic_inc(&tracers);
}
static inline int check_tracer(struct task_struct *task)
{
	return task ?
		(atomic_read(&tracers) >= 0) :
		(atomic_read(&tracers) <= 0);
}
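/*
 * A minimal sketch, not part of the original file: with two per-thread
 * tracers active the count is +2, so a per-cpu request (task == NULL)
 * fails check_tracer() until both are released.  A hypothetical helper
 * combining the check with the count update (callers hold ds_lock):
 */
static inline int try_get_tracer(struct task_struct *task)
{
	if (!check_tracer(task))
		return -EPERM;	/* mixed per-cpu/per-thread allocation */
	get_tracer(task);
	return 0;
}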
/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
struct ds_context {
	/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
	unsigned char		ds[MAX_SIZEOF_DS];
	/* the owner of the BTS and PEBS configuration, respectively */
	struct bts_tracer	*bts_master;
	struct pebs_tracer	*pebs_master;
	/* use count */
	unsigned long		count;
	/* a pointer to the context location inside the thread_struct
	 * or the per_cpu context array */
	struct ds_context	**this;
	/* a pointer to the task owning this context, or NULL, if the
	 * context is owned by a cpu */
	struct task_struct	*task;
};

static DEFINE_PER_CPU(struct ds_context *, system_context_array);

#define system_context per_cpu(system_context_array, smp_processor_id())
static inline struct ds_context *ds_get_context(struct task_struct *task)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &system_context);
	struct ds_context *context = NULL;
	struct ds_context *new_context = NULL;
	unsigned long irq;

	/* Chances are small that we already have a context. */
	new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
	if (!new_context)
		return NULL;

	spin_lock_irqsave(&ds_lock, irq);

	context = *p_context;
	if (!context) {
		context = new_context;

		context->this = p_context;
		context->task = task;

		if (task)
			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

		if (!task || (task == current))
			wrmsrl(MSR_IA32_DS_AREA, (unsigned long)context->ds);

		*p_context = context;
	}

	context->count++;

	spin_unlock_irqrestore(&ds_lock, irq);

	if (context != new_context)
		kfree(new_context);

	return context;
}
static inline void ds_put_context(struct ds_context *context)
{
	unsigned long irq;

	if (!context)
		return;

	spin_lock_irqsave(&ds_lock, irq);

	if (--context->count) {
		spin_unlock_irqrestore(&ds_lock, irq);
		return;
	}

	*(context->this) = NULL;

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

	spin_unlock_irqrestore(&ds_lock, irq);

	kfree(context);
}
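/*
 * Illustrative usage, not part of the original file: context lifetime
 * is bracketed by get/put.  A hypothetical user would do:
 *
 *	struct ds_context *ctx = ds_get_context(task);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...
 *	ds_put_context(ctx);
 *
 * The first get for a task allocates the context and, if the task is
 * current, points MSR_IA32_DS_AREA at ctx->ds; the last put frees the
 * context and clears the MSR again.
 */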
/*
 * Call the tracer's callback on a buffer overflow.
 *
 * context: the ds context
 * qual:    the buffer type
 */
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
	switch (qual) {
	case ds_bts:
		if (context->bts_master &&
		    context->bts_master->ovfl)
			context->bts_master->ovfl(context->bts_master);
		break;
	case ds_pebs:
		if (context->pebs_master &&
		    context->pebs_master->ovfl)
			context->pebs_master->ovfl(context->pebs_master);
		break;
	}
}
/*
 * Write raw data into the BTS or PEBS buffer.
 *
 * The remainder of any partially written record is zeroed out.
 *
 * context: the DS context
 * qual:    the buffer type
 * record:  the data to write
 * size:    the size of the data
 */
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
		    const void *record, size_t size)
{
	int bytes_written = 0;

	if (!record)
		return -EINVAL;

	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * Write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and
		 *   absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size -= write_size;
		bytes_written += write_size;

		adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		if (index >= int_th)
			ds_overflow(context, qual);
	}

	return bytes_written;
}
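/*
 * A minimal sketch, not part of the original flow: buffer capacity in
 * whole records, as ds_init_ds_trace() below computes it.  With
 * illustrative numbers: 24-byte BTS records (sizeof_ptr_field == 8)
 * in a 4096-byte buffer give 170 records (4080 bytes); the trailing
 * 16 bytes are never written.
 */
static inline unsigned long ds_trace_capacity(enum ds_qualifier qual,
					      size_t size)
{
	/* illustrative only; whole records, a trailing partial slot is unused */
	return size / ds_cfg.sizeof_rec[qual];
}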
/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 *
 * We use two levels of abstraction:
 * - the raw data level defined here
 * - an arch-independent level defined in ds.h
 */
enum bts_field {
	bts_from,
	bts_to,
	bts_flags,

	bts_qual		= bts_from,
	bts_jiffies		= bts_to,
	bts_pid			= bts_flags,

	bts_qual_mask		= (bts_qual_max - 1),
	bts_escape		= ((unsigned long)-1 & ~bts_qual_mask)
};
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
	base += (ds_cfg.sizeof_ptr_field * field);
	return *(unsigned long *)base;
}
static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
	base += (ds_cfg.sizeof_ptr_field * field);
	(*(unsigned long *)base) = val;
}
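/*
 * A minimal sketch, not part of the original file: a raw branch record
 * is three pointer-sized fields (from, to, flags).  An escape record
 * reuses the same slots: the from-slot holds bts_escape plus the
 * qualifier, the to-slot the jiffies timestamp, the flags-slot the
 * pid.  A hypothetical predicate mirroring the test in bts_read():
 */
static inline int bts_is_escape(const char *at)
{
	/* illustrative only */
	return (bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape;
}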
/*
 * The raw BTS data is architecture dependent.
 *
 * For higher-level users, we give an arch-independent view.
 * - ds.h defines struct bts_struct
 * - bts_read translates one raw bts record into a bts_struct
 * - bts_write translates one bts_struct into the raw format and
 *   writes it into the top of the parameter tracer's buffer.
 *
 * return: bytes read/written on success; -Eerrno, otherwise
 */
static int bts_read(struct bts_tracer *tracer, const void *at,
		    struct bts_struct *out)
{
	if (!tracer)
		return -EINVAL;

	if (at < tracer->trace.ds.begin)
		return -EINVAL;

	if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
		return -EINVAL;

	memset(out, 0, sizeof(*out));
	if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
		out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
		out->variant.timestamp.jiffies = bts_get(at, bts_jiffies);
		out->variant.timestamp.pid = bts_get(at, bts_pid);
	} else {
		out->qualifier = bts_branch;
		out->variant.lbr.from = bts_get(at, bts_from);
		out->variant.lbr.to   = bts_get(at, bts_to);

		if (!out->variant.lbr.from && !out->variant.lbr.to)
			out->qualifier = bts_invalid;
	}

	return ds_cfg.sizeof_rec[ds_bts];
}
static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
{
	unsigned char raw[MAX_SIZEOF_BTS];

	if (!tracer)
		return -EINVAL;

	if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
		return -EOVERFLOW;

	switch (in->qualifier) {
	case bts_invalid:
		bts_set(raw, bts_from, 0);
		bts_set(raw, bts_to, 0);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_branch:
		bts_set(raw, bts_from, in->variant.lbr.from);
		bts_set(raw, bts_to, in->variant.lbr.to);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_task_arrives:
	case bts_task_departs:
		bts_set(raw, bts_qual, (bts_escape | in->qualifier));
		bts_set(raw, bts_jiffies, in->variant.timestamp.jiffies);
		bts_set(raw, bts_pid, in->variant.timestamp.pid);
		break;
	default:
		return -EINVAL;
	}

	return ds_write(tracer->ds.context, ds_bts, raw,
			ds_cfg.sizeof_rec[ds_bts]);
}
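/*
 * Illustrative usage, not part of the original file: writing a branch
 * record translates the arch-independent struct into the raw format
 * above.  With hypothetical addresses from_addr/to_addr:
 *
 *	struct bts_struct in = {
 *		.qualifier		= bts_branch,
 *		.variant.lbr.from	= from_addr,
 *		.variant.lbr.to		= to_addr
 *	};
 *	bts_write(tracer, &in);
 *
 * ds_switch_to() below uses the same path to inject bts_task_arrives
 * and bts_task_departs timestamp records on context switch.
 */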
static void ds_write_config(struct ds_context *context,
			    struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *ds = context->ds;

	ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
	ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
	ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
	ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
}
static void ds_read_config(struct ds_context *context,
			   struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *ds = context->ds;

	cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
	cfg->top   = (void *)ds_get(ds, qual, ds_index);
	cfg->end   = (void *)ds_get(ds, qual, ds_absolute_maximum);
	cfg->ith   = (void *)ds_get(ds, qual, ds_interrupt_threshold);
}
static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
			     void *base, size_t size, size_t ith,
			     unsigned int flags)
{
	unsigned long buffer, adj;

	/*
	 * Adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * We checked the size at the very beginning; we have enough
	 * space to do the adjustment.
	 */
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
	buffer += adj;
	size   -= adj;

	trace->n    = size / ds_cfg.sizeof_rec[qual];
	trace->size = ds_cfg.sizeof_rec[qual];

	size = (trace->n * trace->size);

	trace->begin = (void *)buffer;
	trace->top   = trace->begin;
	trace->end   = (void *)(buffer + size);
	/*
	 * The value for 'no threshold' is -1, which will set the
	 * threshold outside of the buffer, just like we want it.
	 */
	trace->ith   = (void *)(buffer + size - ith);

	trace->flags = flags;
}
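/*
 * Worked example with illustrative numbers, not from the original
 * file: for base == 0x1003 and size == 1024, ALIGN(0x1003, 8) ==
 * 0x1008, so adj == 5 and 1019 bytes remain; with 24-byte BTS records
 * that yields trace->n == 42 records, i.e. 1008 usable bytes from
 * 0x1008 to 0x13f8.
 */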
static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
		      enum ds_qualifier qual, struct task_struct *task,
		      void *base, size_t size, size_t th, unsigned int flags)
{
	struct ds_context *context;
	int error;

	error = -EOPNOTSUPP;
	if (!ds_cfg.sizeof_rec[qual])
		goto out;

	error = -EINVAL;
	if (!base)
		goto out;

	/* we require some space to do alignment adjustments below */
	error = -EINVAL;
	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
		goto out;

	if (th != (size_t)-1) {
		th *= ds_cfg.sizeof_rec[qual];

		error = -EINVAL;
		if (size <= th)
			goto out;
	}

	tracer->buffer = base;
	tracer->size = size;

	error = -ENOMEM;
	context = ds_get_context(task);
	if (!context)
		goto out;
	tracer->context = context;

	ds_init_ds_trace(trace, qual, base, size, th, flags);

	error = 0;
 out:
	return error;
}
struct bts_tracer *ds_request_bts(struct task_struct *task,
				  void *base, size_t size,
				  bts_ovfl_callback_t ovfl, size_t th,
				  unsigned int flags)
{
	struct bts_tracer *tracer;
	unsigned long irq;
	int error;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_bts, task, base, size, th, flags);
	if (error < 0)
		goto out_tracer;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->bts_master)
		goto out_put_tracer;
	tracer->ds.context->bts_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	tracer->trace.read  = bts_read;
	tracer->trace.write = bts_write;

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_bts(tracer);

	return tracer;

 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
 out_tracer:
	kfree(tracer);
 out:
	return ERR_PTR(error);
}
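/*
 * Illustrative usage, not part of the original file: a hypothetical
 * caller tracing both rings on the current cpu, without an overflow
 * threshold, might do (error handling elided):
 *
 *	void *buf = kmalloc(BUF_SIZE, GFP_KERNEL);
 *	struct bts_tracer *bt =
 *		ds_request_bts(NULL, buf, BUF_SIZE, NULL, (size_t)-1,
 *			       BTS_KERNEL | BTS_USER);
 *	...
 *	ds_release_bts(bt);
 *	kfree(buf);
 *
 * task == NULL selects per-cpu tracing, ovfl must be NULL since
 * overflow notification is not yet implemented, and th == (size_t)-1
 * requests 'no threshold'.  BUF_SIZE is the caller's choice, at least
 * DS_ALIGNMENT plus one record.
 */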
struct pebs_tracer *ds_request_pebs(struct task_struct *task,
				    void *base, size_t size,
				    pebs_ovfl_callback_t ovfl, size_t th,
				    unsigned int flags)
{
	struct pebs_tracer *tracer;
	unsigned long irq;
	int error;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_pebs, task, base, size, th, flags);
	if (error < 0)
		goto out_tracer;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->pebs_master)
		goto out_put_tracer;
	tracer->ds.context->pebs_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	ds_resume_pebs(tracer);

	return tracer;

 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
 out_tracer:
	kfree(tracer);
 out:
	return ERR_PTR(error);
}
void ds_release_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return;

	ds_suspend_bts(tracer);

	WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
	tracer->ds.context->bts_master = NULL;

	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);

	kfree(tracer);
}
void ds_suspend_bts(struct bts_tracer *tracer)
{
	struct task_struct *task;

	if (!tracer)
		return;

	task = tracer->ds.context->task;

	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() & ~BTS_CONTROL);

	if (task) {
		task->thread.debugctlmsr &= ~BTS_CONTROL;

		if (!task->thread.debugctlmsr)
			clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
}
void ds_resume_bts(struct bts_tracer *tracer)
{
	struct task_struct *task;
	unsigned long control;

	if (!tracer)
		return;

	task = tracer->ds.context->task;

	control = ds_cfg.ctl[dsf_bts];
	if (!(tracer->trace.ds.flags & BTS_KERNEL))
		control |= ds_cfg.ctl[dsf_bts_kernel];
	if (!(tracer->trace.ds.flags & BTS_USER))
		control |= ds_cfg.ctl[dsf_bts_user];

	if (task) {
		task->thread.debugctlmsr |= control;
		set_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}

	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() | control);
}
void ds_release_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return;

	ds_suspend_pebs(tracer);

	WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
	tracer->ds.context->pebs_master = NULL;

	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);

	kfree(tracer);
}
void ds_suspend_pebs(struct pebs_tracer *tracer)
{

}

void ds_resume_pebs(struct pebs_tracer *tracer)
{

}
const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return NULL;

	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	return &tracer->trace;
}
const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return NULL;

	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	tracer->trace.reset_value =
		*(u64 *)(tracer->ds.context->ds +
			 (ds_cfg.sizeof_ptr_field * 8));

	return &tracer->trace;
}
int ds_reset_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	tracer->trace.ds.top = tracer->trace.ds.begin;

	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);

	return 0;
}
int ds_reset_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	tracer->trace.ds.top = tracer->trace.ds.begin;

	ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
	       (unsigned long)tracer->trace.ds.top);

	return 0;
}
int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
	if (!tracer)
		return -EINVAL;

	*(u64 *)(tracer->ds.context->ds +
		 (ds_cfg.sizeof_ptr_field * 8)) = value;

	return 0;
}
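/*
 * A minimal sketch, not part of the original file: the PEBS counter
 * reset value is the 9th pointer-sized DS field, directly after the 8
 * buffer-management fields, hence the sizeof_ptr_field * 8 offset used
 * above and in ds_read_pebs().  A hypothetical accessor:
 */
static inline u64 *ds_pebs_reset_field(struct ds_context *context)
{
	/* illustrative only */
	return (u64 *)(context->ds + (ds_cfg.sizeof_ptr_field * 8));
}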
static const struct ds_configuration ds_cfg_netburst = {
	.name = "Netburst",
	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
	.ctl[dsf_bts_kernel]	= (1 << 5),
	.ctl[dsf_bts_user]	= (1 << 6),
};
static const struct ds_configuration ds_cfg_pentium_m = {
	.name = "Pentium M",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
};
static const struct ds_configuration ds_cfg_core2_atom = {
	.name = "Core 2/Atom",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
	.ctl[dsf_bts_kernel]	= (1 << 9),
	.ctl[dsf_bts_user]	= (1 << 10),
};
static void __cpuinit
ds_configure(const struct ds_configuration *cfg,
	     struct cpuinfo_x86 *cpu)
{
	unsigned long nr_pebs_fields = 0;

	printk(KERN_INFO "[ds] using %s configuration\n", cfg->name);

#ifdef __i386__
	nr_pebs_fields = 10;
#else
	nr_pebs_fields = 18;
#endif

	memset(&ds_cfg, 0, sizeof(ds_cfg));
	ds_cfg = *cfg;

	ds_cfg.sizeof_ptr_field =
		(cpu_has(cpu, X86_FEATURE_DTES64) ? 8 : 4);

	ds_cfg.sizeof_rec[ds_bts]  = ds_cfg.sizeof_ptr_field * 3;
	ds_cfg.sizeof_rec[ds_pebs] = ds_cfg.sizeof_ptr_field * nr_pebs_fields;

	if (!cpu_has(cpu, X86_FEATURE_BTS)) {
		ds_cfg.sizeof_rec[ds_bts] = 0;
		printk(KERN_INFO "[ds] bts not available\n");
	}
	if (!cpu_has(cpu, X86_FEATURE_PEBS)) {
		ds_cfg.sizeof_rec[ds_pebs] = 0;
		printk(KERN_INFO "[ds] pebs not available\n");
	}

	if (ds_cfg.sizeof_rec[ds_bts]) {
		int error;

		error = ds_selftest_bts();
		if (error) {
			WARN(1, "[ds] selftest failed. disabling bts.\n");
			ds_cfg.sizeof_rec[ds_bts] = 0;
		}
	}

	if (ds_cfg.sizeof_rec[ds_pebs]) {
		int error;

		error = ds_selftest_pebs();
		if (error) {
			WARN(1, "[ds] selftest failed. disabling pebs.\n");
			ds_cfg.sizeof_rec[ds_pebs] = 0;
		}
	}

	printk(KERN_INFO "[ds] sizes: address: %u bit, ",
	       8 * ds_cfg.sizeof_ptr_field);
	printk("bts/pebs record: %u/%u bytes\n",
	       ds_cfg.sizeof_rec[ds_bts], ds_cfg.sizeof_rec[ds_pebs]);

	WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_ptr_field));
}
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0xd: /* Pentium M */
			ds_configure(&ds_cfg_pentium_m, c);
			break;
		case 0x17: /* Core2 */
		case 0x1c: /* Atom */
			ds_configure(&ds_cfg_core2_atom, c);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	case 0xf:
		switch (c->x86_model) {
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_netburst, c);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
/*
 * Change the DS configuration from tracing prev to tracing next.
 */
void ds_switch_to(struct task_struct *prev, struct task_struct *next)
{
	struct ds_context *prev_ctx = prev->thread.ds_ctx;
	struct ds_context *next_ctx = next->thread.ds_ctx;

	if (prev_ctx) {
		update_debugctlmsr(0);

		if (prev_ctx->bts_master &&
		    (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_departs,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = prev->pid
			};
			bts_write(prev_ctx->bts_master, &ts);
		}
	}

	if (next_ctx) {
		if (next_ctx->bts_master &&
		    (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_arrives,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = next->pid
			};
			bts_write(next_ctx->bts_master, &ts);
		}

		wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
	}

	update_debugctlmsr(next->thread.debugctlmsr);
}
void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
{
	clear_tsk_thread_flag(tsk, TIF_DS_AREA_MSR);
	tsk->thread.ds_ctx = NULL;
}

void ds_exit_thread(struct task_struct *tsk)
{
	WARN_ON(tsk->thread.ds_ctx);
}