/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */

#include <asm/ds.h>

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kernel.h>
/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
	/* the name of the configuration */
	const char *name;
	/* the size of one pointer-typed field in the DS structure and
	   in the BTS and PEBS buffers in bytes;
	   this covers the first 8 DS fields related to buffer management. */
	unsigned char sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char sizeof_rec[2];
	/* a series of bit-masks to control various features indexed
	 * by enum ds_feature */
	unsigned long ctl[dsf_ctl_max];
};
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);

#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())

#define MAX_SIZEOF_DS	(12 * 8)	/* maximal size of a DS configuration */
#define MAX_SIZEOF_BTS	(3 * 8)		/* maximal size of a BTS record */
#define DS_ALIGNMENT	(1 << 3)	/* BTS and PEBS buffer alignment */
#define BTS_CONTROL \
	(ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | \
	 ds_cfg.ctl[dsf_bts_user] | ds_cfg.ctl[dsf_bts_overflow])
/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
struct ds_tracer {
	/* the DS context (partially) owned by this tracer */
	struct ds_context *context;
	/* the buffer provided on ds_request() and its size in bytes */
	void *buffer;
	size_t size;
};

struct bts_tracer {
	/* the common DS part */
	struct ds_tracer ds;
	/* the trace including the DS configuration */
	struct bts_trace trace;
	/* buffer overflow notification function */
	bts_ovfl_callback_t ovfl;
};

struct pebs_tracer {
	/* the common DS part */
	struct ds_tracer ds;
	/* the trace including the DS configuration */
	struct pebs_trace trace;
	/* buffer overflow notification function */
	pebs_ovfl_callback_t ovfl;
};
/*
 * Debug Store (DS) save area configuration (see Intel 64 and IA-32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};
static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}

static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}
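
/*
 * A worked example of the field addressing (a sketch; the 8-byte field
 * size is that of the 64bit and Core2-era configurations below): the
 * BTS region starts at field 0 and the PEBS region at field 4, so the
 * PEBS write pointer lives at byte offset
 *	sizeof_field * (ds_index + (4 * ds_pebs)) = 8 * (1 + 4) = 40
 * of the DS save area, while the BTS write pointer lives at offset 8.
 */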
/*
 * Locking is done only for allocating BTS or PEBS resources.
 */
static DEFINE_SPINLOCK(ds_lock);
/*
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * The tracers count essentially gives the number of ds contexts for a
 * certain type of allocation.
 */
static atomic_t tracers = ATOMIC_INIT(0);

static inline void get_tracer(struct task_struct *task)
{
	if (task)
		atomic_inc(&tracers);
	else
		atomic_dec(&tracers);
}

static inline void put_tracer(struct task_struct *task)
{
	if (task)
		atomic_dec(&tracers);
	else
		atomic_inc(&tracers);
}

static inline int check_tracer(struct task_struct *task)
{
	return task ?
		(atomic_read(&tracers) >= 0) :
		(atomic_read(&tracers) <= 0);
}
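
/*
 * Example of the sign convention: after two per-thread requests,
 * tracers is +2; a per-cpu request then fails check_tracer(NULL),
 * since atomic_read(&tracers) <= 0 does not hold. Conversely, two
 * per-cpu tracers leave tracers at -2 and block per-thread requests.
 */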
/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
struct ds_context {
	/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
	unsigned char ds[MAX_SIZEOF_DS];
	/* the owner of the BTS and PEBS configuration, respectively */
	struct bts_tracer *bts_master;
	struct pebs_tracer *pebs_master;
	/* use count */
	unsigned long count;
	/* a pointer to the context location inside the thread_struct
	 * or the per_cpu context array */
	struct ds_context **this;
	/* a pointer to the task owning this context, or NULL, if the
	 * context is owned by a cpu */
	struct task_struct *task;
};

static DEFINE_PER_CPU(struct ds_context *, system_context_array);

#define system_context per_cpu(system_context_array, smp_processor_id())
static inline struct ds_context *ds_get_context(struct task_struct *task)
{
	struct ds_context **p_context =
		(task ? &task->thread.ds_ctx : &system_context);
	struct ds_context *context = NULL;
	struct ds_context *new_context = NULL;
	unsigned long irq;

	/* Chances are small that we already have a context. */
	new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
	if (!new_context)
		return NULL;

	spin_lock_irqsave(&ds_lock, irq);

	context = *p_context;
	if (!context) {
		context = new_context;

		context->this = p_context;
		context->task = task;

		if (task)
			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);

		if (!task || (task == current))
			wrmsrl(MSR_IA32_DS_AREA, (unsigned long)context->ds);

		*p_context = context;
	}

	context->count++;

	spin_unlock_irqrestore(&ds_lock, irq);

	if (context != new_context)
		kfree(new_context);

	return context;
}
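
/*
 * Design note: the candidate context is allocated with GFP_KERNEL
 * before taking ds_lock, since kzalloc may sleep and ds_lock is taken
 * with interrupts disabled. If another cpu installed a context first,
 * we lose the race and simply free our unused copy.
 */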
static inline void ds_put_context(struct ds_context *context)
{
	unsigned long irq;

	if (!context)
		return;

	spin_lock_irqsave(&ds_lock, irq);

	if (--context->count) {
		spin_unlock_irqrestore(&ds_lock, irq);
		return;
	}

	*(context->this) = NULL;

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

	spin_unlock_irqrestore(&ds_lock, irq);

	kfree(context);
}
/*
 * Call the tracer's callback on a buffer overflow.
 *
 * context: the ds context
 * qual: the buffer type
 */
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
	switch (qual) {
	case ds_bts:
		if (context->bts_master &&
		    context->bts_master->ovfl)
			context->bts_master->ovfl(context->bts_master);
		break;
	case ds_pebs:
		if (context->pebs_master &&
		    context->pebs_master->ovfl)
			context->pebs_master->ovfl(context->pebs_master);
		break;
	}
}
/*
 * Write raw data into the BTS or PEBS buffer.
 *
 * The remainder of any partially written record is zeroed out.
 *
 * context: the DS context
 * qual: the buffer type
 * record: the data to write
 * size: the size of the data
 */
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
		    const void *record, size_t size)
{
	int bytes_written = 0;

	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base	= ds_get(context->ds, qual, ds_buffer_base);
		index	= ds_get(context->ds, qual, ds_index);
		end	= ds_get(context->ds, qual, ds_absolute_maximum);
		int_th	= ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size -= write_size;
		bytes_written += write_size;
		/* round the write up to the next record boundary */
		adj_write_size = DIV_ROUND_UP(write_size,
					      ds_cfg.sizeof_rec[qual]);
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		if (index >= int_th)
			ds_overflow(context, qual);
	}

	return bytes_written;
}
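
/*
 * A worked example of the record rounding above (a sketch; sizes taken
 * from the Core2 configuration below): with 24-byte BTS records,
 * writing 30 bytes copies all 30, zeroes bytes 30..47 to pad out the
 * partially written second record, and advances the index by 48 bytes,
 * i.e. two whole records.
 */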
/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 *
 * We use two levels of abstraction:
 * - the raw data level defined here
 * - an arch-independent level defined in ds.h
 */
enum bts_field {
	bts_from,
	bts_to,
	bts_flags,

	bts_qual = bts_from,
	bts_jiffies = bts_to,
	bts_pid = bts_flags,

	bts_qual_mask = (bts_qual_max - 1),
	bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
	base += (ds_cfg.sizeof_field * field);
	return *(unsigned long *)base;
}

static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
	base += (ds_cfg.sizeof_field * field);
	(*(unsigned long *)base) = val;
}
/*
 * The raw BTS data is architecture dependent.
 *
 * For higher-level users, we give an arch-independent view.
 * - ds.h defines struct bts_struct
 * - bts_read translates one raw bts record into a bts_struct
 * - bts_write translates one bts_struct into the raw format and
 *   writes it into the top of the parameter tracer's buffer.
 *
 * return: bytes read/written on success; -Eerrno, otherwise
 */
static int bts_read(struct bts_tracer *tracer, const void *at,
		    struct bts_struct *out)
{
	if (!tracer)
		return -EINVAL;

	if (at < tracer->trace.ds.begin)
		return -EINVAL;

	if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
		return -EINVAL;

	memset(out, 0, sizeof(*out));
	if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
		out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
		out->variant.timestamp.jiffies = bts_get(at, bts_jiffies);
		out->variant.timestamp.pid = bts_get(at, bts_pid);
	} else {
		out->qualifier = bts_branch;
		out->variant.lbr.from = bts_get(at, bts_from);
		out->variant.lbr.to = bts_get(at, bts_to);

		if (!out->variant.lbr.from && !out->variant.lbr.to)
			out->qualifier = bts_invalid;
	}

	return ds_cfg.sizeof_rec[ds_bts];
}
static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
{
	unsigned char raw[MAX_SIZEOF_BTS];

	if (!tracer)
		return -EINVAL;

	if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
		return -EOVERFLOW;

	switch (in->qualifier) {
	case bts_invalid:
		bts_set(raw, bts_from, 0);
		bts_set(raw, bts_to, 0);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_branch:
		bts_set(raw, bts_from, in->variant.lbr.from);
		bts_set(raw, bts_to, in->variant.lbr.to);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_task_arrives:
	case bts_task_departs:
		bts_set(raw, bts_qual, (bts_escape | in->qualifier));
		bts_set(raw, bts_jiffies, in->variant.timestamp.jiffies);
		bts_set(raw, bts_pid, in->variant.timestamp.pid);
		break;
	default:
		return -EINVAL;
	}

	return ds_write(tracer->ds.context, ds_bts, raw,
			ds_cfg.sizeof_rec[ds_bts]);
}
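
/*
 * Encoding example (mirrors the bts_task_arrives/departs case above):
 * a bts_task_departs record for pid 42 at jiffies J is stored in the
 * three record slots as
 *	{ bts_escape | bts_task_departs, J, 42 }
 * i.e. the out-of-band qualifier goes into the from slot, the
 * timestamp into the to slot, and the pid into the flags slot.
 */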
static void ds_write_config(struct ds_context *context,
			    struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *ds = context->ds;

	ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
	ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
	ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
	ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
}
static void ds_read_config(struct ds_context *context,
			   struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *ds = context->ds;

	cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
	cfg->top = (void *)ds_get(ds, qual, ds_index);
	cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
	cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
}
static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
			     void *base, size_t size, size_t ith,
			     unsigned int flags) {
	unsigned long buffer, adj;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * We checked the size at the very beginning; we have enough
	 * space to do the adjustment.
	 */
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
	buffer += adj;
	size -= adj;

	trace->n = size / ds_cfg.sizeof_rec[qual];
	trace->size = ds_cfg.sizeof_rec[qual];

	size = (trace->n * trace->size);

	trace->begin = (void *)buffer;
	trace->top = trace->begin;
	trace->end = (void *)(buffer + size);
	/* The value for 'no threshold' is -1, which will set the
	 * threshold outside of the buffer, just like we want it.
	 */
	trace->ith = (void *)(buffer + size - ith);

	trace->flags = flags;
}
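
/*
 * Alignment example (a sketch with made-up numbers): a 100-byte buffer
 * at address 0x1004 with 24-byte BTS records gives adj = 4, leaving 96
 * usable bytes at 0x1008, so n = 4 records and end = begin + 96. With
 * ith == (size_t)-1, trace->ith points one byte past end, i.e. the
 * interrupt threshold can never be reached.
 */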
static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
		      enum ds_qualifier qual, struct task_struct *task,
		      void *base, size_t size, size_t th, unsigned int flags)
{
	struct ds_context *context;
	int error;

	error = -EINVAL;
	if (!base)
		goto out;

	/* we require some space to do alignment adjustments below */
	error = -EINVAL;
	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
		goto out;

	if (th != (size_t)-1) {
		th *= ds_cfg.sizeof_rec[qual];

		error = -EINVAL;
		if (size <= th)
			goto out;
	}

	tracer->buffer = base;
	tracer->size = size;

	error = -ENOMEM;
	context = ds_get_context(task);
	if (!context)
		goto out;
	tracer->context = context;

	ds_init_ds_trace(trace, qual, base, size, th, flags);

	error = 0;
 out:
	return error;
}
struct bts_tracer *ds_request_bts(struct task_struct *task,
				  void *base, size_t size,
				  bts_ovfl_callback_t ovfl, size_t th,
				  unsigned int flags)
{
	struct bts_tracer *tracer;
	unsigned long irq;
	int error;

	error = -EOPNOTSUPP;
	if (!ds_cfg.ctl[dsf_bts])
		goto out;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_bts, task, base, size, th, flags);
	if (error < 0)
		goto out_tracer;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->bts_master)
		goto out_put_tracer;
	tracer->ds.context->bts_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	tracer->trace.read = bts_read;
	tracer->trace.write = bts_write;

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_bts(tracer);

	return tracer;

 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
 out_tracer:
	kfree(tracer);
 out:
	return ERR_PTR(error);
}
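
/*
 * Usage sketch (illustrative only; not taken from an in-tree caller,
 * and the buffer size is arbitrary):
 *
 *	void *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
 *	struct bts_tracer *tracer;
 *
 *	tracer = ds_request_bts(current, buf, PAGE_SIZE,
 *				NULL, (size_t)-1,
 *				BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS);
 *	if (IS_ERR(tracer))
 *		return PTR_ERR(tracer);
 *	...
 *	ds_release_bts(tracer);
 *	kfree(buf);
 *
 * The ovfl callback must be NULL (overflow notification is not yet
 * implemented) and th == (size_t)-1 requests no interrupt threshold.
 */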
struct pebs_tracer *ds_request_pebs(struct task_struct *task,
				    void *base, size_t size,
				    pebs_ovfl_callback_t ovfl, size_t th,
				    unsigned int flags)
{
	struct pebs_tracer *tracer;
	unsigned long irq;
	int error;

	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_pebs, task, base, size, th, flags);
	if (error < 0)
		goto out_tracer;

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->pebs_master)
		goto out_put_tracer;
	tracer->ds.context->pebs_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	ds_resume_pebs(tracer);

	return tracer;

 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
 out_tracer:
	kfree(tracer);
 out:
	return ERR_PTR(error);
}
void ds_release_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return;

	ds_suspend_bts(tracer);

	WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
	tracer->ds.context->bts_master = NULL;

	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);

	kfree(tracer);
}
void ds_suspend_bts(struct bts_tracer *tracer)
{
	struct task_struct *task;

	if (!tracer)
		return;

	task = tracer->ds.context->task;

	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() & ~BTS_CONTROL);

	if (task) {
		task->thread.debugctlmsr &= ~BTS_CONTROL;

		if (!task->thread.debugctlmsr)
			clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
}
void ds_resume_bts(struct bts_tracer *tracer)
{
	struct task_struct *task;
	unsigned long control;

	if (!tracer)
		return;

	task = tracer->ds.context->task;

	control = ds_cfg.ctl[dsf_bts];
	if (!(tracer->trace.ds.flags & BTS_KERNEL))
		control |= ds_cfg.ctl[dsf_bts_kernel];
	if (!(tracer->trace.ds.flags & BTS_USER))
		control |= ds_cfg.ctl[dsf_bts_user];

	if (task) {
		task->thread.debugctlmsr |= control;
		set_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}

	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() | control);
}
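
/*
 * Note the inverted logic above: the dsf_bts_kernel and dsf_bts_user
 * control bits are suppress bits (BTS_OFF_OS and BTS_OFF_USR in the
 * DEBUGCTL MSR), so they are OR'ed into the control value when the
 * corresponding BTS_KERNEL or BTS_USER flag was NOT requested.
 */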
void ds_release_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return;

	ds_suspend_pebs(tracer);

	WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
	tracer->ds.context->pebs_master = NULL;

	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);

	kfree(tracer);
}
void ds_suspend_pebs(struct pebs_tracer *tracer)
{

}

void ds_resume_pebs(struct pebs_tracer *tracer)
{

}
const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return NULL;

	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	return &tracer->trace;
}
const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return NULL;

	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	tracer->trace.reset_value =
		*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));

	return &tracer->trace;
}
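
/*
 * The PEBS counter reset value is the 9th field in the DS save area:
 * fields 0..3 manage the BTS buffer, fields 4..7 the PEBS buffer, and
 * field 8 holds the reset value, hence the (ds_cfg.sizeof_field * 8)
 * offset above (cf. MAX_SIZEOF_DS, which allows for 12 such fields).
 */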
int ds_reset_bts(struct bts_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	tracer->trace.ds.top = tracer->trace.ds.begin;

	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);

	return 0;
}
int ds_reset_pebs(struct pebs_tracer *tracer)
{
	if (!tracer)
		return -EINVAL;

	tracer->trace.ds.top = tracer->trace.ds.begin;

	ds_set(tracer->ds.context->ds, ds_pebs, ds_index,
	       (unsigned long)tracer->trace.ds.top);

	return 0;
}
int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
	if (!tracer)
		return -EINVAL;

	*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)) = value;

	return 0;
}
static const struct ds_configuration ds_cfg_netburst = {
	.name = "Netburst",
	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
	.ctl[dsf_bts_kernel]	= (1 << 5),
	.ctl[dsf_bts_user]	= (1 << 6),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
#else
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
#endif
};
static const struct ds_configuration ds_cfg_pentium_m = {
	.name = "Pentium M",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
#ifdef __i386__
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
#else
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
#endif
};
static const struct ds_configuration ds_cfg_core2 = {
	.name = "Core 2",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
	.ctl[dsf_bts_kernel]	= (1 << 9),
	.ctl[dsf_bts_user]	= (1 << 10),

	.sizeof_field		= 8,
	.sizeof_rec[ds_bts]	= 8 * 3,
	.sizeof_rec[ds_pebs]	= 8 * 18,
};
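
/*
 * The ctl bit positions mirror the DEBUGCTL MSR layout of each
 * microarchitecture: on Netburst, TR and BTS are bits 2 and 3 of
 * DEBUGCTLA with the BTS_OFF_OS/BTS_OFF_USR suppress bits at 5 and 6;
 * on Pentium M and Core2, TR and BTS are bits 6 and 7, with the
 * suppress bits at 9 and 10 on Core2.
 */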
static void
ds_configure(const struct ds_configuration *cfg)
{
	memset(&ds_cfg, 0, sizeof(ds_cfg));
	ds_cfg = *cfg;

	printk(KERN_INFO "[ds] using %s configuration\n", ds_cfg.name);

	if (!cpu_has_bts) {
		ds_cfg.ctl[dsf_bts] = 0;
		printk(KERN_INFO "[ds] bts not available\n");
	}
	if (!cpu_has_pebs)
		printk(KERN_INFO "[ds] pebs not available\n");

	WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_field));
}
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0x0 ... 0xC:
			/* sorry, don't know about them */
			break;
		case 0xD:
		case 0xE: /* Pentium M */
			ds_configure(&ds_cfg_pentium_m);
			break;
		default: /* Core2, Atom, ... */
			ds_configure(&ds_cfg_core2);
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_netburst);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
/*
 * Change the DS configuration from tracing prev to tracing next.
 */
void ds_switch_to(struct task_struct *prev, struct task_struct *next)
{
	struct ds_context *prev_ctx = prev->thread.ds_ctx;
	struct ds_context *next_ctx = next->thread.ds_ctx;

	if (prev_ctx) {
		update_debugctlmsr(0);

		if (prev_ctx->bts_master &&
		    (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_departs,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = prev->pid
			};
			bts_write(prev_ctx->bts_master, &ts);
		}
	}

	if (next_ctx) {
		if (next_ctx->bts_master &&
		    (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_arrives,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = next->pid
			};
			bts_write(next_ctx->bts_master, &ts);
		}

		wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
	}

	update_debugctlmsr(next->thread.debugctlmsr);
}
void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
{
	clear_tsk_thread_flag(tsk, TIF_DS_AREA_MSR);
	tsk->thread.ds_ctx = NULL;
}
void ds_exit_thread(struct task_struct *tsk)
{
	WARN_ON(tsk->thread.ds_ctx);
}