/*
 * kvm trace
 *
 * It is designed to allow debugging traces of kvm to be generated
 * on UP / SMP machines. Each trace entry can be timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 * The implementation is modeled on the kernel's blktrace support.
 *
 * Copyright (c) 2008 Intel Corporation
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
 */

#include <linux/module.h>
#include <linux/relay.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>

#include <linux/kvm_host.h>

#define KVM_TRACE_STATE_RUNNING		(1 << 0)
#define KVM_TRACE_STATE_PAUSE		(1 << 1)
#define KVM_TRACE_STATE_CLEARUP		(1 << 2)

struct kvm_trace {
        int trace_state;
        struct rchan *rchan;
        struct dentry *lost_file;
        atomic_t lost_records;
};
static struct kvm_trace *kvm_trace;

struct kvm_trace_probe {
        const char *name;
        const char *format;
        u32 timestamp_in;
        marker_probe_func *probe_func;
};

static inline int calc_rec_size(int timestamp, int extra)
{
        int rec_size = KVM_TRC_HEAD_SIZE;

        rec_size += extra;
        return timestamp ? rec_size + KVM_TRC_CYCLE_SIZE : rec_size;
}

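/*
 * For example, a timestamped record that carries two extra u32 arguments
 * takes KVM_TRC_HEAD_SIZE + 2 * sizeof(u32) + KVM_TRC_CYCLE_SIZE bytes.
 */
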
static void kvm_add_trace(void *probe_private, void *call_data,
                          const char *format, va_list *args)
{
        struct kvm_trace_probe *p = probe_private;
        struct kvm_trace *kt = kvm_trace;
        struct kvm_trace_rec rec;
        struct kvm_vcpu *vcpu;
        int i, size;
        u32 extra;

        if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
                return;

        /*
         * The marker arguments arrive in the order given by the format
         * string: event id, vcpu pointer, number of extra u32s, then the
         * extra values themselves.
         */
        rec.rec_val     = TRACE_REC_EVENT_ID(va_arg(*args, u32));
        vcpu            = va_arg(*args, struct kvm_vcpu *);
        rec.pid         = current->tgid;
        rec.vcpu_id     = vcpu->vcpu_id;

        extra           = va_arg(*args, u32);
        WARN_ON(extra > KVM_TRC_EXTRA_MAX);
        extra           = min_t(u32, extra, KVM_TRC_EXTRA_MAX);

        rec.rec_val     |= TRACE_REC_TCS(p->timestamp_in)
                           | TRACE_REC_NUM_DATA_ARGS(extra);

        if (p->timestamp_in) {
                rec.u.timestamp.timestamp = ktime_to_ns(ktime_get());

                for (i = 0; i < extra; i++)
                        rec.u.timestamp.extra_u32[i] = va_arg(*args, u32);
        } else {
                for (i = 0; i < extra; i++)
                        rec.u.notimestamp.extra_u32[i] = va_arg(*args, u32);
        }

        size = calc_rec_size(p->timestamp_in, extra * sizeof(u32));
        relay_write(kt->rchan, &rec, size);
}

static struct kvm_trace_probe kvm_trace_probes[] = {
        { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
        { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
};

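/*
 * Each entry above names a kernel marker, gives its vararg format string,
 * says whether records from that marker carry a timestamp (third field),
 * and points at the probe handler that serializes the record.
 */
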
static int lost_records_get(void *data, u64 *val)
{
        struct kvm_trace *kt = data;

        *val = atomic_read(&kt->lost_records);
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");

/*
 * The relay channel is used in "no-overwrite" mode: it keeps track of how
 * many times we encountered a full subbuffer, so that the user space app
 * can be told how many records were lost.
 */
static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
                                     void *prev_subbuf, size_t prev_padding)
{
        struct kvm_trace *kt;

        if (!relay_buf_full(buf)) {
                if (!prev_subbuf) {
                        /*
                         * executed only once when the channel is opened:
                         * save metadata as the first record
                         */
                        subbuf_start_reserve(buf, sizeof(u32));
                        *(u32 *)subbuf = 0x12345678;
                }

                return 1;
        }

        kt = buf->chan->private_data;
        atomic_inc(&kt->lost_records);

        return 0;
}

static struct dentry *kvm_create_buf_file_callack(const char *filename,
                                                  struct dentry *parent,
                                                  int mode,
                                                  struct rchan_buf *buf,
                                                  int *is_global)
{
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static int kvm_remove_buf_file_callback(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static struct rchan_callbacks kvm_relay_callbacks = {
        .subbuf_start    = kvm_subbuf_start_callback,
        .create_buf_file = kvm_create_buf_file_callack,
        .remove_buf_file = kvm_remove_buf_file_callback,
};

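/*
 * With these callbacks, relay exposes the trace data as one file per CPU
 * under the kvm debugfs directory (typically /sys/kernel/debug/kvm/trace0,
 * trace1, ...); a user space reader consumes them through the standard
 * relay_file_operations read interface.
 */
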
static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
{
        struct kvm_trace *kt;
        int i, r = -ENOMEM;

        if (!kuts->buf_size || !kuts->buf_nr)
                return -EINVAL;

        kt = kzalloc(sizeof(*kt), GFP_KERNEL);
        if (!kt)
                goto err;

        r = -EIO;
        atomic_set(&kt->lost_records, 0);
        kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
                                            kt, &kvm_trace_lost_ops);
        if (!kt->lost_file)
                goto err;

        kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
                               kuts->buf_nr, &kvm_relay_callbacks, kt);
        if (!kt->rchan)
                goto err;

        kvm_trace = kt;

        for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
                struct kvm_trace_probe *p = &kvm_trace_probes[i];

                r = marker_probe_register(p->name, p->format, p->probe_func, p);
                if (r)
                        printk(KERN_INFO "Unable to register probe %s\n",
                               p->name);
        }

        kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;

        return 0;
err:
        if (kt) {
                if (kt->lost_file)
                        debugfs_remove(kt->lost_file);
                if (kt->rchan)
                        relay_close(kt->rchan);
                kfree(kt);
        }
        return r;
}

static int kvm_trace_enable(char __user *arg)
{
        struct kvm_user_trace_setup kuts;
        int ret;

        ret = copy_from_user(&kuts, arg, sizeof(kuts));
        if (ret)
                return -EFAULT;

        ret = do_kvm_trace_enable(&kuts);
        if (ret)
                return ret;

        return 0;
}

static int kvm_trace_pause(void)
{
        struct kvm_trace *kt = kvm_trace;
        int r = -EINVAL;

        if (kt == NULL)
                return r;

        if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
                kt->trace_state = KVM_TRACE_STATE_PAUSE;
                relay_flush(kt->rchan);
                r = 0;
        }

        return r;
}

void kvm_trace_cleanup(void)
{
        struct kvm_trace *kt = kvm_trace;
        int i;

        if (kt == NULL)
                return;

        if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
            kt->trace_state == KVM_TRACE_STATE_PAUSE) {

                kt->trace_state = KVM_TRACE_STATE_CLEARUP;

                for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
                        struct kvm_trace_probe *p = &kvm_trace_probes[i];

                        marker_probe_unregister(p->name, p->probe_func, p);
                }
                marker_synchronize_unregister();

                relay_close(kt->rchan);
                debugfs_remove(kt->lost_file);
                kfree(kt);
        }
}

int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r = -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (ioctl) {
        case KVM_TRACE_ENABLE:
                r = kvm_trace_enable(argp);
                break;
        case KVM_TRACE_PAUSE:
                r = kvm_trace_pause();
                break;
        case KVM_TRACE_DISABLE:
                r = 0;
                kvm_trace_cleanup();
                break;
        }

        return r;
}