/*
 * kvm trace
 *
 * It is designed to allow debugging traces of kvm to be generated
 * on UP / SMP machines. Each trace entry can be timestamped so that
 * it's possible to reconstruct a chronological record of trace events.
 * The implementation refers to blktrace kernel support.
 *
 * Copyright (c) 2008 Intel Corporation
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
 */
#include <linux/module.h>
#include <linux/relay.h>
#include <linux/debugfs.h>

#include <linux/kvm_host.h>
#define KVM_TRACE_STATE_RUNNING		(1 << 0)
#define KVM_TRACE_STATE_PAUSE		(1 << 1)
#define KVM_TRACE_STATE_CLEARUP		(1 << 2)
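/*
 * Trace state lifecycle: KVM_TRACE_ENABLE puts tracing in RUNNING,
 * KVM_TRACE_PAUSE flushes the relay channel and moves it to PAUSE, and
 * KVM_TRACE_DISABLE unregisters the probes and tears the channel down
 * via the CLEARUP state (see kvm_trace_ioctl() below).
 */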
struct kvm_trace {
	int trace_state;
	struct rchan *rchan;
	struct dentry *lost_file;
	atomic_t lost_records;
};
static struct kvm_trace *kvm_trace;
struct kvm_trace_probe {
	const char *name;
	const char *format;
	u32 cycle_in;
	marker_probe_func *probe_func;
};
static inline int calc_rec_size(int cycle, int extra)
{
	int rec_size = KVM_TRC_HEAD_SIZE;

	rec_size += extra;
	return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
}
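/*
 * For example, a timestamped record carrying two extra u32 payload words
 * takes KVM_TRC_HEAD_SIZE + 2 * sizeof(u32) + KVM_TRC_CYCLE_SIZE bytes;
 * without the cycle counter the KVM_TRC_CYCLE_SIZE term is dropped.
 */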
static void kvm_add_trace(void *probe_private, void *call_data,
			  const char *format, va_list *args)
{
	struct kvm_trace_probe *p = probe_private;
	struct kvm_trace *kt = kvm_trace;
	struct kvm_trace_rec rec;
	struct kvm_vcpu *vcpu;
	int i, extra, size;

	/* drop the record unless tracing is currently running */
	if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
		return;

	rec.event	= va_arg(*args, u32);
	vcpu		= va_arg(*args, struct kvm_vcpu *);
	rec.pid		= current->tgid;
	rec.vcpu_id	= vcpu->vcpu_id;

	extra		= va_arg(*args, u32);
	WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
	extra		= min_t(u32, extra, KVM_TRC_EXTRA_MAX);
	rec.extra_u32	= extra;

	rec.cycle_in	= p->cycle_in;

	/* timestamped probes additionally record the cycle counter */
	if (rec.cycle_in) {
		rec.u.cycle.cycle_u64 = get_cycles();

		for (i = 0; i < rec.extra_u32; i++)
			rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
	} else {
		for (i = 0; i < rec.extra_u32; i++)
			rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);
	}

	size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
	relay_write(kt->rchan, &rec, size);
}
static struct kvm_trace_probe kvm_trace_probes[] = {
	{ "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
	{ "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
};
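/*
 * The format string describes the arguments kvm_add_trace() consumes via
 * va_arg: the event id, the vcpu pointer, the number of extra u32 payload
 * words, and the payload words themselves.  The third initializer (1 or 0)
 * is cycle_in, i.e. whether records from that marker carry a timestamp.
 */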
static int lost_records_get(void *data, u64 *val)
{
	struct kvm_trace *kt = data;

	*val = atomic_read(&kt->lost_records);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
/*
 * The relay channel is used in "no-overwrite" mode; it keeps track of how
 * many times we encountered a full subbuffer, to tell the user space app
 * how many records were lost.
 */
static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct kvm_trace *kt;

	if (!relay_buf_full(buf)) {
		if (!prev_subbuf) {
			/*
			 * executed only once when the channel is opened
			 * save metadata as first record
			 */
			subbuf_start_reserve(buf, sizeof(u32));
			*(u32 *)subbuf = 0x12345678;
		}

		return 1;
	}

	/* the sub-buffer is full: count the record we are about to drop */
	kt = buf->chan->private_data;
	atomic_inc(&kt->lost_records);

	return 0;
}
static struct dentry *kvm_create_buf_file_callack(const char *filename,
						  struct dentry *parent,
						  int mode,
						  struct rchan_buf *buf,
						  int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}
static int kvm_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}
static struct rchan_callbacks kvm_relay_callbacks = {
	.subbuf_start		= kvm_subbuf_start_callback,
	.create_buf_file	= kvm_create_buf_file_callack,
	.remove_buf_file	= kvm_remove_buf_file_callback,
};
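/*
 * These callbacks make relay place its sub-buffer files in debugfs
 * (served by relay_file_operations) and count writes into a full
 * sub-buffer as lost records instead of overwriting old data.
 */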
static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
{
	struct kvm_trace *kt;
	int i, r = -ENOMEM;

	if (!kuts->buf_size || !kuts->buf_nr)
		return -EINVAL;

	kt = kzalloc(sizeof(*kt), GFP_KERNEL);
	if (!kt)
		goto err;

	r = -EIO;
	atomic_set(&kt->lost_records, 0);
	kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
					    kt, &kvm_trace_lost_ops);
	if (!kt->lost_file)
		goto err;

	kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
				kuts->buf_nr, &kvm_relay_callbacks, kt);
	if (!kt->rchan)
		goto err;

	kvm_trace = kt;

	for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
		struct kvm_trace_probe *p = &kvm_trace_probes[i];

		r = marker_probe_register(p->name, p->format, p->probe_func, p);
		if (r)
			printk(KERN_INFO "Unable to register probe %s\n",
			       p->name);
	}

	kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;

	return 0;
err:
	if (kt) {
		if (kt->lost_file)
			debugfs_remove(kt->lost_file);

		if (kt->rchan)
			relay_close(kt->rchan);

		kfree(kt);
	}
	return r;
}
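/*
 * After a successful KVM_TRACE_ENABLE the kvm debugfs directory holds the
 * "lost_records" counter plus the relay buffers opened as "trace" above;
 * relay typically exposes one such file per cpu (e.g. trace0, trace1, ...),
 * which a user space reader drains much like blktrace output.
 */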
static int kvm_trace_enable(char __user *arg)
{
	struct kvm_user_trace_setup kuts;
	int ret;

	ret = copy_from_user(&kuts, arg, sizeof(kuts));
	if (ret)
		return -EFAULT;

	ret = do_kvm_trace_enable(&kuts);
	if (ret)
		return ret;

	return 0;
}
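/*
 * A minimal user space sketch of turning tracing on.  It assumes the trace
 * ioctls are issued on the /dev/kvm fd and that <linux/kvm.h> exports
 * KVM_TRACE_ENABLE and struct kvm_user_trace_setup; buf_size is the size of
 * each relay sub-buffer and buf_nr the number of sub-buffers, and the
 * values below are illustrative only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int enable_kvm_trace(void)
 *	{
 *		struct kvm_user_trace_setup kuts = {
 *			.buf_size = 4096,
 *			.buf_nr   = 8,
 *		};
 *		int fd = open("/dev/kvm", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		return ioctl(fd, KVM_TRACE_ENABLE, &kuts);
 *	}
 */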
static int kvm_trace_pause(void)
{
	struct kvm_trace *kt = kvm_trace;
	int r = -EINVAL;

	if (kt == NULL)
		return r;

	if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
		kt->trace_state = KVM_TRACE_STATE_PAUSE;
		relay_flush(kt->rchan);
		r = 0;
	}

	return r;
}
void kvm_trace_cleanup(void)
{
	struct kvm_trace *kt = kvm_trace;
	int i;

	if (kt == NULL)
		return;

	if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
	    kt->trace_state == KVM_TRACE_STATE_PAUSE) {

		kt->trace_state = KVM_TRACE_STATE_CLEARUP;

		for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
			struct kvm_trace_probe *p = &kvm_trace_probes[i];
			marker_probe_unregister(p->name, p->probe_func, p);
		}

		relay_close(kt->rchan);
		debugfs_remove(kt->lost_file);
		kfree(kt);
	}
}
int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (ioctl) {
	case KVM_TRACE_ENABLE:
		r = kvm_trace_enable(argp);
		break;
	case KVM_TRACE_PAUSE:
		r = kvm_trace_pause();
		break;
	case KVM_TRACE_DISABLE: