/* kernel/events/ring_buffer.c */
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
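
/*
 * For illustration only: a minimal sketch of the matching userspace consumer
 * of the barrier pairing documented above, assuming the buffer is mapped
 * read/write so that ->data_tail is honoured. The variable names below are
 * hypothetical and not part of this file:
 *
 *	struct perf_event_mmap_page *up = mmap_base;
 *	u64 head = ACCESS_ONCE(up->data_head);
 *	smp_rmb();					(C), matches B
 *	... copy records in [up->data_tail, head) out of the buffer ...
 *	smp_mb();					(D), matches A
 *	up->data_tail = head;
 *
 * Only once the ->data_tail store is visible may the kernel reuse that space;
 * perf_output_begin() re-reads ->data_tail under the control dependency (A).
 */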

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = ACCESS_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;
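
	/*
	 * Worked example (illustrative, assuming 4 KiB pages, page_order() == 0
	 * and nr_pages == 8): page_shift is 12, so an offset of 0x2345 selects
	 * data page 2 at byte 0x345, leaving 0xcbb bytes in handle->size before
	 * the copy helpers have to step to the next data page.
	 */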

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
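
/*
 * Typical writer-side use of the handle API above, roughly as done by
 * perf_event_output() in kernel/events/core.c. This is a simplified,
 * illustrative sketch; the local variable names are not from this file:
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header;	set up type/misc/size first
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;				no room: the record is dropped
 *						and rb->lost is bumped
 *	perf_output_put(&handle, header);
 *	... perf_output_copy() / perf_output_skip() for the payload ...
 *	perf_output_end(&handle);		publishes ->data_head and may
 *						wake up poll() waiters
 */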

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
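
/*
 * Watermark example (illustrative, assuming 4 KiB pages and page_order() == 0):
 * with 8 data pages perf_data_size() is 32 KiB, so without an explicit
 * watermark rb->watermark defaults to 16 KiB and perf_output_begin() arms a
 * wakeup roughly every 16 KiB of emitted data.
 */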

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;
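
	/*
	 * Illustrative numbers (not from this file): for nr_pages == 8 the
	 * vmalloc'ed area is the user page plus one virtually contiguous
	 * "data page" of order ilog2(8) == 3, so page_order == 3,
	 * nr_pages == 1 and perf_data_size() still works out to
	 * 8 * PAGE_SIZE.
	 */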

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif