/* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> */
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/ftrace_irq.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len    : 5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta  : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray       : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}
/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   +------+        +---+   +---+   +---+
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      +------------------------------+
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      |   New      +---+   +---+   +---+
 *      +------------------------------+
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 */
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *   ON   DISABLED
 *  ----  --------
 *    0      0     : ring buffers are off
 *    1      0     : ring buffers are on
 *    X      1     : ring buffers are permanently disabled
 */
enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return ring_buffer_flags == RB_BUFFERS_ON;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA	0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	}
	/* not reached */
	return 0;
}
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);

	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
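/*
 * Illustrative size example (not part of the original source): a 12-byte
 * payload fits the "small data" encoding, so type_len = 12 / RB_ALIGNMENT = 3
 * and the full event occupies RB_EVNT_HDR_SIZE + 12 = 16 bytes. A payload
 * larger than RB_MAX_SMALL_DATA instead stores its length in array[0], which
 * is why ring_buffer_event_length() subtracts sizeof(event->array[0]) for
 * such events.
 */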
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};
/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
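/*
 * Illustrative example (not part of the original source): with this split,
 * a raw ->write value of 0x100010 means one in-progress updater
 * (0x100010 >> 20 == 1) and a write index of 0x10 (0x100010 & RB_WRITE_MASK).
 * rb_tail_page_update() below relies on this when it adds RB_WRITE_INTCNT
 * and later uses local_cmpxchg() to clear only the low 20 bits.
 */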
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}
/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/* Max number of timestamps that can fit on a page */
#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP)
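/*
 * Worked example (illustrative assumption, not from the original source):
 * on a 64-bit system with 4K pages the buffer_data_page header is 16 bytes
 * (u64 time_stamp + local_t commit), so BUF_PAGE_SIZE is 4096 - 16 = 4080
 * bytes and BUF_MAX_DATA_SIZE is 4080 - 8 = 4072 bytes.
 */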
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}
/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	local_t				commit_overrun;
	local_t				overrun;
	local_t				entries;
	local_t				committing;
	local_t				commits;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
/*
 * Making the ring buffer lockless makes things tricky.
 * Although writes only happen on the CPU that they are on,
 * and they only need to worry about interrupts. Reads can
 * happen on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too.
 *
 *   head->list->prev->next        bit 1          bit 0
 *   Points to head page             0              1
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * +----+       +-----+        +-----+
 *   +----------|  R  |----------+ |
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 * What the above shows is that the reader just swapped out
 * the reader page with a page in the buffer, but before it
 * could make the new header point back to the new page added
 * it was preempted by a writer. The writer moved forward onto
 * the new page added by the reader and is about to move forward
 * again.
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
 * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}
/*
 * The unique thing about the reader page, is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static int rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}
/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}
static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}
static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}
static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}
static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}
/*
 * rb_tail_page_update - move the tail page forward
 *
 * Returns 1 if moved tail page, 0 if someone else did.
 */
static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	struct buffer_page *old_tail;
	unsigned long old_entries;
	unsigned long old_write;
	int ret = 0;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == cpu_buffer->tail_page) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * it only can increment when a commit takes place. But that
		 * only happens in the outer most nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		old_tail = cmpxchg(&cpu_buffer->tail_page,
				   tail_page, next_page);

		if (old_tail == tail_page)
			ret = 1;
	}

	return ret;
}
static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/*
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}
/*
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;

		rb_check_bpage(cpu_buffer, bpage);

		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	bpage = list_entry(head, struct buffer_page, list);
	free_buffer_page(bpage);

	kfree(cpu_buffer);
}
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif
/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	/* need at least two pages */
	if (buffer->pages < 2)
		buffer->pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
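/*
 * Usage sketch (illustrative, not part of the original source): callers are
 * assumed to go through a ring_buffer_alloc() wrapper that supplies the
 * lock_class_key, e.g.:
 *
 *	struct ring_buffer *rb = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
 *
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 *
 * The 65536-byte size is rounded up to whole pages per CPU, with a minimum
 * of two pages, as enforced above.
 */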
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
			goto out;
		p = cpu_buffer->pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
		goto out;

	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

 out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}
static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *bpage;
	struct list_head *p;
	unsigned i;

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			goto out;
		p = pages->next;
		bpage = list_entry(p, struct buffer_page, list);
		list_del_init(&bpage->list);
		list_add_tail(&bpage->list, cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);

 out:
	spin_unlock_irq(&cpu_buffer->reader_lock);
}
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *bpage, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	atomic_inc(&buffer->record_disabled);

	/* Make sure all writers are done with this buffer. */
	synchronize_sched();

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
			goto out_fail;

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
		goto out_fail;

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
					     GFP_KERNEL, cpu_to_node(cpu));
			if (!bpage)
				goto free_pages;
			list_add(&bpage->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			bpage->page = (void *)addr;
			rb_init_page(bpage->page);
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages)))
		goto out_fail;

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);

	return size;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -ENOMEM;

	/*
	 * Something went totally wrong, and we are too paranoid
	 * to even clean up the mess.
	 */
 out_fail:
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);
	return -1;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);
static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}
static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->buffer->pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}
/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_event *event,
		unsigned type, unsigned length)
{
	event->type_len = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
	case RINGBUF_TYPE_TIME_EXTEND:
	case RINGBUF_TYPE_TIME_STAMP:
		break;

	case 0:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA)
			event->array[0] = length;
		else
			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
		break;
	}
}
/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved passed next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}
static struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 *ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE))
				goto out_reset;

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		*ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = *ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/* See if we shot pass the end of this buffer page */
	if (write > BUF_PAGE_SIZE)
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(event, type, length);

	/* The passed in type is zero for DATA */
	if (likely(!type))
		local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = *ts;

	return event;
}
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index)
			return 1;
	}

	/* could not discard */
	return 0;
}
static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big, we need to add a
	 * timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then it was
		 * updated with the page itself. Try to discard it
		 * and if we can't just make it zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			/* try to discard, since we do not need this */
			if (!rb_try_to_discard(cpu_buffer, event)) {
				/* nope, just zero it */
				event->time_delta = 0;
				event->array[0] = 0;
			}
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Try to discard the event */
		if (!rb_try_to_discard(cpu_buffer, event)) {
			/* Darn, this is just wasted space */
			event->time_delta = 0;
			event->array[0] = 0;
		}
		ret = 0;
	}

	*delta = 0;

	return ret;
}
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

static void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta = 0;
	int commit = 0;
	int nr_loops = 0;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (likely(cpu_buffer->tail_page == cpu_buffer->commit_page &&
		   rb_page_write(cpu_buffer->tail_page) ==
		   rb_commit_index(cpu_buffer))) {
		u64 diff;

		diff = ts - cpu_buffer->write_stamp;

		/* make sure this diff is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			goto get_event;

		delta = diff;
		if (unlikely(test_time_stamp(delta))) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
			if (commit == -EBUSY)
				goto out_fail;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	}

 get_event:
	event = __rb_reserve_next(cpu_buffer, 0, length, &ts);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	if (!rb_event_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}
#ifdef CONFIG_TRACING

#define TRACE_RECURSIVE_DEPTH 16

static int trace_recursive_lock(void)
{
	current->trace_recursion++;

	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
		return 0;

	/* Disable all tracing before we do anything else */
	tracing_off_permanent();

	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
		    current->trace_recursion,
		    hardirq_count() >> HARDIRQ_SHIFT,
		    softirq_count() >> SOFTIRQ_SHIFT,
		    in_nmi());

	return -1;
}

static void trace_recursive_unlock(void)
{
	WARN_ON_ONCE(!current->trace_recursion);

	current->trace_recursion--;
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif
static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */
	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	ftrace_preempt_enable(resched);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
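/*
 * Typical usage sketch (illustrative, not part of the original source):
 *
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &entry,
 *		       sizeof(struct my_entry));
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 *
 * "struct my_entry" and "entry" are hypothetical names; the reserve/commit
 * pairing and the use of ring_buffer_event_data() follow the kernel-doc
 * comments above.
 */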
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	/*
	 * The event first in the commit queue updates the
	 * write stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event))
		cpu_buffer->write_stamp += event->time_delta;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}
/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}
/**
 * ring_buffer_discard_commit - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();
}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
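
/*
 * Illustrative sketch (not part of the original file): discarding a reserved
 * event instead of committing it, e.g. when a filter rejects the data after
 * it has already been written. The helper name and the "val == 0" filter
 * are hypothetical.
 */
static inline void rb_example_filtered_record(struct ring_buffer *buffer, u64 val)
{
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = val;

	if (val == 0) {
		/* never call ring_buffer_unlock_commit() after a discard */
		ring_buffer_discard_commit(buffer, event);
		return;
	}

	ring_buffer_unlock_commit(buffer, event);
}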
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length,
		      void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_write);
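
/*
 * Illustrative sketch (not part of the original file): when the data is
 * already laid out in memory, ring_buffer_write() replaces the
 * reserve/copy/commit sequence above with a single call. The helper name
 * is hypothetical.
 */
static inline int rb_example_write_value(struct ring_buffer *buffer, u64 val)
{
	/* length is the payload size only, not the size of the event header */
	return ring_buffer_write(buffer, sizeof(val), &val);
}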
static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = rb_set_head_page(cpu_buffer);
	struct buffer_page *commit = cpu_buffer->commit_page;

	/* In case of error, head will be NULL */
	if (unlikely(!head))
		return 1;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
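
/*
 * Illustrative sketch (not part of the original file): the pattern the
 * ring_buffer_record_disable() comment asks for. Disable recording, wait
 * for writers that were already inside a commit, read a stable snapshot,
 * then re-enable. The helper name and the elided "read the buffer" step
 * are placeholders.
 */
static inline void rb_example_stable_read(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);

	/* let any writer that raced with the disable finish its commit */
	synchronize_sched();

	/* ... read the buffer here (iterators, ring_buffer_consume, ...) ... */

	ring_buffer_record_enable(buffer);
}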
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);

/**
 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
		- cpu_buffer->read;

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);

/**
 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);

/**
 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
unsigned long
ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	cpu_buffer = buffer->buffers[cpu];
	ret = local_read(&cpu_buffer->commit_overrun);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);

/**
 * ring_buffer_entries - get the number of entries in a buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_entries(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long entries = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		entries += (local_read(&cpu_buffer->entries) -
			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
	}

	return entries;
}
EXPORT_SYMBOL_GPL(ring_buffer_entries);

/**
 * ring_buffer_overruns - get the number of overruns in buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long overruns = 0;
	int cpu;

	/* if you care about this being correct, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		overruns += local_read(&cpu_buffer->overrun);
	}

	return overruns;
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
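
/*
 * Illustrative sketch (not part of the original file): combining the per
 * cpu statistic helpers above. Iterating the online cpus is an assumption
 * of this example; cpus outside the buffer's cpumask simply report 0.
 */
static inline unsigned long rb_example_total_dropped(struct ring_buffer *buffer)
{
	unsigned long dropped = 0;
	int cpu;

	for_each_online_cpu(cpu)
		dropped += ring_buffer_overrun_cpu(buffer, cpu) +
			   ring_buffer_commit_overrun_cpu(buffer, cpu);

	return dropped;
}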
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	if (list_empty(&cpu_buffer->reader_page->list)) {
		iter->head_page = rb_set_head_page(cpu_buffer);
		if (unlikely(!iter->head_page))
			return;
		iter->head = iter->head_page->read;
	} else {
		iter->head_page = cpu_buffer->reader_page;
		iter->head = cpu_buffer->reader_page->read;
	}
	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
	iter->cache_reader_page = cpu_buffer->reader_page;
	iter->cache_read = cpu_buffer->read;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 *  has no specific buffer page to point to. Let's move it out
	 *  of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want.
	 * But if the writer is in the process of moving it
	 * then it will be '2' or already moved '0'.
	 */

	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	goto again;

 out:
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_advance_iter(iter);
}
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a timestamp is encountered. It is possible
	 * to get multiple timestamps from an interrupt entering just
	 * as one timestamp is about to be written, or from discarded
	 * commits. The most that we can have is the number on a single page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a timestamp is encountered.
	 * We can get multiple timestamps by nested interrupts or also
	 * if filtering is on (discarding commits). Since discarding
	 * commits can be frequent we can get a lot of timestamps.
	 * But we limit them by not adding timestamps if they begin
	 * at the start of a page.
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > RB_TIMESTAMPS_PER_PAGE))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= local_read(&iter->head_page->page->commit)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts);
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: The per CPU buffer to read from
 * @ts: The timestamp counter of this event.
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts);
	if (event)
		rb_advance_reader(cpu_buffer);

	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);
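
/*
 * Illustrative sketch (not part of the original file): a consuming read
 * loop that drains one cpu buffer. The helper name is hypothetical and
 * the payload handling is left out.
 */
static inline unsigned long rb_example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long count = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		/* ring_buffer_event_data(event) would be processed here */
		count++;
	}

	return count;
}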
/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This starts up an iteration through the buffer. It also disables
 * the recording to the buffer until the reading is finished.
 * This prevents the reading from being corrupted. This is not
 * a consuming read, so a producer is not expected.
 *
 * Must be paired with ring_buffer_read_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
ring_buffer_read_finish(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	atomic_dec(&cpu_buffer->record_disabled);
	kfree(iter);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_finish);

/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);
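
/*
 * Illustrative sketch (not part of the original file): a non consuming
 * pass over one cpu buffer using the iterator API above. Recording on
 * that cpu buffer stays disabled between read_start and read_finish.
 */
static inline unsigned long rb_example_count_events(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	unsigned long count = 0;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return 0;

	while (ring_buffer_read(iter, &ts))
		count++;

	ring_buffer_read_finish(iter);

	return count;
}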
/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
	return BUF_PAGE_SIZE * buffer->pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	rb_head_page_activate(cpu_buffer);
}
/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&cpu_buffer->record_disabled);

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
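
/*
 * Illustrative sketch (not part of the original file): using the racy
 * empty checks to pick a cpu buffer worth reading. A -1 return means
 * every online cpu buffer looked empty at the time of the check.
 */
static inline int rb_example_first_nonempty_cpu(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (!ring_buffer_empty_cpu(buffer, cpu))
			return cpu;
	}

	return -1;
}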
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and has another back up buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	/* At least make sure the two buffers are somewhat the same */
	if (buffer_a->pages != buffer_b->pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

 out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
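
/*
 * Illustrative sketch (not part of the original file): the "snapshot"
 * use case described above. After a successful swap, writers keep using
 * @live (they now fill what used to be @spare's cpu buffer), while the
 * captured data sits in @spare and can be read at leisure. The helper
 * name and the @live/@spare roles are hypothetical.
 */
static inline int rb_example_snapshot_cpu(struct ring_buffer *live,
					  struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(spare, live, cpu);
	if (ret)
		return ret;

	/* read the snapshot from @spare, e.g. with ring_buffer_read_start() */

	return 0;
}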
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the page that was allocated, with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
{
	struct buffer_data_page *bpage;
	unsigned long addr;

	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		return NULL;

	bpage = (void *)addr;

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return true unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		size = rb_event_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			event = rb_reader_event(cpu_buffer);
			size = rb_event_length(event);
		} while (len > size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;
	}
	ret = read;

 out_unlock:
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
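
/*
 * Illustrative sketch (not part of the original file): the alloc/read/free
 * page cycle described above, pulling at most one page of data from one
 * cpu buffer. Splice users would hand the page off instead of freeing it.
 */
static inline int rb_example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer);
	if (!page)
		return -ENOMEM;

	/* @full == 0: copy out whatever is there, even a partial page */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);

	/* on success @page holds the data; it may be a swapped buffer page */

	ring_buffer_free_read_page(buffer, page);

	return ret;
}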
#ifdef CONFIG_TRACING
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
	else
		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
	else
		clear_bit(RB_BUFFERS_ON_BIT, p);

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
};

static __init int rb_init_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("tracing_on", 0644, d_tracer,
			  &ring_buffer_flags, &rb_simple_fops);

	return 0;
}

fs_initcall(rb_init_debugfs);
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.