/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer:
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */

/*
 * Global flag to disable all recording to ring buffers
 * This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0    : ring buffers are off
 *   1      0    : ring buffers are on
 *   X      1    : ring buffers are permanently disabled
 */
enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
EXPORT_SYMBOL_GPL(tracing_off);
/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
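/*
 * Illustrative sketch (not part of the original file): all three layers
 * described above must be enabled before an event can be recorded.
 * A hypothetical setup would line them up roughly like this:
 *
 *	tracing_on();					// 1) global flag
 *	ring_buffer_record_enable(buffer);		// 2) the ring buffer
 *	ring_buffer_record_enable_cpu(buffer, cpu);	// 3) the per cpu buffer
 *
 * If any layer is off, or tracing_off_permanent() was ever called,
 * writes such as ring_buffer_lock_reserve() simply fail and return NULL.
 */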
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
/* inline for ring buffer fast paths */
static unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len * RB_ALIGNMENT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}
/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length = rb_event_length(event);
	if (event->type != RINGBUF_TYPE_DATA)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}
/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)
struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[];	/* data of buffer page */
};

struct buffer_page {
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	struct buffer_data_page *page;	/* Actual data page */
};
static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}
/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}
/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page))
/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_var_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};
struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};
/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})
/**
 * check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
	}

	return 0;
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				     GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!bpage)
			goto free_pages;
		list_add(&bpage->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		bpage->page = (void *)addr;
		rb_init_page(bpage->page);
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(bpage, tmp, &pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	return -ENOMEM;
}
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	cpu_buffer->reader_page = bpage;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	bpage->page = (void *)addr;
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(bpage, tmp, head, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);
/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	cpumask_copy(buffer->cpumask, cpu_possible_mask);
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc);
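/*
 * Illustrative usage sketch (not part of the original file): allocate a
 * ring buffer that overwrites old data when it wraps, and free it again.
 * The size is a per-cpu byte count and is rounded up to whole buffer pages.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 */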
/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);
529 static void rb_reset_cpu(struct ring_buffer_per_cpu
*cpu_buffer
);
532 rb_remove_pages(struct ring_buffer_per_cpu
*cpu_buffer
, unsigned nr_pages
)
534 struct buffer_page
*bpage
;
538 atomic_inc(&cpu_buffer
->record_disabled
);
541 for (i
= 0; i
< nr_pages
; i
++) {
542 if (RB_WARN_ON(cpu_buffer
, list_empty(&cpu_buffer
->pages
)))
544 p
= cpu_buffer
->pages
.next
;
545 bpage
= list_entry(p
, struct buffer_page
, list
);
546 list_del_init(&bpage
->list
);
547 free_buffer_page(bpage
);
549 if (RB_WARN_ON(cpu_buffer
, list_empty(&cpu_buffer
->pages
)))
552 rb_reset_cpu(cpu_buffer
);
554 rb_check_pages(cpu_buffer
);
556 atomic_dec(&cpu_buffer
->record_disabled
);
561 rb_insert_pages(struct ring_buffer_per_cpu
*cpu_buffer
,
562 struct list_head
*pages
, unsigned nr_pages
)
564 struct buffer_page
*bpage
;
568 atomic_inc(&cpu_buffer
->record_disabled
);
571 for (i
= 0; i
< nr_pages
; i
++) {
572 if (RB_WARN_ON(cpu_buffer
, list_empty(pages
)))
575 bpage
= list_entry(p
, struct buffer_page
, list
);
576 list_del_init(&bpage
->list
);
577 list_add_tail(&bpage
->list
, &cpu_buffer
->pages
);
579 rb_reset_cpu(cpu_buffer
);
581 rb_check_pages(cpu_buffer
);
583 atomic_dec(&cpu_buffer
->record_disabled
);
/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
600 int ring_buffer_resize(struct ring_buffer
*buffer
, unsigned long size
)
602 struct ring_buffer_per_cpu
*cpu_buffer
;
603 unsigned nr_pages
, rm_pages
, new_pages
;
604 struct buffer_page
*bpage
, *tmp
;
605 unsigned long buffer_size
;
611 * Always succeed at resizing a non-existent buffer:
616 size
= DIV_ROUND_UP(size
, BUF_PAGE_SIZE
);
617 size
*= BUF_PAGE_SIZE
;
618 buffer_size
= buffer
->pages
* BUF_PAGE_SIZE
;
620 /* we need a minimum of two pages */
621 if (size
< BUF_PAGE_SIZE
* 2)
622 size
= BUF_PAGE_SIZE
* 2;
624 if (size
== buffer_size
)
627 mutex_lock(&buffer
->mutex
);
629 nr_pages
= DIV_ROUND_UP(size
, BUF_PAGE_SIZE
);
631 if (size
< buffer_size
) {
633 /* easy case, just free pages */
634 if (RB_WARN_ON(buffer
, nr_pages
>= buffer
->pages
)) {
635 mutex_unlock(&buffer
->mutex
);
639 rm_pages
= buffer
->pages
- nr_pages
;
641 for_each_buffer_cpu(buffer
, cpu
) {
642 cpu_buffer
= buffer
->buffers
[cpu
];
643 rb_remove_pages(cpu_buffer
, rm_pages
);
	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * link list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM;
	 */
656 if (RB_WARN_ON(buffer
, nr_pages
<= buffer
->pages
)) {
657 mutex_unlock(&buffer
->mutex
);
661 new_pages
= nr_pages
- buffer
->pages
;
663 for_each_buffer_cpu(buffer
, cpu
) {
664 for (i
= 0; i
< new_pages
; i
++) {
665 bpage
= kzalloc_node(ALIGN(sizeof(*bpage
),
667 GFP_KERNEL
, cpu_to_node(cpu
));
670 list_add(&bpage
->list
, &pages
);
671 addr
= __get_free_page(GFP_KERNEL
);
674 bpage
->page
= (void *)addr
;
675 rb_init_page(bpage
->page
);
679 for_each_buffer_cpu(buffer
, cpu
) {
680 cpu_buffer
= buffer
->buffers
[cpu
];
681 rb_insert_pages(cpu_buffer
, &pages
, new_pages
);
684 if (RB_WARN_ON(buffer
, !list_empty(&pages
))) {
685 mutex_unlock(&buffer
->mutex
);
690 buffer
->pages
= nr_pages
;
691 mutex_unlock(&buffer
->mutex
);
696 list_for_each_entry_safe(bpage
, tmp
, &pages
, list
) {
697 list_del_init(&bpage
->list
);
698 free_buffer_page(bpage
);
700 mutex_unlock(&buffer
->mutex
);
703 EXPORT_SYMBOL_GPL(ring_buffer_resize
);
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}
/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
774 static void rb_update_overflow(struct ring_buffer_per_cpu
*cpu_buffer
)
776 struct ring_buffer_event
*event
;
779 for (head
= 0; head
< rb_head_size(cpu_buffer
);
780 head
+= rb_event_length(event
)) {
782 event
= __rb_page_index(cpu_buffer
->head_page
, head
);
783 if (RB_WARN_ON(cpu_buffer
, rb_null_event(event
)))
785 /* Only count data entries */
786 if (event
->type
!= RINGBUF_TYPE_DATA
)
788 cpu_buffer
->overrun
++;
789 cpu_buffer
->entries
--;
793 static inline void rb_inc_page(struct ring_buffer_per_cpu
*cpu_buffer
,
794 struct buffer_page
**bpage
)
796 struct list_head
*p
= (*bpage
)->list
.next
;
798 if (p
== &cpu_buffer
->pages
)
801 *bpage
= list_entry(p
, struct buffer_page
, list
);
804 static inline unsigned
805 rb_event_index(struct ring_buffer_event
*event
)
807 unsigned long addr
= (unsigned long)event
;
809 return (addr
& ~PAGE_MASK
) - (PAGE_SIZE
- BUF_PAGE_SIZE
);
813 rb_is_commit(struct ring_buffer_per_cpu
*cpu_buffer
,
814 struct ring_buffer_event
*event
)
816 unsigned long addr
= (unsigned long)event
;
819 index
= rb_event_index(event
);
822 return cpu_buffer
->commit_page
->page
== (void *)addr
&&
823 rb_commit_index(cpu_buffer
) == index
;
827 rb_set_commit_event(struct ring_buffer_per_cpu
*cpu_buffer
,
828 struct ring_buffer_event
*event
)
830 unsigned long addr
= (unsigned long)event
;
833 index
= rb_event_index(event
);
836 while (cpu_buffer
->commit_page
->page
!= (void *)addr
) {
837 if (RB_WARN_ON(cpu_buffer
,
838 cpu_buffer
->commit_page
== cpu_buffer
->tail_page
))
840 cpu_buffer
->commit_page
->page
->commit
=
841 cpu_buffer
->commit_page
->write
;
842 rb_inc_page(cpu_buffer
, &cpu_buffer
->commit_page
);
843 cpu_buffer
->write_stamp
=
844 cpu_buffer
->commit_page
->page
->time_stamp
;
847 /* Now set the commit to the event's index */
848 local_set(&cpu_buffer
->commit_page
->page
->commit
, index
);
852 rb_set_commit_to_write(struct ring_buffer_per_cpu
*cpu_buffer
)
855 * We only race with interrupts and NMIs on this CPU.
856 * If we own the commit event, then we can commit
857 * all others that interrupted us, since the interruptions
858 * are in stack format (they finish before they come
859 * back to us). This allows us to do a simple loop to
860 * assign the commit to the tail.
863 while (cpu_buffer
->commit_page
!= cpu_buffer
->tail_page
) {
864 cpu_buffer
->commit_page
->page
->commit
=
865 cpu_buffer
->commit_page
->write
;
866 rb_inc_page(cpu_buffer
, &cpu_buffer
->commit_page
);
867 cpu_buffer
->write_stamp
=
868 cpu_buffer
->commit_page
->page
->time_stamp
;
869 /* add barrier to keep gcc from optimizing too much */
872 while (rb_commit_index(cpu_buffer
) !=
873 rb_page_write(cpu_buffer
->commit_page
)) {
874 cpu_buffer
->commit_page
->page
->commit
=
875 cpu_buffer
->commit_page
->write
;
879 /* again, keep gcc from optimizing */
883 * If an interrupt came in just after the first while loop
884 * and pushed the tail page forward, we will be left with
885 * a dangling commit that will never go forward.
887 if (unlikely(cpu_buffer
->commit_page
!= cpu_buffer
->tail_page
))
891 static void rb_reset_reader_page(struct ring_buffer_per_cpu
*cpu_buffer
)
893 cpu_buffer
->read_stamp
= cpu_buffer
->reader_page
->page
->time_stamp
;
894 cpu_buffer
->reader_page
->read
= 0;
897 static void rb_inc_iter(struct ring_buffer_iter
*iter
)
899 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
902 * The iterator could be on the reader page (it starts there).
903 * But the head could have moved, since the reader was
904 * found. Check for this case and assign the iterator
905 * to the head page instead of next.
907 if (iter
->head_page
== cpu_buffer
->reader_page
)
908 iter
->head_page
= cpu_buffer
->head_page
;
910 rb_inc_page(cpu_buffer
, &iter
->head_page
);
912 iter
->read_stamp
= iter
->head_page
->page
->time_stamp
;
917 * ring_buffer_update_event - update event type and data
 * @event: the event to update
919 * @type: the type of event
920 * @length: the size of the event field in the ring buffer
922 * Update the type and data fields of the event. The length
923 * is the actual size that is written to the ring buffer,
924 * and with this, we can determine what to place into the
928 rb_update_event(struct ring_buffer_event
*event
,
929 unsigned type
, unsigned length
)
935 case RINGBUF_TYPE_PADDING
:
938 case RINGBUF_TYPE_TIME_EXTEND
:
939 event
->len
= DIV_ROUND_UP(RB_LEN_TIME_EXTEND
, RB_ALIGNMENT
);
942 case RINGBUF_TYPE_TIME_STAMP
:
943 event
->len
= DIV_ROUND_UP(RB_LEN_TIME_STAMP
, RB_ALIGNMENT
);
946 case RINGBUF_TYPE_DATA
:
947 length
-= RB_EVNT_HDR_SIZE
;
948 if (length
> RB_MAX_SMALL_DATA
) {
950 event
->array
[0] = length
;
952 event
->len
= DIV_ROUND_UP(length
, RB_ALIGNMENT
);
959 static unsigned rb_calculate_event_length(unsigned length
)
961 struct ring_buffer_event event
; /* Used only for sizeof array */
963 /* zero length can cause confusions */
967 if (length
> RB_MAX_SMALL_DATA
)
968 length
+= sizeof(event
.array
[0]);
970 length
+= RB_EVNT_HDR_SIZE
;
971 length
= ALIGN(length
, RB_ALIGNMENT
);
976 static struct ring_buffer_event
*
977 __rb_reserve_next(struct ring_buffer_per_cpu
*cpu_buffer
,
978 unsigned type
, unsigned long length
, u64
*ts
)
980 struct buffer_page
*tail_page
, *head_page
, *reader_page
, *commit_page
;
981 unsigned long tail
, write
;
982 struct ring_buffer
*buffer
= cpu_buffer
->buffer
;
983 struct ring_buffer_event
*event
;
986 commit_page
= cpu_buffer
->commit_page
;
987 /* we just need to protect against interrupts */
989 tail_page
= cpu_buffer
->tail_page
;
990 write
= local_add_return(length
, &tail_page
->write
);
991 tail
= write
- length
;
	/* See if we shot past the end of this buffer page */
994 if (write
> BUF_PAGE_SIZE
) {
995 struct buffer_page
*next_page
= tail_page
;
997 local_irq_save(flags
);
998 __raw_spin_lock(&cpu_buffer
->lock
);
1000 rb_inc_page(cpu_buffer
, &next_page
);
1002 head_page
= cpu_buffer
->head_page
;
1003 reader_page
= cpu_buffer
->reader_page
;
1005 /* we grabbed the lock before incrementing */
1006 if (RB_WARN_ON(cpu_buffer
, next_page
== reader_page
))
1010 * If for some reason, we had an interrupt storm that made
1011 * it all the way around the buffer, bail, and warn
1014 if (unlikely(next_page
== commit_page
)) {
1019 if (next_page
== head_page
) {
1020 if (!(buffer
->flags
& RB_FL_OVERWRITE
))
1023 /* tail_page has not moved yet? */
1024 if (tail_page
== cpu_buffer
->tail_page
) {
1025 /* count overflows */
1026 rb_update_overflow(cpu_buffer
);
1028 rb_inc_page(cpu_buffer
, &head_page
);
1029 cpu_buffer
->head_page
= head_page
;
1030 cpu_buffer
->head_page
->read
= 0;
1035 * If the tail page is still the same as what we think
1036 * it is, then it is up to us to update the tail
1039 if (tail_page
== cpu_buffer
->tail_page
) {
1040 local_set(&next_page
->write
, 0);
1041 local_set(&next_page
->page
->commit
, 0);
1042 cpu_buffer
->tail_page
= next_page
;
1044 /* reread the time stamp */
1045 *ts
= ring_buffer_time_stamp(cpu_buffer
->cpu
);
1046 cpu_buffer
->tail_page
->page
->time_stamp
= *ts
;
1050 * The actual tail page has moved forward.
1052 if (tail
< BUF_PAGE_SIZE
) {
1053 /* Mark the rest of the page with padding */
1054 event
= __rb_page_index(tail_page
, tail
);
1055 event
->type
= RINGBUF_TYPE_PADDING
;
1058 if (tail
<= BUF_PAGE_SIZE
)
1059 /* Set the write back to the previous setting */
1060 local_set(&tail_page
->write
, tail
);
1063 * If this was a commit entry that failed,
1064 * increment that too
1066 if (tail_page
== cpu_buffer
->commit_page
&&
1067 tail
== rb_commit_index(cpu_buffer
)) {
1068 rb_set_commit_to_write(cpu_buffer
);
1071 __raw_spin_unlock(&cpu_buffer
->lock
);
1072 local_irq_restore(flags
);
1074 /* fail and let the caller try again */
1075 return ERR_PTR(-EAGAIN
);
1078 /* We reserved something on the buffer */
1080 if (RB_WARN_ON(cpu_buffer
, write
> BUF_PAGE_SIZE
))
1083 event
= __rb_page_index(tail_page
, tail
);
1084 rb_update_event(event
, type
, length
);
1087 * If this is a commit and the tail is zero, then update
1088 * this page's time stamp.
1090 if (!tail
&& rb_is_commit(cpu_buffer
, event
))
1091 cpu_buffer
->commit_page
->page
->time_stamp
= *ts
;
1097 if (tail
<= BUF_PAGE_SIZE
)
1098 local_set(&tail_page
->write
, tail
);
1100 __raw_spin_unlock(&cpu_buffer
->lock
);
1101 local_irq_restore(flags
);
1106 rb_add_time_stamp(struct ring_buffer_per_cpu
*cpu_buffer
,
1107 u64
*ts
, u64
*delta
)
1109 struct ring_buffer_event
*event
;
1113 if (unlikely(*delta
> (1ULL << 59) && !once
++)) {
1114 printk(KERN_WARNING
"Delta way too big! %llu"
1115 " ts=%llu write stamp = %llu\n",
1116 (unsigned long long)*delta
,
1117 (unsigned long long)*ts
,
1118 (unsigned long long)cpu_buffer
->write_stamp
);
	 * The delta is too big, we need to add a
	 * timestamp.
1126 event
= __rb_reserve_next(cpu_buffer
,
1127 RINGBUF_TYPE_TIME_EXTEND
,
1133 if (PTR_ERR(event
) == -EAGAIN
)
	/* Only a committed time event can update the write stamp */
1137 if (rb_is_commit(cpu_buffer
, event
)) {
1139 * If this is the first on the page, then we need to
1140 * update the page itself, and just put in a zero.
1142 if (rb_event_index(event
)) {
1143 event
->time_delta
= *delta
& TS_MASK
;
1144 event
->array
[0] = *delta
>> TS_SHIFT
;
1146 cpu_buffer
->commit_page
->page
->time_stamp
= *ts
;
1147 event
->time_delta
= 0;
1148 event
->array
[0] = 0;
1150 cpu_buffer
->write_stamp
= *ts
;
1151 /* let the caller know this was the commit */
1154 /* Darn, this is just wasted space */
1155 event
->time_delta
= 0;
1156 event
->array
[0] = 0;
1165 static struct ring_buffer_event
*
1166 rb_reserve_next_event(struct ring_buffer_per_cpu
*cpu_buffer
,
1167 unsigned type
, unsigned long length
)
1169 struct ring_buffer_event
*event
;
1176 * We allow for interrupts to reenter here and do a trace.
1177 * If one does, it will cause this original code to loop
1178 * back here. Even with heavy interrupts happening, this
1179 * should only happen a few times in a row. If this happens
1180 * 1000 times in a row, there must be either an interrupt
1181 * storm or we have something buggy.
1184 if (RB_WARN_ON(cpu_buffer
, ++nr_loops
> 1000))
1187 ts
= ring_buffer_time_stamp(cpu_buffer
->cpu
);
1190 * Only the first commit can update the timestamp.
1191 * Yes there is a race here. If an interrupt comes in
1192 * just after the conditional and it traces too, then it
1193 * will also check the deltas. More than one timestamp may
1194 * also be made. But only the entry that did the actual
1195 * commit will be something other than zero.
1197 if (cpu_buffer
->tail_page
== cpu_buffer
->commit_page
&&
1198 rb_page_write(cpu_buffer
->tail_page
) ==
1199 rb_commit_index(cpu_buffer
)) {
1201 delta
= ts
- cpu_buffer
->write_stamp
;
1203 /* make sure this delta is calculated here */
1206 /* Did the write stamp get updated already? */
1207 if (unlikely(ts
< cpu_buffer
->write_stamp
))
1210 if (test_time_stamp(delta
)) {
1212 commit
= rb_add_time_stamp(cpu_buffer
, &ts
, &delta
);
1214 if (commit
== -EBUSY
)
1217 if (commit
== -EAGAIN
)
1220 RB_WARN_ON(cpu_buffer
, commit
< 0);
1223 /* Non commits have zero deltas */
1226 event
= __rb_reserve_next(cpu_buffer
, type
, length
, &ts
);
1227 if (PTR_ERR(event
) == -EAGAIN
)
1231 if (unlikely(commit
))
		 * Ouch! We needed a timestamp and it was committed. But
1234 * we didn't get our event reserved.
1236 rb_set_commit_to_write(cpu_buffer
);
	 * If the timestamp was committed, make the commit our entry
1242 * now so that we will update it when needed.
1245 rb_set_commit_event(cpu_buffer
, event
);
1246 else if (!rb_is_commit(cpu_buffer
, event
))
1249 event
->time_delta
= delta
;
1254 static DEFINE_PER_CPU(int, rb_need_resched
);
/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
1272 struct ring_buffer_event
*
1273 ring_buffer_lock_reserve(struct ring_buffer
*buffer
,
1274 unsigned long length
,
1275 unsigned long *flags
)
1277 struct ring_buffer_per_cpu
*cpu_buffer
;
1278 struct ring_buffer_event
*event
;
1281 if (ring_buffer_flags
!= RB_BUFFERS_ON
)
1284 if (atomic_read(&buffer
->record_disabled
))
1287 /* If we are tracing schedule, we don't want to recurse */
1288 resched
= ftrace_preempt_disable();
1290 cpu
= raw_smp_processor_id();
1292 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
1295 cpu_buffer
= buffer
->buffers
[cpu
];
1297 if (atomic_read(&cpu_buffer
->record_disabled
))
1300 length
= rb_calculate_event_length(length
);
1301 if (length
> BUF_PAGE_SIZE
)
1304 event
= rb_reserve_next_event(cpu_buffer
, RINGBUF_TYPE_DATA
, length
);
1309 * Need to store resched state on this cpu.
1310 * Only the first needs to.
1313 if (preempt_count() == 1)
1314 per_cpu(rb_need_resched
, cpu
) = resched
;
1319 ftrace_preempt_enable(resched
);
1322 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve
);
1324 static void rb_commit(struct ring_buffer_per_cpu
*cpu_buffer
,
1325 struct ring_buffer_event
*event
)
1327 cpu_buffer
->entries
++;
1329 /* Only process further if we own the commit */
1330 if (!rb_is_commit(cpu_buffer
, event
))
1333 cpu_buffer
->write_stamp
+= event
->time_delta
;
1335 rb_set_commit_to_write(cpu_buffer
);
/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
1348 int ring_buffer_unlock_commit(struct ring_buffer
*buffer
,
1349 struct ring_buffer_event
*event
,
1350 unsigned long flags
)
1352 struct ring_buffer_per_cpu
*cpu_buffer
;
1353 int cpu
= raw_smp_processor_id();
1355 cpu_buffer
= buffer
->buffers
[cpu
];
1357 rb_commit(cpu_buffer
, event
);
1360 * Only the last preempt count needs to restore preemption.
1362 if (preempt_count() == 1)
1363 ftrace_preempt_enable(per_cpu(rb_need_resched
, cpu
));
1365 preempt_enable_no_resched_notrace();
1369 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit
);
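/*
 * Illustrative sketch of the reserve/commit pattern (not part of the
 * original file). "struct my_payload" is a hypothetical record type used
 * only for the example; the real callers are the tracers.
 *
 *	struct ring_buffer_event *event;
 *	struct my_payload *entry;
 *	unsigned long flags;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
 *	if (!event)
 *		return;		// buffer off, disabled, or reserve failed
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event, flags);
 */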
/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
1384 int ring_buffer_write(struct ring_buffer
*buffer
,
1385 unsigned long length
,
1388 struct ring_buffer_per_cpu
*cpu_buffer
;
1389 struct ring_buffer_event
*event
;
1390 unsigned long event_length
;
1395 if (ring_buffer_flags
!= RB_BUFFERS_ON
)
1398 if (atomic_read(&buffer
->record_disabled
))
1401 resched
= ftrace_preempt_disable();
1403 cpu
= raw_smp_processor_id();
1405 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
1408 cpu_buffer
= buffer
->buffers
[cpu
];
1410 if (atomic_read(&cpu_buffer
->record_disabled
))
1413 event_length
= rb_calculate_event_length(length
);
1414 event
= rb_reserve_next_event(cpu_buffer
,
1415 RINGBUF_TYPE_DATA
, event_length
);
1419 body
= rb_event_data(event
);
1421 memcpy(body
, data
, length
);
1423 rb_commit(cpu_buffer
, event
);
1427 ftrace_preempt_enable(resched
);
1431 EXPORT_SYMBOL_GPL(ring_buffer_write
);
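/*
 * Illustrative sketch (not part of the original file): when the data is
 * already laid out in memory, ring_buffer_write() replaces the whole
 * reserve/copy/commit sequence shown above.
 *
 *	u64 stamp = ring_buffer_time_stamp(raw_smp_processor_id());
 *
 *	// assuming the usual 0-on-success return convention
 *	if (ring_buffer_write(buffer, sizeof(stamp), &stamp))
 *		pr_debug("ring buffer write failed\n");
 */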
1433 static int rb_per_cpu_empty(struct ring_buffer_per_cpu
*cpu_buffer
)
1435 struct buffer_page
*reader
= cpu_buffer
->reader_page
;
1436 struct buffer_page
*head
= cpu_buffer
->head_page
;
1437 struct buffer_page
*commit
= cpu_buffer
->commit_page
;
1439 return reader
->read
== rb_page_commit(reader
) &&
1440 (commit
== reader
||
1442 head
->read
== rb_page_commit(commit
)));
/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable);

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
 * @buffer: The ring buffer to stop writes to.
 * @cpu: The CPU buffer to stop
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_inc(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);

/**
 * ring_buffer_record_enable_cpu - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 * @cpu: The CPU to enable.
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	cpu_buffer = buffer->buffers[cpu];
	atomic_dec(&cpu_buffer->record_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
1516 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1517 * @buffer: The ring buffer
1518 * @cpu: The per CPU buffer to get the entries from.
1520 unsigned long ring_buffer_entries_cpu(struct ring_buffer
*buffer
, int cpu
)
1522 struct ring_buffer_per_cpu
*cpu_buffer
;
1524 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
1527 cpu_buffer
= buffer
->buffers
[cpu
];
1528 return cpu_buffer
->entries
;
1530 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu
);
1533 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1534 * @buffer: The ring buffer
1535 * @cpu: The per CPU buffer to get the number of overruns from
1537 unsigned long ring_buffer_overrun_cpu(struct ring_buffer
*buffer
, int cpu
)
1539 struct ring_buffer_per_cpu
*cpu_buffer
;
1541 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
1544 cpu_buffer
= buffer
->buffers
[cpu
];
1545 return cpu_buffer
->overrun
;
1547 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu
);
1550 * ring_buffer_entries - get the number of entries in a buffer
1551 * @buffer: The ring buffer
1553 * Returns the total number of entries in the ring buffer
1556 unsigned long ring_buffer_entries(struct ring_buffer
*buffer
)
1558 struct ring_buffer_per_cpu
*cpu_buffer
;
1559 unsigned long entries
= 0;
1562 /* if you care about this being correct, lock the buffer */
1563 for_each_buffer_cpu(buffer
, cpu
) {
1564 cpu_buffer
= buffer
->buffers
[cpu
];
1565 entries
+= cpu_buffer
->entries
;
1570 EXPORT_SYMBOL_GPL(ring_buffer_entries
);
 * ring_buffer_overruns - get the number of overruns in the ring buffer
 * @buffer: The ring buffer
 *
 * Returns the total number of overruns in the ring buffer
1579 unsigned long ring_buffer_overruns(struct ring_buffer
*buffer
)
1581 struct ring_buffer_per_cpu
*cpu_buffer
;
1582 unsigned long overruns
= 0;
1585 /* if you care about this being correct, lock the buffer */
1586 for_each_buffer_cpu(buffer
, cpu
) {
1587 cpu_buffer
= buffer
->buffers
[cpu
];
1588 overruns
+= cpu_buffer
->overrun
;
1593 EXPORT_SYMBOL_GPL(ring_buffer_overruns
);
1595 static void rb_iter_reset(struct ring_buffer_iter
*iter
)
1597 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
1599 /* Iterator usage is expected to have record disabled */
1600 if (list_empty(&cpu_buffer
->reader_page
->list
)) {
1601 iter
->head_page
= cpu_buffer
->head_page
;
1602 iter
->head
= cpu_buffer
->head_page
->read
;
1604 iter
->head_page
= cpu_buffer
->reader_page
;
1605 iter
->head
= cpu_buffer
->reader_page
->read
;
1608 iter
->read_stamp
= cpu_buffer
->read_stamp
;
1610 iter
->read_stamp
= iter
->head_page
->page
->time_stamp
;
1614 * ring_buffer_iter_reset - reset an iterator
1615 * @iter: The iterator to reset
1617 * Resets the iterator, so that it will start from the beginning
1620 void ring_buffer_iter_reset(struct ring_buffer_iter
*iter
)
1622 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
1623 unsigned long flags
;
1625 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
1626 rb_iter_reset(iter
);
1627 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
1629 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset
);
1632 * ring_buffer_iter_empty - check if an iterator has no more to read
1633 * @iter: The iterator to check
1635 int ring_buffer_iter_empty(struct ring_buffer_iter
*iter
)
1637 struct ring_buffer_per_cpu
*cpu_buffer
;
1639 cpu_buffer
= iter
->cpu_buffer
;
1641 return iter
->head_page
== cpu_buffer
->commit_page
&&
1642 iter
->head
== rb_commit_index(cpu_buffer
);
1644 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty
);
1647 rb_update_read_stamp(struct ring_buffer_per_cpu
*cpu_buffer
,
1648 struct ring_buffer_event
*event
)
1652 switch (event
->type
) {
1653 case RINGBUF_TYPE_PADDING
:
1656 case RINGBUF_TYPE_TIME_EXTEND
:
1657 delta
= event
->array
[0];
1659 delta
+= event
->time_delta
;
1660 cpu_buffer
->read_stamp
+= delta
;
1663 case RINGBUF_TYPE_TIME_STAMP
:
1664 /* FIXME: not implemented */
1667 case RINGBUF_TYPE_DATA
:
1668 cpu_buffer
->read_stamp
+= event
->time_delta
;
1678 rb_update_iter_read_stamp(struct ring_buffer_iter
*iter
,
1679 struct ring_buffer_event
*event
)
1683 switch (event
->type
) {
1684 case RINGBUF_TYPE_PADDING
:
1687 case RINGBUF_TYPE_TIME_EXTEND
:
1688 delta
= event
->array
[0];
1690 delta
+= event
->time_delta
;
1691 iter
->read_stamp
+= delta
;
1694 case RINGBUF_TYPE_TIME_STAMP
:
1695 /* FIXME: not implemented */
1698 case RINGBUF_TYPE_DATA
:
1699 iter
->read_stamp
+= event
->time_delta
;
1708 static struct buffer_page
*
1709 rb_get_reader_page(struct ring_buffer_per_cpu
*cpu_buffer
)
1711 struct buffer_page
*reader
= NULL
;
1712 unsigned long flags
;
1715 local_irq_save(flags
);
1716 __raw_spin_lock(&cpu_buffer
->lock
);
1720 * This should normally only loop twice. But because the
1721 * start of the reader inserts an empty page, it causes
1722 * a case where we will loop three times. There should be no
1723 * reason to loop four times (that I know of).
1725 if (RB_WARN_ON(cpu_buffer
, ++nr_loops
> 3)) {
1730 reader
= cpu_buffer
->reader_page
;
1732 /* If there's more to read, return this page */
1733 if (cpu_buffer
->reader_page
->read
< rb_page_size(reader
))
1736 /* Never should we have an index greater than the size */
1737 if (RB_WARN_ON(cpu_buffer
,
1738 cpu_buffer
->reader_page
->read
> rb_page_size(reader
)))
1741 /* check if we caught up to the tail */
1743 if (cpu_buffer
->commit_page
== cpu_buffer
->reader_page
)
1747 * Splice the empty reader page into the list around the head.
1748 * Reset the reader page to size zero.
1751 reader
= cpu_buffer
->head_page
;
1752 cpu_buffer
->reader_page
->list
.next
= reader
->list
.next
;
1753 cpu_buffer
->reader_page
->list
.prev
= reader
->list
.prev
;
1755 local_set(&cpu_buffer
->reader_page
->write
, 0);
1756 local_set(&cpu_buffer
->reader_page
->page
->commit
, 0);
1758 /* Make the reader page now replace the head */
1759 reader
->list
.prev
->next
= &cpu_buffer
->reader_page
->list
;
1760 reader
->list
.next
->prev
= &cpu_buffer
->reader_page
->list
;
1763 * If the tail is on the reader, then we must set the head
1764 * to the inserted page, otherwise we set it one before.
1766 cpu_buffer
->head_page
= cpu_buffer
->reader_page
;
1768 if (cpu_buffer
->commit_page
!= reader
)
1769 rb_inc_page(cpu_buffer
, &cpu_buffer
->head_page
);
1771 /* Finally update the reader page to the new head */
1772 cpu_buffer
->reader_page
= reader
;
1773 rb_reset_reader_page(cpu_buffer
);
1778 __raw_spin_unlock(&cpu_buffer
->lock
);
1779 local_irq_restore(flags
);
1784 static void rb_advance_reader(struct ring_buffer_per_cpu
*cpu_buffer
)
1786 struct ring_buffer_event
*event
;
1787 struct buffer_page
*reader
;
1790 reader
= rb_get_reader_page(cpu_buffer
);
1792 /* This function should not be called when buffer is empty */
1793 if (RB_WARN_ON(cpu_buffer
, !reader
))
1796 event
= rb_reader_event(cpu_buffer
);
1798 if (event
->type
== RINGBUF_TYPE_DATA
)
1799 cpu_buffer
->entries
--;
1801 rb_update_read_stamp(cpu_buffer
, event
);
1803 length
= rb_event_length(event
);
1804 cpu_buffer
->reader_page
->read
+= length
;
1807 static void rb_advance_iter(struct ring_buffer_iter
*iter
)
1809 struct ring_buffer
*buffer
;
1810 struct ring_buffer_per_cpu
*cpu_buffer
;
1811 struct ring_buffer_event
*event
;
1814 cpu_buffer
= iter
->cpu_buffer
;
1815 buffer
= cpu_buffer
->buffer
;
1818 * Check if we are at the end of the buffer.
1820 if (iter
->head
>= rb_page_size(iter
->head_page
)) {
1821 if (RB_WARN_ON(buffer
,
1822 iter
->head_page
== cpu_buffer
->commit_page
))
1828 event
= rb_iter_head_event(iter
);
1830 length
= rb_event_length(event
);
1833 * This should not be called to advance the header if we are
1834 * at the tail of the buffer.
1836 if (RB_WARN_ON(cpu_buffer
,
1837 (iter
->head_page
== cpu_buffer
->commit_page
) &&
1838 (iter
->head
+ length
> rb_commit_index(cpu_buffer
))))
1841 rb_update_iter_read_stamp(iter
, event
);
1843 iter
->head
+= length
;
1845 /* check for end of page padding */
1846 if ((iter
->head
>= rb_page_size(iter
->head_page
)) &&
1847 (iter
->head_page
!= cpu_buffer
->commit_page
))
1848 rb_advance_iter(iter
);
1851 static struct ring_buffer_event
*
1852 rb_buffer_peek(struct ring_buffer
*buffer
, int cpu
, u64
*ts
)
1854 struct ring_buffer_per_cpu
*cpu_buffer
;
1855 struct ring_buffer_event
*event
;
1856 struct buffer_page
*reader
;
1859 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
1862 cpu_buffer
= buffer
->buffers
[cpu
];
1866 * We repeat when a timestamp is encountered. It is possible
1867 * to get multiple timestamps from an interrupt entering just
1868 * as one timestamp is about to be written. The max times
1869 * that this can happen is the number of nested interrupts we
1870 * can have. Nesting 10 deep of interrupts is clearly
1873 if (RB_WARN_ON(cpu_buffer
, ++nr_loops
> 10))
1876 reader
= rb_get_reader_page(cpu_buffer
);
1880 event
= rb_reader_event(cpu_buffer
);
1882 switch (event
->type
) {
1883 case RINGBUF_TYPE_PADDING
:
1884 RB_WARN_ON(cpu_buffer
, 1);
1885 rb_advance_reader(cpu_buffer
);
1888 case RINGBUF_TYPE_TIME_EXTEND
:
1889 /* Internal data, OK to advance */
1890 rb_advance_reader(cpu_buffer
);
1893 case RINGBUF_TYPE_TIME_STAMP
:
1894 /* FIXME: not implemented */
1895 rb_advance_reader(cpu_buffer
);
1898 case RINGBUF_TYPE_DATA
:
1900 *ts
= cpu_buffer
->read_stamp
+ event
->time_delta
;
1901 ring_buffer_normalize_time_stamp(cpu_buffer
->cpu
, ts
);
1911 EXPORT_SYMBOL_GPL(ring_buffer_peek
);
1913 static struct ring_buffer_event
*
1914 rb_iter_peek(struct ring_buffer_iter
*iter
, u64
*ts
)
1916 struct ring_buffer
*buffer
;
1917 struct ring_buffer_per_cpu
*cpu_buffer
;
1918 struct ring_buffer_event
*event
;
1921 if (ring_buffer_iter_empty(iter
))
1924 cpu_buffer
= iter
->cpu_buffer
;
1925 buffer
= cpu_buffer
->buffer
;
1929 * We repeat when a timestamp is encountered. It is possible
1930 * to get multiple timestamps from an interrupt entering just
1931 * as one timestamp is about to be written. The max times
1932 * that this can happen is the number of nested interrupts we
1933 * can have. Nesting 10 deep of interrupts is clearly
1936 if (RB_WARN_ON(cpu_buffer
, ++nr_loops
> 10))
1939 if (rb_per_cpu_empty(cpu_buffer
))
1942 event
= rb_iter_head_event(iter
);
1944 switch (event
->type
) {
1945 case RINGBUF_TYPE_PADDING
:
1949 case RINGBUF_TYPE_TIME_EXTEND
:
1950 /* Internal data, OK to advance */
1951 rb_advance_iter(iter
);
1954 case RINGBUF_TYPE_TIME_STAMP
:
1955 /* FIXME: not implemented */
1956 rb_advance_iter(iter
);
1959 case RINGBUF_TYPE_DATA
:
1961 *ts
= iter
->read_stamp
+ event
->time_delta
;
1962 ring_buffer_normalize_time_stamp(cpu_buffer
->cpu
, ts
);
1972 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek
);
/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
1983 struct ring_buffer_event
*
1984 ring_buffer_peek(struct ring_buffer
*buffer
, int cpu
, u64
*ts
)
1986 struct ring_buffer_per_cpu
*cpu_buffer
= buffer
->buffers
[cpu
];
1987 struct ring_buffer_event
*event
;
1988 unsigned long flags
;
1990 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
1991 event
= rb_buffer_peek(buffer
, cpu
, ts
);
1992 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
1998 * ring_buffer_iter_peek - peek at the next event to be read
1999 * @iter: The ring buffer iterator
2000 * @ts: The timestamp counter of this event.
2002 * This will return the event that will be read next, but does
2003 * not increment the iterator.
2005 struct ring_buffer_event
*
2006 ring_buffer_iter_peek(struct ring_buffer_iter
*iter
, u64
*ts
)
2008 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
2009 struct ring_buffer_event
*event
;
2010 unsigned long flags
;
2012 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2013 event
= rb_iter_peek(iter
, ts
);
2014 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2020 * ring_buffer_consume - return an event and consume it
2021 * @buffer: The ring buffer to get the next event from
2023 * Returns the next event in the ring buffer, and that event is consumed.
2024 * Meaning, that sequential reads will keep returning a different event,
2025 * and eventually empty the ring buffer if the producer is slower.
2027 struct ring_buffer_event
*
2028 ring_buffer_consume(struct ring_buffer
*buffer
, int cpu
, u64
*ts
)
2030 struct ring_buffer_per_cpu
*cpu_buffer
= buffer
->buffers
[cpu
];
2031 struct ring_buffer_event
*event
;
2032 unsigned long flags
;
2034 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
2037 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2039 event
= rb_buffer_peek(buffer
, cpu
, ts
);
2043 rb_advance_reader(cpu_buffer
);
2046 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2050 EXPORT_SYMBOL_GPL(ring_buffer_consume
);
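/*
 * Illustrative sketch (not part of the original file): draining one CPU's
 * buffer with consuming reads. Each call returns the next event and
 * removes it from the buffer, so the buffer is eventually emptied if the
 * producer is slower than the reader.
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
 *		void *body = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		process_event(body, len, ts);	// hypothetical consumer
 *	}
 */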
2053 * ring_buffer_read_start - start a non consuming read of the buffer
2054 * @buffer: The ring buffer to read from
2055 * @cpu: The cpu buffer to iterate over
2057 * This starts up an iteration through the buffer. It also disables
2058 * the recording to the buffer until the reading is finished.
2059 * This prevents the reading from being corrupted. This is not
2060 * a consuming read, so a producer is not expected.
2062 * Must be paired with ring_buffer_finish.
2064 struct ring_buffer_iter
*
2065 ring_buffer_read_start(struct ring_buffer
*buffer
, int cpu
)
2067 struct ring_buffer_per_cpu
*cpu_buffer
;
2068 struct ring_buffer_iter
*iter
;
2069 unsigned long flags
;
2071 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
2074 iter
= kmalloc(sizeof(*iter
), GFP_KERNEL
);
2078 cpu_buffer
= buffer
->buffers
[cpu
];
2080 iter
->cpu_buffer
= cpu_buffer
;
2082 atomic_inc(&cpu_buffer
->record_disabled
);
2083 synchronize_sched();
2085 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2086 __raw_spin_lock(&cpu_buffer
->lock
);
2087 rb_iter_reset(iter
);
2088 __raw_spin_unlock(&cpu_buffer
->lock
);
2089 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2093 EXPORT_SYMBOL_GPL(ring_buffer_read_start
);
/**
 * ring_buffer_read_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_read_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
 */
void
2103 ring_buffer_read_finish(struct ring_buffer_iter
*iter
)
2105 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
2107 atomic_dec(&cpu_buffer
->record_disabled
);
2110 EXPORT_SYMBOL_GPL(ring_buffer_read_finish
);
2113 * ring_buffer_read - read the next item in the ring buffer by the iterator
2114 * @iter: The ring buffer iterator
2115 * @ts: The time stamp of the event read.
2117 * This reads the next event in the ring buffer and increments the iterator.
2119 struct ring_buffer_event
*
2120 ring_buffer_read(struct ring_buffer_iter
*iter
, u64
*ts
)
2122 struct ring_buffer_event
*event
;
2123 struct ring_buffer_per_cpu
*cpu_buffer
= iter
->cpu_buffer
;
2124 unsigned long flags
;
2126 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2127 event
= rb_iter_peek(iter
, ts
);
2131 rb_advance_iter(iter
);
2133 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2137 EXPORT_SYMBOL_GPL(ring_buffer_read
);
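/*
 * Illustrative sketch (not part of the original file): a non-consuming
 * walk over one CPU buffer. ring_buffer_read_start() disables recording
 * on that CPU until ring_buffer_read_finish() is called, so the contents
 * stay stable while they are examined.
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_start(buffer, cpu);
 *	if (!iter)
 *		return;
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		dump_event(event, ts);		// hypothetical helper
 *	ring_buffer_read_finish(iter);
 */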
2140 * ring_buffer_size - return the size of the ring buffer (in bytes)
2141 * @buffer: The ring buffer.
2143 unsigned long ring_buffer_size(struct ring_buffer
*buffer
)
2145 return BUF_PAGE_SIZE
* buffer
->pages
;
2147 EXPORT_SYMBOL_GPL(ring_buffer_size
);
2150 rb_reset_cpu(struct ring_buffer_per_cpu
*cpu_buffer
)
2152 cpu_buffer
->head_page
2153 = list_entry(cpu_buffer
->pages
.next
, struct buffer_page
, list
);
2154 local_set(&cpu_buffer
->head_page
->write
, 0);
2155 local_set(&cpu_buffer
->head_page
->page
->commit
, 0);
2157 cpu_buffer
->head_page
->read
= 0;
2159 cpu_buffer
->tail_page
= cpu_buffer
->head_page
;
2160 cpu_buffer
->commit_page
= cpu_buffer
->head_page
;
2162 INIT_LIST_HEAD(&cpu_buffer
->reader_page
->list
);
2163 local_set(&cpu_buffer
->reader_page
->write
, 0);
2164 local_set(&cpu_buffer
->reader_page
->page
->commit
, 0);
2165 cpu_buffer
->reader_page
->read
= 0;
2167 cpu_buffer
->overrun
= 0;
2168 cpu_buffer
->entries
= 0;
2172 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2173 * @buffer: The ring buffer to reset a per cpu buffer of
2174 * @cpu: The CPU buffer to be reset
2176 void ring_buffer_reset_cpu(struct ring_buffer
*buffer
, int cpu
)
2178 struct ring_buffer_per_cpu
*cpu_buffer
= buffer
->buffers
[cpu
];
2179 unsigned long flags
;
2181 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
2184 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2186 __raw_spin_lock(&cpu_buffer
->lock
);
2188 rb_reset_cpu(cpu_buffer
);
2190 __raw_spin_unlock(&cpu_buffer
->lock
);
2192 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2194 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu
);
2197 * ring_buffer_reset - reset a ring buffer
2198 * @buffer: The ring buffer to reset all cpu buffers
2200 void ring_buffer_reset(struct ring_buffer
*buffer
)
2204 for_each_buffer_cpu(buffer
, cpu
)
2205 ring_buffer_reset_cpu(buffer
, cpu
);
2207 EXPORT_SYMBOL_GPL(ring_buffer_reset
);
/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
2213 int ring_buffer_empty(struct ring_buffer
*buffer
)
2215 struct ring_buffer_per_cpu
*cpu_buffer
;
2218 /* yes this is racy, but if you don't like the race, lock the buffer */
2219 for_each_buffer_cpu(buffer
, cpu
) {
2220 cpu_buffer
= buffer
->buffers
[cpu
];
2221 if (!rb_per_cpu_empty(cpu_buffer
))
2226 EXPORT_SYMBOL_GPL(ring_buffer_empty
);
2229 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2230 * @buffer: The ring buffer
2231 * @cpu: The CPU buffer to test
2233 int ring_buffer_empty_cpu(struct ring_buffer
*buffer
, int cpu
)
2235 struct ring_buffer_per_cpu
*cpu_buffer
;
2237 if (!cpumask_test_cpu(cpu
, buffer
->cpumask
))
2240 cpu_buffer
= buffer
->buffers
[cpu
];
2241 return rb_per_cpu_empty(cpu_buffer
);
2243 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu
);
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
2255 int ring_buffer_swap_cpu(struct ring_buffer
*buffer_a
,
2256 struct ring_buffer
*buffer_b
, int cpu
)
2258 struct ring_buffer_per_cpu
*cpu_buffer_a
;
2259 struct ring_buffer_per_cpu
*cpu_buffer_b
;
2261 if (!cpumask_test_cpu(cpu
, buffer_a
->cpumask
) ||
2262 !cpumask_test_cpu(cpu
, buffer_b
->cpumask
))
2265 /* At least make sure the two buffers are somewhat the same */
2266 if (buffer_a
->pages
!= buffer_b
->pages
)
2269 cpu_buffer_a
= buffer_a
->buffers
[cpu
];
2270 cpu_buffer_b
= buffer_b
->buffers
[cpu
];
2273 * We can't do a synchronize_sched here because this
2274 * function can be called in atomic context.
2275 * Normally this will be called from the same CPU as cpu.
2276 * If not it's up to the caller to protect this.
2278 atomic_inc(&cpu_buffer_a
->record_disabled
);
2279 atomic_inc(&cpu_buffer_b
->record_disabled
);
2281 buffer_a
->buffers
[cpu
] = cpu_buffer_b
;
2282 buffer_b
->buffers
[cpu
] = cpu_buffer_a
;
2284 cpu_buffer_b
->buffer
= buffer_a
;
2285 cpu_buffer_a
->buffer
= buffer_b
;
2287 atomic_dec(&cpu_buffer_a
->record_disabled
);
2288 atomic_dec(&cpu_buffer_b
->record_disabled
);
2292 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu
);
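/*
 * Illustrative sketch (not part of the original file): the "snapshot"
 * use mentioned above. A tracer keeps a spare buffer of the same size
 * and swaps the live CPU buffer out from under the writers, then reads
 * the detached copy at leisure.
 *
 *	if (ring_buffer_swap_cpu(live_buffer, spare_buffer, cpu) == 0) {
 *		// spare_buffer now holds what was recorded on this cpu
 *		read_snapshot(spare_buffer, cpu);	// hypothetical helper
 *	}
 */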
2294 static void rb_remove_entries(struct ring_buffer_per_cpu
*cpu_buffer
,
2295 struct buffer_data_page
*bpage
)
2297 struct ring_buffer_event
*event
;
2300 __raw_spin_lock(&cpu_buffer
->lock
);
2301 for (head
= 0; head
< local_read(&bpage
->commit
);
2302 head
+= rb_event_length(event
)) {
2304 event
= __rb_data_page_index(bpage
, head
);
2305 if (RB_WARN_ON(cpu_buffer
, rb_null_event(event
)))
2307 /* Only count data entries */
2308 if (event
->type
!= RINGBUF_TYPE_DATA
)
2310 cpu_buffer
->entries
--;
2312 __raw_spin_unlock(&cpu_buffer
->lock
);
2316 * ring_buffer_alloc_read_page - allocate a page to read from buffer
2317 * @buffer: the buffer to allocate for.
2319 * This function is used in conjunction with ring_buffer_read_page.
2320 * When reading a full page from the ring buffer, these functions
2321 * can be used to speed up the process. The calling function should
2322 * allocate a few pages first with this function. Then when it
2323 * needs to get pages from the ring buffer, it passes the result
2324 * of this function into ring_buffer_read_page, which will swap
2325 * the page that was allocated, with the read page of the buffer.
2328 * The page allocated, or NULL on error.
2330 void *ring_buffer_alloc_read_page(struct ring_buffer
*buffer
)
2333 struct buffer_data_page
*bpage
;
2335 addr
= __get_free_page(GFP_KERNEL
);
2339 bpage
= (void *)addr
;
2345 * ring_buffer_free_read_page - free an allocated read page
2346 * @buffer: the buffer the page was allocate for
2347 * @data: the page to free
2349 * Free a page allocated from ring_buffer_alloc_read_page.
2351 void ring_buffer_free_read_page(struct ring_buffer
*buffer
, void *data
)
2353 free_page((unsigned long)data
);
2357 * ring_buffer_read_page - extract a page from the ring buffer
2358 * @buffer: buffer to extract from
2359 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
2360 * @cpu: the cpu of the buffer to extract
2361 * @full: should the extraction only happen when the page is full.
2363 * This function will pull out a page from the ring buffer and consume it.
2364 * @data_page must be the address of the variable that was returned
2365 * from ring_buffer_alloc_read_page. This is because the page might be used
2366 * to swap with a page in the ring buffer.
2369 * rpage = ring_buffer_alloc_page(buffer);
2372 * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
2374 * process_page(rpage);
2376 * When @full is set, the function will not return true unless
2377 * the writer is off the reader page.
2379 * Note: it is up to the calling functions to handle sleeps and wakeups.
2380 * The ring buffer can be used anywhere in the kernel and can not
2381 * blindly call wake_up. The layer that uses the ring buffer must be
2382 * responsible for that.
2385 * 1 if data has been transferred
2386 * 0 if no data has been transferred.
2388 int ring_buffer_read_page(struct ring_buffer
*buffer
,
2389 void **data_page
, int cpu
, int full
)
2391 struct ring_buffer_per_cpu
*cpu_buffer
= buffer
->buffers
[cpu
];
2392 struct ring_buffer_event
*event
;
2393 struct buffer_data_page
*bpage
;
2394 unsigned long flags
;
2404 spin_lock_irqsave(&cpu_buffer
->reader_lock
, flags
);
2407 * rb_buffer_peek will get the next ring buffer if
2408 * the current reader page is empty.
2410 event
= rb_buffer_peek(buffer
, cpu
, NULL
);
2414 /* check for data */
2415 if (!local_read(&cpu_buffer
->reader_page
->page
->commit
))
2418 * If the writer is already off of the read page, then simply
2419 * switch the read page with the given page. Otherwise
2420 * we need to copy the data from the reader to the writer.
2422 if (cpu_buffer
->reader_page
== cpu_buffer
->commit_page
) {
2423 unsigned int read
= cpu_buffer
->reader_page
->read
;
2427 /* The writer is still on the reader page, we must copy */
2428 bpage
= cpu_buffer
->reader_page
->page
;
2430 cpu_buffer
->reader_page
->page
->data
+ read
,
2431 local_read(&bpage
->commit
) - read
);
2433 /* consume what was read */
2434 cpu_buffer
->reader_page
+= read
;
2437 /* swap the pages */
2438 rb_init_page(bpage
);
2439 bpage
= cpu_buffer
->reader_page
->page
;
2440 cpu_buffer
->reader_page
->page
= *data_page
;
2441 cpu_buffer
->reader_page
->read
= 0;
2446 /* update the entry counter */
2447 rb_remove_entries(cpu_buffer
, bpage
);
2449 spin_unlock_irqrestore(&cpu_buffer
->reader_lock
, flags
);
2455 rb_simple_read(struct file
*filp
, char __user
*ubuf
,
2456 size_t cnt
, loff_t
*ppos
)
2458 long *p
= filp
->private_data
;
2462 if (test_bit(RB_BUFFERS_DISABLED_BIT
, p
))
2463 r
= sprintf(buf
, "permanently disabled\n");
2465 r
= sprintf(buf
, "%d\n", test_bit(RB_BUFFERS_ON_BIT
, p
));
2467 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
2471 rb_simple_write(struct file
*filp
, const char __user
*ubuf
,
2472 size_t cnt
, loff_t
*ppos
)
2474 long *p
= filp
->private_data
;
2479 if (cnt
>= sizeof(buf
))
2482 if (copy_from_user(&buf
, ubuf
, cnt
))
2487 ret
= strict_strtoul(buf
, 10, &val
);
2492 set_bit(RB_BUFFERS_ON_BIT
, p
);
2494 clear_bit(RB_BUFFERS_ON_BIT
, p
);
2501 static struct file_operations rb_simple_fops
= {
2502 .open
= tracing_open_generic
,
2503 .read
= rb_simple_read
,
2504 .write
= rb_simple_write
,
2508 static __init
int rb_init_debugfs(void)
2510 struct dentry
*d_tracer
;
2511 struct dentry
*entry
;
2513 d_tracer
= tracing_init_dentry();
2515 entry
= debugfs_create_file("tracing_on", 0644, d_tracer
,
2516 &ring_buffer_flags
, &rb_simple_fops
);
2518 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2523 fs_initcall(rb_init_debugfs
);