#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
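
/*
 * Illustrative sketch of how a caller might pair rb_alloc() and rb_free();
 * the nr_pages, watermark and cpu values are placeholders, not values
 * taken from this file.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = rb_alloc(nr_pages, watermark, cpu, RING_BUFFER_WRITABLE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	rb_free(rb);
 */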

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}

#endif

static unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
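
/*
 * Worked example with illustrative values: with PAGE_SHIFT == 12 and a
 * non-vmalloc buffer, page_order(rb) is 0, so nr_pages == 8 gives
 * perf_data_size() == 8 << 12 == 32768 bytes of data space.
 */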

static inline void
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned int len)
{
	do {
		unsigned long size = min_t(unsigned long, handle->size, len);

		memcpy(handle->addr, buf, size);

		len -= size;
		handle->addr += size;
		buf += size;
		handle->size -= size;
		if (!handle->size) {
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len);
}
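
/*
 * Walk-through of __output_copy() with assumed values: if handle->size is
 * 16 and len is 24, the first pass copies 16 bytes and exhausts the
 * current page, so handle->page is advanced and masked by nr_pages - 1
 * (wrapping around the ring), handle->addr and handle->size are reset
 * from the next data page, and the remaining 8 bytes are copied on the
 * second pass.
 */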

#endif /* _KERNEL_EVENTS_INTERNAL_H */