#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE	0x01

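/*
 * Ring-buffer backing for a perf event: the user-visible control page
 * (user_page) followed by nr_pages data pages. data_pages[] is a
 * flexible array, so the struct is allocated with room for all of the
 * page pointers.
 */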
struct ring_buffer {
	atomic_t refcount;
	struct rcu_head rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct work;
	int page_order;			/* allocation order */
#endif
	int nr_pages;			/* nr of data pages */
	int overwrite;			/* can overwrite itself */
	int paused;			/* can write into ring buffer */

	atomic_t poll;			/* POLL_ for wakeups */

	local_t head;			/* write position */
	local_t nest;			/* nested writers */
	local_t events;			/* event limit */
	local_t wakeup;			/* wakeup stamp */
	local_t lost;			/* nr records lost */

	long watermark;			/* wakeup watermark */
	long aux_watermark;
	/* poll crap */
	spinlock_t event_lock;
	struct list_head event_list;

	atomic_t mmap_count;
	unsigned long mmap_locked;
	struct user_struct *mmap_user;

	/* AUX area */
	long aux_head;
	local_t aux_nest;
	long aux_wakeup;		/* last aux_watermark boundary crossed by aux_head */
	unsigned long aux_pgoff;
	int aux_nr_pages;
	int aux_overwrite;
	atomic_t aux_mmap_count;
	unsigned long aux_mmap_locked;
	void (*free_aux)(void *);
	atomic_t aux_refcount;
	void **aux_pages;
	void *aux_priv;

	struct perf_event_mmap_page *user_page;
	void *data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

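/*
 * Pause/resume writing into the ring buffer. A buffer with no data pages
 * (e.g. one mapped for its AUX area only) always stays paused.
 */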
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

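/*
 * The output-copy loop below assumes memcpy_func follows the
 * copy_from_user() convention and returns the number of bytes that could
 * NOT be copied. The loop keeps copying, wrapping from one data page to
 * the next, until either len is exhausted or a partial copy occurs, and
 * returns the number of bytes left uncopied.
 */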
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

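/*
 * Like the DEFINE_OUTPUT_COPY() helpers, but the copy_func callback is
 * handed the running offset (orig_len - len) instead of an advancing buf
 * pointer, so it can compute source addresses on its own.
 */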
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

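/*
 * Both helpers below return 0 ("nothing left to copy"), so the copy loop
 * always advances by a full chunk: __output_copy() actually copies the
 * data, while __output_skip() merely advances the handle to skip over it.
 */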
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

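/*
 * Default user-memory copy, used unless the architecture provides its own
 * arch_perf_out_copy_user. Page faults are disabled because the output
 * path cannot sleep; like __copy_from_user_inatomic(), the return value
 * is the number of bytes not copied.
 */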
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

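/*
 * One recursion counter per context level: 0 = task, 1 = softirq,
 * 2 = hardirq, 3 = NMI. Returns the context index on success, or -1 if
 * an event is already being handled in this context, in which case the
 * caller must not recurse.
 */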
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

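/*
 * User stack dump support is opt-in per architecture: without
 * CONFIG_HAVE_PERF_USER_STACK_DUMP, perf_user_stack_pointer() reads as 0
 * and no user stack is sampled.
 */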
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */