include/linux/pipe_fs_i.h

#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H

#define PIPEFS_MAGIC 0x50495045

#define PIPE_DEF_BUFFERS	16

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */

/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above.
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};

/**
 * struct pipe_inode_info - a linux kernel pipe
 * @wait: reader/writer wait point in case of empty/full pipe
 * @nrbufs: the number of non-empty pipe buffers in this pipe
 * @curbuf: the current pipe buffer entry
 * @buffers: total number of buffer slots in the @bufs array
 * @tmp_page: cached released page
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @waiting_writers: number of writers blocked waiting for room
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @inode: inode this pipe is attached to
 * @bufs: the circular array of pipe buffers
 **/
struct pipe_inode_info {
	wait_queue_head_t wait;
	unsigned int nrbufs, curbuf, buffers;
	unsigned int readers;
	unsigned int writers;
	unsigned int waiting_writers;
	unsigned int r_counter;
	unsigned int w_counter;
	struct page *tmp_page;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct inode *inode;
	struct pipe_buffer *bufs;
};
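
/*
 * Illustrative sketch (not part of the original header): the occupied
 * slots form a circular window of @nrbufs entries starting at @curbuf
 * inside the @bufs array of @buffers slots, with @buffers kept a power
 * of two so the wrap-around reduces to a mask. The helper name below is
 * hypothetical.
 */
static inline struct pipe_buffer *
example_pipe_nth_buf(struct pipe_inode_info *pipe, unsigned int n)
{
	/* index of the n-th occupied buffer, wrapping inside the ring */
	return pipe->bufs + ((pipe->curbuf + n) & (pipe->buffers - 1));
}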

/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->steal()
 *	...
 *	->map()
 *	...
 *	->unmap()
 *
 * That is, ->map() must be called on a confirmed buffer,
 * same goes for ->steal(). See below for the meaning of each
 * operation. Also see kerneldoc in fs/pipe.c for the pipe
 * and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * This is set to 1, if the generic pipe read/write may coalesce
	 * data into an existing buffer. If this is set to 0, a new pipe
	 * page segment is always used for new data.
	 */
	int can_merge;

	/*
	 * ->map() returns a virtual address mapping of the pipe buffer.
	 * The last integer flag reflects whether this should be an atomic
	 * mapping or not. The atomic map is faster, however you can't take
	 * page faults before calling ->unmap() again. So if you need to,
	 * e.g., access user data through copy_to/from_user(), then you must
	 * get a non-atomic map. ->map() uses the KM_USER0 atomic slot for
	 * atomic maps, so you can't map more than one pipe_buffer at once
	 * and you have to be careful if mapping another page as source
	 * or destination for a copy (IOW, it has to use something else
	 * than KM_USER0).
	 */
	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);

	/*
	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
	 */
	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);

	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer have been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->steal() returns 0 for success, in which case the contents
	 * of the pipe (the buf->page) are locked and now completely owned
	 * by the caller. The page may then be transferred to a different
	 * mapping; the most common use is inserting it into a different
	 * file's address space cache.
	 */
	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer.
	 */
	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
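
/*
 * Illustrative sketch (not part of the original header) of the nesting
 * rules documented above: a consumer must ->confirm() a buffer before
 * mapping it, and must pair every ->map() with ->unmap(). The helper
 * name and the non-atomic mapping choice below are hypothetical.
 */
static inline int example_peek_pipe_buf(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	void *addr;
	int ret;

	/* make sure the data is present and good before touching it */
	ret = buf->ops->confirm(pipe, buf);
	if (ret)
		return ret;

	/* non-atomic map (last argument 0), so sleeping is allowed */
	addr = buf->ops->map(pipe, buf, 0);
	/* ... read buf->len bytes starting at addr + buf->offset ... */
	buf->ops->unmap(pipe, buf, addr);

	return 0;
}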

/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
#define PIPE_SIZE		PAGE_SIZE

/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
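
/*
 * Illustrative sketch (not part of the original header): when two pipes
 * must be held at once (e.g. when splicing from one pipe to another),
 * pipe_double_lock() takes both mutexes in a consistent order to avoid
 * an ABBA deadlock; each pipe is then dropped with pipe_unlock(). The
 * function name below is hypothetical.
 */
static inline void example_with_two_pipes(struct pipe_inode_info *in,
					  struct pipe_inode_info *out)
{
	pipe_double_lock(in, out);	/* deadlock-safe acquisition of both */
	/* ... move or copy pipe_buffers between 'in' and 'out' ... */
	pipe_unlock(in);
	pipe_unlock(out);
}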

extern unsigned int pipe_max_size, pipe_min_size;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
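
/*
 * Illustrative sketch (not part of the original header): pipe_wait() is
 * called with the pipe lock held; it releases the lock, sleeps until a
 * reader or writer signals an event, and reacquires the lock before
 * returning, so callers simply loop until their condition is met. The
 * helper name and wake-up condition below are hypothetical.
 */
static inline void example_wait_for_data(struct pipe_inode_info *pipe)
{
	pipe_lock(pipe);
	while (pipe->nrbufs == 0 && pipe->writers)
		pipe_wait(pipe);	/* drops and retakes the pipe lock */
	pipe_unlock(pipe);
}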

struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
void free_pipe_info(struct inode * inode);
void __free_pipe_info(struct pipe_inode_info *);

/* Generic pipe buffer ops functions */
void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
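
/*
 * Illustrative sketch (not part of the original header): a simple owner
 * of pipe pages can often build its ops table directly from the generic
 * helpers declared above; the table name below is hypothetical.
 */
static const struct pipe_buf_operations example_pipe_buf_ops = {
	.can_merge	= 1,			/* let read/write coalesce data */
	.map		= generic_pipe_buf_map,
	.unmap		= generic_pipe_buf_unmap,
	.confirm	= generic_pipe_buf_confirm,
	.release	= generic_pipe_buf_release,
	.steal		= generic_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};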

/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
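
/*
 * Illustrative userspace sketch (not part of the original header):
 * pipe_fcntl() backs the F_SETPIPE_SZ/F_GETPIPE_SZ fcntl() commands,
 * which let a process resize or query a pipe's capacity, subject to the
 * pipe_max_size sysctl for unprivileged callers:
 *
 *	int fds[2];
 *	pipe(fds);
 *	if (fcntl(fds[1], F_SETPIPE_SZ, 1 << 20) < 0)
 *		perror("F_SETPIPE_SZ");
 *	long cap = fcntl(fds[1], F_GETPIPE_SZ);
 */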

#endif