/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>
/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */
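
/*
 * Layout sketch (derived from the code below): a pipe is a circular
 * array of PIPE_BUFFERS page-sized buffers.  "curbuf" indexes the
 * first non-empty buffer and "nrbufs" counts the buffers in use, so
 * the next free slot is (curbuf + nrbufs) & (PIPE_BUFFERS-1).  Within
 * a buffer, "offset" and "len" delimit the unread bytes -- the
 * start+len construction mentioned above.
 */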
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct inode * inode)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(PIPE_WAIT(*inode), &wait,
			TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
	mutex_unlock(PIPE_MUTEX(*inode));
	schedule();
	finish_wait(PIPE_WAIT(*inode), &wait);
	mutex_lock(PIPE_MUTEX(*inode));
}
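
/*
 * Note that pipe_wait() must be entered with PIPE_MUTEX held: it drops
 * the mutex across the schedule() and retakes it before returning, so
 * callers (pipe_readv/pipe_writev below) simply loop and re-examine
 * the pipe state after it returns.
 */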
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (copy_from_user(to, iov->iov_base, copy))
			return -EFAULT;
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (copy_to_user(iov->iov_base, from, copy))
			return -EFAULT;
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
static void anon_pipe_buf_release(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache.
	 */
	if (page_count(page) == 1 && !info->tmp_page) {
		info->tmp_page = page;
		return;
	}

	/*
	 * Otherwise just release our reference to it
	 */
	page_cache_release(page);
}
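
/*
 * The tmp_page kept above is a one-entry allocation cache: the next
 * write that needs a fresh buffer page picks it up instead of calling
 * alloc_page(), trading one parked page per pipe for fewer allocator
 * round trips on ping-pong read/write workloads.
 */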
static void *anon_pipe_buf_map(struct file *file, struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	return kmap(buf->page);
}
static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
static int anon_pipe_buf_steal(struct pipe_inode_info *info,
			       struct pipe_buffer *buf)
{
	buf->stolen = 1;
	return 0;
}
static struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = anon_pipe_buf_map,
	.unmap = anon_pipe_buf_unmap,
	.release = anon_pipe_buf_release,
	.steal = anon_pipe_buf_steal,
};
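
/*
 * Reader side.  pipe_readv() below drains buffers starting at curbuf:
 * each buffer is mapped, up to buf->len bytes are copied out and
 * buf->offset advanced; only a fully consumed buffer is released and
 * curbuf moved on.  A single readv() may therefore consume several
 * buffers, or only part of one.
 */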
static ssize_t
pipe_readv(struct file *filp, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *info;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(PIPE_MUTEX(*inode));
	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error;

			if (chars > total_len)
				chars = total_len;

			addr = ops->map(filp, info, buf);
			if (IS_ERR(addr)) {
				if (!ret)
					ret = PTR_ERR(addr);
				break;
			}
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars);
			ops->unmap(info, buf);
			if (unlikely(error)) {
				if (!ret) ret = -EFAULT;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret) ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
		}
		pipe_wait(inode);
	}
	mutex_unlock(PIPE_MUTEX(*inode));
	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}
static ssize_t
pipe_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = count };

	return pipe_readv(filp, &iov, 1, ppos);
}
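
/*
 * Writer side.  Worked example of the merge path below: after a
 * 100-byte write, the last buffer holds offset=0, len=100.  A second
 * 100-byte write has total_len & (PAGE_SIZE-1) == 100, the anon ops
 * allow merging, and 100 + 100 <= PAGE_SIZE, so the new data is copied
 * to byte 100 of the same page instead of consuming a fresh buffer
 * slot.
 */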
static ssize_t
pipe_writev(struct file *filp, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t *ppos)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *info;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(PIPE_MUTEX(*inode));
	info = inode->i_pipe;

	if (!PIPE_READERS(*inode)) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (info->nrbufs && chars != 0) {
		int lastbuf = (info->curbuf + info->nrbufs - 1) & (PIPE_BUFFERS-1);
		struct pipe_buffer *buf = info->bufs + lastbuf;
		struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			void *addr;
			int error;

			addr = ops->map(filp, info, buf);
			if (IS_ERR(addr)) {
				error = PTR_ERR(addr);
				goto out;
			}
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars);
			ops->unmap(info, buf);
			ret = error;
			do_wakeup = 1;
			if (error)
				goto out;
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!PIPE_READERS(*inode)) {
			send_sig(SIGPIPE, current, 0);
			if (!ret) ret = -EPIPE;
			break;
		}
		bufs = info->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS-1);
			struct pipe_buffer *buf = info->bufs + newbuf;
			struct page *page = info->tmp_page;
			int error;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				info->tmp_page = page;
			}
			/* Always wakeup, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			error = pipe_iov_copy_from_user(kmap(page), iov, chars);
			kunmap(page);
			if (unlikely(error)) {
				if (!ret) ret = -EFAULT;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			info->nrbufs = ++bufs;
			info->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < PIPE_BUFFERS)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret) ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret) ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		PIPE_WAITING_WRITERS(*inode)++;
		pipe_wait(inode);
		PIPE_WAITING_WRITERS(*inode)--;
	}
out:
	mutex_unlock(PIPE_MUTEX(*inode));
	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}
static ssize_t
pipe_write(struct file *filp, const char __user *buf,
	   size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };

	return pipe_writev(filp, &iov, 1, ppos);
}
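
/*
 * The bad_pipe_r/bad_pipe_w stubs below simply fail with -EBADF.  They
 * are wired into the one-directional fops tables further down as a
 * safety net, so an I/O request that somehow reaches the
 * wrong-direction table fails cleanly instead of calling through a
 * NULL method.
 */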
static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}
static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}
static int
pipe_ioctl(struct inode *pino, struct file *filp,
	   unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *info;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(PIPE_MUTEX(*inode));
			info = inode->i_pipe;
			count = 0;
			buf = info->curbuf;
			nrbufs = info->nrbufs;
			while (--nrbufs >= 0) {
				count += info->bufs[buf].len;
				buf = (buf+1) & (PIPE_BUFFERS-1);
			}
			mutex_unlock(PIPE_MUTEX(*inode));
			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}
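
/*
 * FIONREAD above is the only ioctl a pipe understands: it walks the
 * in-use buffers and reports the total number of unread bytes.  From
 * userspace a caller would do something like:
 *
 *	int n;
 *	if (ioctl(pipefd, FIONREAD, &n) == 0)
 *		... n bytes can be read without blocking ...
 */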
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_dentry->d_inode;
	struct pipe_inode_info *info = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, PIPE_WAIT(*inode), wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = info->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!PIPE_WRITERS(*inode) && filp->f_version != PIPE_WCOUNTER(*inode))
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!PIPE_READERS(*inode))
			mask |= POLLERR;
	}

	return mask;
}
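
/*
 * The f_version test above keeps POLLHUP from being reported while
 * f_version still matches the write-counter, i.e. while no writer has
 * come (and gone) since this end last synchronized with the counter;
 * see also the counter initialization in pipe_new() below.
 */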
static int
pipe_release(struct inode *inode, int decr, int decw)
{
	mutex_lock(PIPE_MUTEX(*inode));
	PIPE_READERS(*inode) -= decr;
	PIPE_WRITERS(*inode) -= decw;
	if (!PIPE_READERS(*inode) && !PIPE_WRITERS(*inode)) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}
	mutex_unlock(PIPE_MUTEX(*inode));

	return 0;
}
static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int retval;

	mutex_lock(PIPE_MUTEX(*inode));
	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));
	mutex_unlock(PIPE_MUTEX(*inode));

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int retval;

	mutex_lock(PIPE_MUTEX(*inode));
	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));
	mutex_unlock(PIPE_MUTEX(*inode));

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_dentry->d_inode;
	int retval;

	mutex_lock(PIPE_MUTEX(*inode));

	retval = fasync_helper(fd, filp, on, PIPE_FASYNC_READERS(*inode));

	if (retval >= 0)
		retval = fasync_helper(fd, filp, on, PIPE_FASYNC_WRITERS(*inode));

	mutex_unlock(PIPE_MUTEX(*inode));

	if (retval < 0)
		return retval;

	return 0;
}
static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	pipe_read_fasync(-1, filp, 0);
	return pipe_release(inode, 1, 0);
}
static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	pipe_write_fasync(-1, filp, 0);
	return pipe_release(inode, 0, 1);
}
static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	pipe_rdwr_fasync(-1, filp, 0);
	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}
static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	/* We could have perhaps used atomic_t, but this and friends
	   below are the only places.  So it doesn't seem worthwhile. */
	mutex_lock(PIPE_MUTEX(*inode));
	PIPE_READERS(*inode)++;
	mutex_unlock(PIPE_MUTEX(*inode));

	return 0;
}
static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	mutex_lock(PIPE_MUTEX(*inode));
	PIPE_WRITERS(*inode)++;
	mutex_unlock(PIPE_MUTEX(*inode));

	return 0;
}
static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	mutex_lock(PIPE_MUTEX(*inode));
	if (filp->f_mode & FMODE_READ)
		PIPE_READERS(*inode)++;
	if (filp->f_mode & FMODE_WRITE)
		PIPE_WRITERS(*inode)++;
	mutex_unlock(PIPE_MUTEX(*inode));

	return 0;
}
/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 */
const struct file_operations read_fifo_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

const struct file_operations write_fifo_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

const struct file_operations rdwr_fifo_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};

static struct file_operations read_pipe_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= bad_pipe_w,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_read_open,
	.release	= pipe_read_release,
	.fasync		= pipe_read_fasync,
};

static struct file_operations write_pipe_fops = {
	.llseek		= no_llseek,
	.read		= bad_pipe_r,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_write_open,
	.release	= pipe_write_release,
	.fasync		= pipe_write_fasync,
};

static struct file_operations rdwr_pipe_fops = {
	.llseek		= no_llseek,
	.read		= pipe_read,
	.readv		= pipe_readv,
	.write		= pipe_write,
	.writev		= pipe_writev,
	.poll		= pipe_poll,
	.ioctl		= pipe_ioctl,
	.open		= pipe_rdwr_open,
	.release	= pipe_rdwr_release,
	.fasync		= pipe_rdwr_fasync,
};
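
/*
 * Six tables, one per open mode x (FIFO, anonymous pipe): the FIFO
 * variants are exported for fs/fifo.c as noted above, while the pipe_*
 * variants stay static and are installed by do_pipe() below.  The
 * one-directional tables point the wrong-direction method at
 * bad_pipe_r/bad_pipe_w, so misdirected I/O fails with -EBADF instead
 * of reaching pipe_readv/pipe_writev.
 */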
void free_pipe_info(struct inode *inode)
{
	int i;
	struct pipe_inode_info *info = inode->i_pipe;

	inode->i_pipe = NULL;
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = info->bufs + i;

		if (buf->ops)
			buf->ops->release(info, buf);
	}
	if (info->tmp_page)
		__free_page(info->tmp_page);
	kfree(info);
}
struct inode* pipe_new(struct inode* inode)
{
	struct pipe_inode_info *info;

	info = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (!info)
		goto fail_page;
	inode->i_pipe = info;

	init_waitqueue_head(PIPE_WAIT(*inode));
	PIPE_RCOUNTER(*inode) = PIPE_WCOUNTER(*inode) = 1;

	return inode;
fail_page:
	return NULL;
}
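
/*
 * Starting both counters at 1 (rather than 0) means a zero-initialized
 * f_version can never match PIPE_WCOUNTER by accident, so the hangup
 * test in pipe_poll() works for anonymous pipes as well as for FIFOs,
 * whose open path (fs/fifo.c) snapshots the counter into f_version to
 * suppress POLLHUP until a writer has been seen.
 */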
static struct vfsmount *pipe_mnt __read_mostly;

static int pipefs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pipefs_dentry_operations = {
	.d_delete	= pipefs_delete_dentry,
};
static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode(pipe_mnt->mnt_sb);

	if (!inode)
		goto fail_inode;

	if (!pipe_new(inode))
		goto fail_iput;
	PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
	inode->i_fop = &rdwr_pipe_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_blksize = PAGE_SIZE;

	return inode;

fail_iput:
	iput(inode);
fail_inode:
	return NULL;
}
int do_pipe(int *fd)
{
	struct qstr this;
	char name[32];
	struct dentry *dentry;
	struct inode * inode;
	struct file *f1, *f2;
	int error;
	int i, j;

	error = -ENFILE;
	f1 = get_empty_filp();
	if (!f1)
		goto no_files;

	f2 = get_empty_filp();
	if (!f2)
		goto close_f1;

	inode = get_pipe_inode();
	if (!inode)
		goto close_f12;

	error = get_unused_fd();
	if (error < 0)
		goto close_f12_inode;
	i = error;

	error = get_unused_fd();
	if (error < 0)
		goto close_f12_inode_i;
	j = error;

	error = -ENOMEM;
	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len = strlen(name);
	this.hash = inode->i_ino; /* will go */
	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto close_f12_inode_i_j;
	dentry->d_op = &pipefs_dentry_operations;
	d_add(dentry, inode);
	f1->f_vfsmnt = f2->f_vfsmnt = mntget(mntget(pipe_mnt));
	f1->f_dentry = f2->f_dentry = dget(dentry);
	f1->f_mapping = f2->f_mapping = inode->i_mapping;

	/* read file */
	f1->f_pos = f2->f_pos = 0;
	f1->f_flags = O_RDONLY;
	f1->f_op = &read_pipe_fops;
	f1->f_mode = FMODE_READ;

	/* write file */
	f2->f_flags = O_WRONLY;
	f2->f_op = &write_pipe_fops;
	f2->f_mode = FMODE_WRITE;

	fd_install(i, f1);
	fd_install(j, f2);
	fd[0] = i;
	fd[1] = j;

	return 0;

close_f12_inode_i_j:
	put_unused_fd(j);
close_f12_inode_i:
	put_unused_fd(i);
close_f12_inode:
	free_pipe_info(inode);
	iput(inode);
close_f12:
	put_filp(f2);
close_f1:
	put_filp(f1);
no_files:
	return error;
}
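
/*
 * do_pipe() backs the pipe(2) system call: on success fd[0] is open
 * read-only on read_pipe_fops and fd[1] write-only on write_pipe_fops,
 * both referencing the same pipefs inode.  A minimal userspace sketch:
 *
 *	int fd[2];
 *	if (pipe(fd) == 0) {
 *		write(fd[1], "hi", 2);	/+ data lands in the ring above +/
 *		read(fd[0], buf, 2);
 *	}
 */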
/*
 * pipefs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pipe: will go nicely and kill the special-casing in procfs.
 */
static struct super_block *pipefs_get_sb(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC);
}

static struct file_system_type pipe_fs_type = {
	.name		= "pipefs",
	.get_sb		= pipefs_get_sb,
	.kill_sb	= kill_anon_super,
};
init_pipe_fs(void)
852 int err
= register_filesystem(&pipe_fs_type
);
854 pipe_mnt
= kern_mount(&pipe_fs_type
);
855 if (IS_ERR(pipe_mnt
)) {
856 err
= PTR_ERR(pipe_mnt
);
857 unregister_filesystem(&pipe_fs_type
);
863 static void __exit
exit_pipe_fs(void)
865 unregister_filesystem(&pipe_fs_type
);
869 fs_initcall(init_pipe_fs
);
870 module_exit(exit_pipe_fs
);