/*
 *  linux/fs/pipe.c
 *
 *  Copyright (C) 1991, 1992, 1999  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

/*
 * We use a start+len construction, which provides full use of the
 * allocated memory.
 * -- Florian Coosmann (FGC)
 *
 * Reads with count = 0 should always return 0.
 * -- Julian Bradfield 1999-06-07.
 *
 * FIFOs and Pipes now generate SIGIO for both readers and writers.
 * -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
 *
 * pipe_read & write cleanup
 * -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
 */

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
	DEFINE_WAIT(wait);

	/*
	 * Pipes are system-local resources, so sleeping on them
	 * is considered a noninteractive wait:
	 */
	prepare_to_wait(&pipe->wait, &wait,
			TASK_INTERRUPTIBLE | TASK_NONINTERACTIVE);
	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);
	schedule();
	finish_wait(&pipe->wait, &wait);
	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);
}

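/*
 * Note: callers enter pipe_wait() holding i_mutex; the lock is dropped
 * for the duration of schedule() so the peer can make progress, and is
 * retaken before returning.  Because the task is queued on pipe->wait
 * *before* the unlock, a wakeup arriving between the unlock and the
 * schedule() is not lost.  pipe_read() and pipe_write() below call this
 * in a retry loop, rechecking the ring state after every wakeup.
 */
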
static int
pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
			int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_from_user_inatomic(to, iov->iov_base, copy))
				return -EFAULT;
		} else {
			if (copy_from_user(to, iov->iov_base, copy))
				return -EFAULT;
		}
		to += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}

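/*
 * Caller protocol for the two copy helpers above, as used by pipe_read()
 * and pipe_write() below (a sketch; the real thing follows later in this
 * file): pre-fault the user pages, try the cheap atomic copy first, and
 * if it faults anyway retry once with atomic == 0, where the copy is
 * allowed to sleep and fault pages in properly:
 *
 *	atomic = !iov_fault_in_pages_write(iov, chars);
 *	redo:
 *		addr = ops->map(pipe, buf, atomic);
 *		error = pipe_iov_copy_to_user(iov, addr + buf->offset,
 *					      chars, atomic);
 *		ops->unmap(pipe, buf, addr);
 *		if (error && atomic) {
 *			atomic = 0;
 *			goto redo;
 *		}
 */
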
/*
 * Attempt to pre-fault in the user memory, so we can use atomic copies.
 * Returns the number of bytes not faulted in.
 */
static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		if (fault_in_pages_writeable(iov->iov_base, this_len))
			break;

		len -= this_len;
		iov++;
	}

	return len;
}

/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}

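/*
 * Why pre-faulting helps: the __copy_*_inatomic() variants are used here
 * under kmap_atomic(), where the copy must not sleep, so a not-present
 * user page makes them fail with -EFAULT instead of being faulted in.
 * Touching the pages first lets the fast atomic path succeed in the
 * common case; the sleeping kmap()/copy_*_user() path remains as the
 * correctness fallback.
 */
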
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
	 * allocation cache. (Otherwise just release our reference to it)
	 */
	if (page_count(page) == 1 && !pipe->tmp_page)
		pipe->tmp_page = page;
	else
		page_cache_release(page);
}

void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf, int atomic)
{
	if (atomic) {
		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
		return kmap_atomic(buf->page, KM_USER0);
	}

	return kmap(buf->page);
}

void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, void *map_data)
{
	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
		kunmap_atomic(map_data, KM_USER0);
	} else
		kunmap(buf->page);
}

int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}

void generic_pipe_buf_get(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}

int generic_pipe_buf_pin(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	return 0;
}

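/*
 * Operations for the default anonymous-pipe buffer.  ->can_merge allows
 * pipe_write() to append small writes to the last partially filled
 * buffer instead of consuming a fresh page.  Ring indices are wrapped
 * with "& (PIPE_BUFFERS-1)" throughout, which relies on PIPE_BUFFERS
 * being a power of two.
 */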
static const struct pipe_buf_operations anon_pipe_buf_ops = {
	.can_merge = 1,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = anon_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t
pipe_read(struct kiocb *iocb, const struct iovec *_iov,
	   unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int do_wakeup;
	ssize_t ret;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;

	total_len = iov_length(iov, nr_segs);
	/* Null read succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	for (;;) {
		int bufs = pipe->nrbufs;
		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			const struct pipe_buf_operations *ops = buf->ops;
			void *addr;
			size_t chars = buf->len;
			int error, atomic;

			if (chars > total_len)
				chars = total_len;

			error = ops->pin(pipe, buf);
			if (error) {
				if (!ret)
					ret = error;
				break;
			}

			atomic = !iov_fault_in_pages_write(iov, chars);
redo:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
			ops->unmap(pipe, buf, addr);
			if (unlikely(error)) {
				/*
				 * Just retry with the slow path if we failed.
				 */
				if (atomic) {
					atomic = 0;
					goto redo;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;
			buf->offset += chars;
			buf->len -= chars;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}
			total_len -= chars;
			if (!total_len)
				break;	/* common path: read succeeded */
		}
		if (bufs)	/* More to do? */
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			/* syscall merging: Usually we must not sleep
			 * if O_NONBLOCK is set, or if we got some data.
			 * But if a writer sleeps in kernel space, then
			 * we can wait for that data without violating POSIX.
			 */
			if (ret)
				break;
			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
		}
		pipe_wait(pipe);
	}
	mutex_unlock(&inode->i_mutex);

	/* Signal writers asynchronously that there is more room. */
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	if (ret > 0)
		file_accessed(filp);
	return ret;
}

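/*
 * Writing works in two stages.  First, if the write size is not a
 * multiple of PAGE_SIZE (chars = total_len & (PAGE_SIZE-1)) and the last
 * occupied ring buffer can still merge that many bytes, they are
 * appended there, saving a page allocation for small writes.  Whatever
 * remains is then copied page-at-a-time into freshly claimed ring slots,
 * sleeping in pipe_wait() whenever all PIPE_BUFFERS slots are full
 * (unless O_NONBLOCK is set).
 */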
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
	    unsigned long nr_segs, loff_t ppos)
{
	struct file *filp = iocb->ki_filp;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	ssize_t ret;
	int do_wakeup;
	struct iovec *iov = (struct iovec *)_iov;
	size_t total_len;
	ssize_t chars;

	total_len = iov_length(iov, nr_segs);
	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	do_wakeup = 0;
	ret = 0;
	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(PIPE_BUFFERS-1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		int offset = buf->offset + buf->len;

		if (ops->can_merge && offset + chars <= PAGE_SIZE) {
			int error, atomic = 1;
			void *addr;

			error = ops->pin(pipe, buf);
			if (error)
				goto out;

			iov_fault_in_pages_read(iov, chars);
redo1:
			addr = ops->map(pipe, buf, atomic);
			error = pipe_iov_copy_from_user(offset + addr, iov,
							chars, atomic);
			ops->unmap(pipe, buf, addr);
			ret = error;
			do_wakeup = 1;
			if (error) {
				if (atomic) {
					atomic = 0;
					goto redo1;
				}
				goto out;
			}
			buf->len += chars;
			total_len -= chars;
			ret = chars;
			if (!total_len)
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			char *src;
			int error, atomic = 1;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
			/* Always wake up, even if the copy fails. Otherwise
			 * we lock up (O_NONBLOCK-)readers that sleep due to
			 * syscall merging.
			 * FIXME! Is this really true?
			 */
			do_wakeup = 1;
			chars = PAGE_SIZE;
			if (chars > total_len)
				chars = total_len;

			iov_fault_in_pages_read(iov, chars);
redo2:
			if (atomic)
				src = kmap_atomic(page, KM_USER0);
			else
				src = kmap(page);

			error = pipe_iov_copy_from_user(src, iov, chars,
							atomic);
			if (atomic)
				kunmap_atomic(src, KM_USER0);
			else
				kunmap(page);

			if (unlikely(error)) {
				if (atomic) {
					atomic = 0;
					goto redo2;
				}
				if (!ret)
					ret = error;
				break;
			}
			ret += chars;

			/* Insert it into the buffer array */
			buf->page = page;
			buf->ops = &anon_pipe_buf_ops;
			buf->offset = 0;
			buf->len = chars;
			pipe->nrbufs = ++bufs;
			pipe->tmp_page = NULL;

			total_len -= chars;
			if (!total_len)
				break;
		}
		if (bufs < PIPE_BUFFERS)
			continue;
		if (filp->f_flags & O_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}
		if (do_wakeup) {
			wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (do_wakeup) {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}
	if (ret > 0)
		file_update_time(filp);
	return ret;
}

static ssize_t
bad_pipe_r(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	return -EBADF;
}

static ssize_t
bad_pipe_w(struct file *filp, const char __user *buf, size_t count,
	   loff_t *ppos)
{
	return -EBADF;
}

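/*
 * Only FIONREAD is implemented: it reports how many bytes are currently
 * buffered, summed across all occupied ring slots.  Illustrative
 * userspace use:
 *
 *	int n;
 *	ioctl(pipefd, FIONREAD, &n);
 */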
static int
pipe_ioctl(struct inode *pino, struct file *filp,
	   unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe;
	int count, buf, nrbufs;

	switch (cmd) {
		case FIONREAD:
			mutex_lock(&inode->i_mutex);
			pipe = inode->i_pipe;
			count = 0;
			buf = pipe->curbuf;
			nrbufs = pipe->nrbufs;
			while (--nrbufs >= 0) {
				count += pipe->bufs[buf].len;
				buf = (buf+1) & (PIPE_BUFFERS-1);
			}
			mutex_unlock(&inode->i_mutex);

			return put_user(count, (int __user *)arg);
		default:
			return -EINVAL;
	}
}

/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int nrbufs;

	poll_wait(filp, &pipe->wait, wait);

	/* Reading only -- no need for acquiring the semaphore. */
	nrbufs = pipe->nrbufs;
	mask = 0;
	if (filp->f_mode & FMODE_READ) {
		mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
		if (!pipe->writers && filp->f_version != pipe->w_counter)
			mask |= POLLHUP;
	}

	if (filp->f_mode & FMODE_WRITE) {
		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
		/*
		 * Most Unices do not set POLLERR for FIFOs but on Linux they
		 * behave exactly like pipes for poll().
		 */
		if (!pipe->readers)
			mask |= POLLERR;
	}

	return mask;
}

static int
pipe_release(struct inode *inode, int decr, int decw)
{
	struct pipe_inode_info *pipe;

	mutex_lock(&inode->i_mutex);
	pipe = inode->i_pipe;
	pipe->readers -= decr;
	pipe->writers -= decw;

	if (!pipe->readers && !pipe->writers) {
		free_pipe_info(inode);
	} else {
		wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_read_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_readers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static int
pipe_write_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &inode->i_pipe->fasync_writers);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static int
pipe_rdwr_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct pipe_inode_info *pipe = inode->i_pipe;
	int retval;

	mutex_lock(&inode->i_mutex);

	retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);

	if (retval >= 0)
		retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);

	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static int
pipe_read_release(struct inode *inode, struct file *filp)
{
	pipe_read_fasync(-1, filp, 0);
	return pipe_release(inode, 1, 0);
}

static int
pipe_write_release(struct inode *inode, struct file *filp)
{
	pipe_write_fasync(-1, filp, 0);
	return pipe_release(inode, 0, 1);
}

static int
pipe_rdwr_release(struct inode *inode, struct file *filp)
{
	int decr, decw;

	pipe_rdwr_fasync(-1, filp, 0);
	decr = (filp->f_mode & FMODE_READ) != 0;
	decw = (filp->f_mode & FMODE_WRITE) != 0;
	return pipe_release(inode, decr, decw);
}

static int
pipe_read_open(struct inode *inode, struct file *filp)
{
	/* We could have perhaps used atomic_t, but this and friends
	   below are the only places.  So it doesn't seem worthwhile.  */
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->readers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_write_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}

static int
pipe_rdwr_open(struct inode *inode, struct file *filp)
{
	mutex_lock(&inode->i_mutex);
	if (filp->f_mode & FMODE_READ)
		inode->i_pipe->readers++;
	if (filp->f_mode & FMODE_WRITE)
		inode->i_pipe->writers++;
	mutex_unlock(&inode->i_mutex);

	return 0;
}

/*
 * The file_operations structs are not static because they
 * are also used in linux/fs/fifo.c to do operations on FIFOs.
 */
const struct file_operations read_fifo_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = bad_pipe_w,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_read_open,
	.release = pipe_read_release,
	.fasync = pipe_read_fasync,
};

const struct file_operations write_fifo_fops = {
	.llseek = no_llseek,
	.read = bad_pipe_r,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_write_open,
	.release = pipe_write_release,
	.fasync = pipe_write_fasync,
};

const struct file_operations rdwr_fifo_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_rdwr_open,
	.release = pipe_rdwr_release,
	.fasync = pipe_rdwr_fasync,
};

static const struct file_operations read_pipe_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = bad_pipe_w,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_read_open,
	.release = pipe_read_release,
	.fasync = pipe_read_fasync,
};

static const struct file_operations write_pipe_fops = {
	.llseek = no_llseek,
	.read = bad_pipe_r,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_write_open,
	.release = pipe_write_release,
	.fasync = pipe_write_fasync,
};

static const struct file_operations rdwr_pipe_fops = {
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = pipe_read,
	.write = do_sync_write,
	.aio_write = pipe_write,
	.poll = pipe_poll,
	.ioctl = pipe_ioctl,
	.open = pipe_rdwr_open,
	.release = pipe_rdwr_release,
	.fasync = pipe_rdwr_fasync,
};

struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
{
	struct pipe_inode_info *pipe;

	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
	if (pipe) {
		init_waitqueue_head(&pipe->wait);
		pipe->r_counter = pipe->w_counter = 1;
		pipe->inode = inode;
	}

	return pipe;
}

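/*
 * The r/w counters start at 1 and are incremented by the FIFO open code
 * in fs/fifo.c each time a reader/writer opens the FIFO.  pipe_poll()
 * above compares filp->f_version against pipe->w_counter to suppress
 * POLLHUP for a reader that has not yet seen any writer at all.
 */
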
void __free_pipe_info(struct pipe_inode_info *pipe)
{
	int i;

	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
		if (buf->ops)
			buf->ops->release(pipe, buf);
	}
	if (pipe->tmp_page)
		__free_page(pipe->tmp_page);
	kfree(pipe);
}

void free_pipe_info(struct inode *inode)
{
	__free_pipe_info(inode->i_pipe);
	inode->i_pipe = NULL;
}

static struct vfsmount *pipe_mnt __read_mostly;

static int pipefs_delete_dentry(struct dentry *dentry)
{
	/*
	 * At creation time, we pretended this dentry was hashed
	 * (by clearing DCACHE_UNHASHED bit in d_flags)
	 * At delete time, we restore the truth : not hashed.
	 * (so that dput() can proceed correctly)
	 */
	dentry->d_flags |= DCACHE_UNHASHED;
	return 0;
}

static struct dentry_operations pipefs_dentry_operations = {
	.d_delete = pipefs_delete_dentry,
};

static struct inode * get_pipe_inode(void)
{
	struct inode *inode = new_inode(pipe_mnt->mnt_sb);
	struct pipe_inode_info *pipe;

	if (!inode)
		goto fail_inode;

	pipe = alloc_pipe_info(inode);
	if (!pipe)
		goto fail_iput;
	inode->i_pipe = pipe;

	pipe->readers = pipe->writers = 1;
	inode->i_fop = &rdwr_pipe_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because "mark_inode_dirty()" will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	return inode;

fail_iput:
	iput(inode);

fail_inode:
	return NULL;
}

struct file *create_write_pipe(void)
{
	int err;
	struct inode *inode;
	struct file *f;
	struct dentry *dentry;
	char name[32];
	struct qstr this;

	f = get_empty_filp();
	if (!f)
		return ERR_PTR(-ENFILE);
	err = -ENFILE;
	inode = get_pipe_inode();
	if (!inode)
		goto err_file;

	this.len = sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.hash = 0;
	err = -ENOMEM;
	dentry = d_alloc(pipe_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto err_inode;

	dentry->d_op = &pipefs_dentry_operations;
	/*
	 * We don't want to publish this dentry into the global dentry hash
	 * table. We pretend the dentry is already hashed, by unsetting
	 * DCACHE_UNHASHED. This permits a working /proc/$pid/fd/XXX on pipes.
	 */
	dentry->d_flags &= ~DCACHE_UNHASHED;
	d_instantiate(dentry, inode);
	f->f_path.mnt = mntget(pipe_mnt);
	f->f_path.dentry = dentry;
	f->f_mapping = inode->i_mapping;

	f->f_flags = O_WRONLY;
	f->f_op = &write_pipe_fops;
	f->f_mode = FMODE_WRITE;
	f->f_version = 0;

	return f;

err_inode:
	free_pipe_info(inode);
	iput(inode);
err_file:
	put_filp(f);
	return ERR_PTR(err);
}

void free_write_pipe(struct file *f)
{
	free_pipe_info(f->f_path.dentry->d_inode);
	dput(f->f_path.dentry);
	mntput(f->f_path.mnt);
	put_filp(f);
}

struct file *create_read_pipe(struct file *wrf)
{
	struct file *f = get_empty_filp();
	if (!f)
		return ERR_PTR(-ENFILE);

	/* Grab pipe from the writer */
	f->f_path.mnt = mntget(wrf->f_path.mnt);
	f->f_path.dentry = dget(wrf->f_path.dentry);
	f->f_mapping = wrf->f_path.dentry->d_inode->i_mapping;

	f->f_pos = 0;
	f->f_flags = O_RDONLY;
	f->f_op = &read_pipe_fops;
	f->f_mode = FMODE_READ;
	f->f_version = 0;

	return f;
}

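/*
 * The two halves of a pipe share a single pipefs inode (and dentry):
 * create_write_pipe() allocates it, create_read_pipe() just takes extra
 * references and attaches the read-side file_operations.  do_pipe()
 * below wires both files into the caller's fd table.
 */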
int do_pipe(int *fd)
{
	struct file *fw, *fr;
	int error;
	int fdw, fdr;

	fw = create_write_pipe();
	if (IS_ERR(fw))
		return PTR_ERR(fw);
	fr = create_read_pipe(fw);
	error = PTR_ERR(fr);
	if (IS_ERR(fr))
		goto err_write_pipe;

	error = get_unused_fd();
	if (error < 0)
		goto err_read_pipe;
	fdr = error;

	error = get_unused_fd();
	if (error < 0)
		goto err_fdr;
	fdw = error;

	error = audit_fd_pair(fdr, fdw);
	if (error < 0)
		goto err_fdw;

	fd_install(fdr, fr);
	fd_install(fdw, fw);
	fd[0] = fdr;
	fd[1] = fdw;

	return 0;

err_fdw:
	put_unused_fd(fdw);
err_fdr:
	put_unused_fd(fdr);
err_read_pipe:
	dput(fr->f_path.dentry);
	mntput(fr->f_path.mnt);
	put_filp(fr);
err_write_pipe:
	free_write_pipe(fw);
	return error;
}

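/*
 * do_pipe() is what ultimately services the pipe(2) system call.  An
 * illustrative userspace view of the machinery in this file:
 *
 *	int fd[2];
 *	char buf[2];
 *
 *	pipe(fd);			(do_pipe: fd[0] is the read end)
 *	write(fd[1], "hi", 2);		(served by pipe_write)
 *	read(fd[0], buf, 2);		(served by pipe_read)
 *	close(fd[0]);			(pipe_read_release)
 */
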
/*
 * pipefs should _never_ be mounted by userland - too much of a security
 * hassle, no real gain from having the whole whorehouse mounted. So we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - pipe: will go nicely and kill the special-casing
 * in procfs.
 */
static int pipefs_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name, void *data,
			 struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pipe:", NULL, PIPEFS_MAGIC, mnt);
}

static struct file_system_type pipe_fs_type = {
	.name = "pipefs",
	.get_sb = pipefs_get_sb,
	.kill_sb = kill_anon_super,
};

static int __init init_pipe_fs(void)
{
	int err = register_filesystem(&pipe_fs_type);

	if (!err) {
		pipe_mnt = kern_mount(&pipe_fs_type);
		if (IS_ERR(pipe_mnt)) {
			err = PTR_ERR(pipe_mnt);
			unregister_filesystem(&pipe_fs_type);
		}
	}
	return err;
}

static void __exit exit_pipe_fs(void)
{
	unregister_filesystem(&pipe_fs_type);
	mntput(pipe_mnt);
}

fs_initcall(init_pipe_fs);
module_exit(exit_pipe_fs);