/*
 * Copyright (c) 1996 John S. Dyson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 * 4. Modifications may be freely made to this file if the above conditions
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);
static struct fileops pipeops = {
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);
static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
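/*
 * Note on the positional initializers above: the fields of struct
 * filterops in this era are { f_isfd, f_attach, f_detach, f_event },
 * so both filters mark themselves fd-based (1), use no attach hook,
 * and share filt_pipedetach() for detach.
 */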
MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
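/*
 * Worked example of the hysteresis bounds (illustrative; assumes the
 * traditional PIPE_SIZE of 16384 bytes from <sys/pipe.h>):
 *
 *	MINPIPESIZE = 16384/3   = 5461  bytes
 *	MAXPIPESIZE = 2*16384/3 = 10922 bytes
 *
 * i.e. the code aims to keep roughly between one third and two thirds
 * of the buffer in flight, preserving locality of reference.
 */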
/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times the read side blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times the write side blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		KNOTE(&cpipe->pipe_sel.si_note, 0);
	}
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}
static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}
static __inline void
pipe_get_mplock(int *save)
{
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 0;
	} else {
		*save = 1;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
	if (*save == 0)
		rel_mplock();
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
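/*
 * Illustrative userland usage of this syscall (standard POSIX API,
 * not part of this file): fd[0] is the read end and fd[1] the write
 * end of the new pipe.
 *
 *	int fd[2];
 *	char buf[5];
 *
 *	if (pipe(fd) == 0) {
 *		write(fd[1], "hello", 5);
 *		read(fd[0], buf, 5);
 *		close(fd[0]);
 *		close(fd[1]);
 *	}
 */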
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages;
	int error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    /* ... */
				    VM_PROT_ALL, VM_PROT_ALL,
				    /* ... */);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
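/*
 * A note on the index scheme (sketch of the invariant; assumes the
 * buffer size is a power of 2): rindex and windex increment without
 * bound and are masked only on access.
 *
 *	bytes buffered = windex - rindex	(unsigned math)
 *	read offset    = rindex & (size - 1)
 *	write offset   = windex & (size - 1)
 *
 * e.g. with size = 16384, rindex = 20000, windex = 21000 there are
 * 1000 bytes buffered starting at offset 20000 & 16383 = 3616.
 */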
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int nbio;
	int nread = 0;
	int bigread;
	int bigcount;
	int mpsave;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Re-check for pending signals periodically during very
		 * large reads so a blocked peer cannot starve delivery.
		 */
		if (bigread && --bigcount == 0) {
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		/*
		 * size is the total bytes buffered, rindex the masked
		 * offset of the first byte, and nsize the contiguous
		 * run we can copy with one uiomove().
		 */
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				continue;
			}
			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif
		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;
		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}
		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);
	/*
	 * Update last access time.
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (rpipe->pipe_state & PIPE_WANTW) {
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			lwkt_reltoken(&wlock);
			wakeup(rpipe);
		} else {
			lwkt_reltoken(&wlock);
		}
	}

	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in buffer then wakeup sel writers?
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
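/*
 * Worked example of the contiguous-span computation in pipe_read()
 * above (illustrative numbers; assumes pipe_buffer.size = 16384):
 *
 *	windex = 16500, rindex = 16000
 *	size   = 16500 - 16000        = 500 bytes buffered
 *	rindex & (16384 - 1)          = 16000 (no wrap yet)
 *	contiguous run: 16384 - 16000 = 384 bytes
 *
 * so the first uiomove() copies 384 bytes up to the physical end of
 * the buffer; the next loop iteration starts at masked offset 0 and
 * copies the remaining 116 bytes.
 */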
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int segsize;
	u_int orig_resid;
	int error;
	int nbio;
	int bigwrite;
	int bigcount;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Re-check for pending signals periodically during very
		 * large writes.
		 */
		if (bigwrite && --bigcount == 0) {
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
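		/*
		 * Note on the check above: POSIX requires writes of
		 * <= PIPE_BUF bytes to be atomic with respect to other
		 * writers.  Forcing space to 0 when a small write does
		 * not fit entirely sends the code to the blocking path
		 * below instead of doing a partial copy.  Example:
		 * space = 100, uio_resid = orig_resid = 512 => sleep
		 * until all 512 bytes fit, rather than interleaving a
		 * torn write with another writer's data.
		 */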
		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;
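			/*
			 * Worked example (illustrative numbers, size =
			 * 16384): windex = 16000, space = 1000.
			 *
			 *	segsize = 16384 - 16000 = 384
			 *
			 * 384 < 1000, so the write wraps: the first
			 * uiomove() below copies 384 bytes at offset
			 * 16000, the second copies the remaining 616
			 * bytes starting at offset 0.
			 */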
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			wpipe->pipe_buffer.windex += space;
			continue;
		}
		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);
		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}
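/*
 * Illustrative userland use of the FIONREAD path above (sketch,
 * standard <sys/filio.h> API):
 *
 *	int avail;
 *
 *	if (ioctl(fd[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes buffered\n", avail);
 */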
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}
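/*
 * Example of the st_blocks rounding in pipe_stat() above (illustrative
 * numbers): with st_blksize = 16384 and st_size = 5000,
 * st_blocks = (5000 + 16383) / 16384 = 1, i.e. partial blocks round up.
 */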
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			  (vm_offset_t)cpipe->pipe_buffer.buffer,
			  cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			KNOTE(&ppipe->pipe_sel.si_note, 0);
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}
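/*
 * Illustrative userland registration against these filters (sketch,
 * standard <sys/event.h> API):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);  // backed by filt_piperead()
 */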
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int space;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}