/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */
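
/*
 * For concreteness: assuming the usual PIPE_SIZE of 16384 bytes,
 * MINPIPESIZE evaluates to ~5461 and MAXPIPESIZE to ~10922.  Note that
 * the read path below actually keys its wakeup hysteresis off the
 * half-full point (pipe_buffer.size >> 1) rather than these constants.
 */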
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe blocked on read");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe blocked on write");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");

static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
	CTLFLAG_RW, &pipe_mpsafe, 0, "");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline int
pipeseltest(struct pipe *cpipe)
{
	return ((cpipe->pipe_state & PIPE_SEL) ||
		((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) ||
		SLIST_FIRST(&cpipe->pipe_sel.si_note));
}
static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}
static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}
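
/*
 * Usage sketch for the pair above: the read and write paths bracket their
 * uiomove() loops with
 *
 *	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
 *	...copy loop, tokens may be temporarily lost...
 *	pipe_end_uio(rpipe, &rpipe->pipe_rip);
 *
 * so at most one reader (and one writer) is inside a copy loop at a time.
 */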
static __inline void
pipe_get_mplock(int *save)
{
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 0;
	} else {
		*save = 1;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
	if (*save == 0)
		rel_mplock();
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
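
/*
 * Userland view of the syscall above, as a sketch: fds[0] is the read
 * side and fds[1] the write side (though both files are opened
 * FREAD|FWRITE here).
 *
 *	int fds[2];
 *	char c;
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "x", 1);
 *		read(fds[0], &c, 1);	(returns 1, c == 'x')
 *	}
 */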
/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages;
	int error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1, VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	size_t nread = 0;
	int error;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int bigread;
	int bigcount;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	if (uio->uio_resid == 0)
		return(0);
	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}
= rpipe
->pipe_buffer
.windex
- rpipe
->pipe_buffer
.rindex
;
485 rindex
= rpipe
->pipe_buffer
.rindex
&
486 (rpipe
->pipe_buffer
.size
- 1);
488 if (nsize
> rpipe
->pipe_buffer
.size
- rindex
)
489 nsize
= rpipe
->pipe_buffer
.size
- rindex
;
490 nsize
= szmin(nsize
, uio
->uio_resid
);
492 error
= uiomove(&rpipe
->pipe_buffer
.buffer
[rindex
],
497 rpipe
->pipe_buffer
.rindex
+= nsize
;
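
			/*
			 * Index arithmetic note: windex and rindex are
			 * free-running unsigned counters and the buffer
			 * size is a power of two, so (windex - rindex)
			 * yields the byte count even across integer
			 * wraparound, and (rindex & (size - 1)) is the
			 * buffer offset; e.g. with size 16384, rindex
			 * 16385 maps to offset 1.
			 */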
			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1))
				continue;

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}
520 * If the "write-side" was blocked we wake it up. This code
521 * is reached either when the buffer is completely emptied
522 * or if it becomes more then half-empty.
524 * Pipe_state can only be modified if both the rlock and
527 if (rpipe
->pipe_state
& PIPE_WANTW
) {
528 lwkt_gettoken(&wlock
, &rpipe
->pipe_wlock
);
529 if (rpipe
->pipe_state
& PIPE_WANTW
) {
531 rpipe
->pipe_state
&= ~PIPE_WANTW
;
532 lwkt_reltoken(&wlock
);
535 lwkt_reltoken(&wlock
);
		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif
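
		/*
		 * Rough scale for the poll above, assuming a 2 GHz TSC:
		 * the default pipe_delay of 5000ns is ~10000 TSC ticks,
		 * still far cheaper than the ~7uS tsleep/wakeup round
		 * trip cited above.
		 */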
		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;
		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}
		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&wlock);
			break;
		}
		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}
		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);
	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);
	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (rpipe->pipe_state & PIPE_WANTW) {
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			lwkt_reltoken(&wlock);
			wakeup(rpipe);
		} else {
			lwkt_reltoken(&wlock);
		}
	}
	if (pipeseltest(rpipe)) {
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		pipeselwakeup(rpipe);
		lwkt_reltoken(&wlock);
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *wpipe, *rpipe;
	u_int windex;
	u_int space;
	u_int segsize;
	int error;
	int nbio;
	int orig_resid;
	int bigwrite;
	int bigcount;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;
	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}
	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}
	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}
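
	/*
	 * Accounting note: pipe_nbig is bumped before the pipespace() call
	 * above and rolled back on failure; the matching decrement for a
	 * successful grow happens in pipe_free_kmem() when the enlarged
	 * buffer is eventually freed.
	 */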
	orig_resid = uio->uio_resid;
	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;
	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}
		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
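
		/*
		 * (POSIX requires writes of <= PIPE_BUF bytes to be atomic;
		 * forcing space to 0 here makes such a write block until it
		 * can complete in full rather than interleave with other
		 * writers.)
		 */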
		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			wpipe->pipe_buffer.windex += space;
			continue;
		}
		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
887 * If the "read-side" has been blocked, wake it up now
888 * and yield to let it drain synchronously rather
891 if (wpipe
->pipe_state
& PIPE_WANTR
) {
892 wpipe
->pipe_state
&= ~PIPE_WANTR
;
		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}
		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&rlock);
			error = EPIPE;
			break;
		}
		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);
	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}
	if (pipeseltest(wpipe)) {
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		pipeselwakeup(wpipe);
		lwkt_reltoken(&rlock);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * poll for events (helper)
 */
static int
pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events)
{
	int revents = 0;
	u_int space;

	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF)) {
		revents |= POLLHUP;
	}
	return (revents);
}
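
/*
 * Note on the POLLOUT test above: writability is only reported when at
 * least PIPE_BUF bytes are free, so a poll()-then-write() of up to
 * PIPE_BUF bytes (an atomic write) will not block.
 */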
/*
 * Poll for events from file pointer.
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	revents = pipe_poll_events(rpipe, wpipe, events);
	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
			lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
			lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);
		}
		revents = pipe_poll_events(rpipe, wpipe, events);
		if (revents == 0) {
			if (events & (POLLIN | POLLRDNORM)) {
				selrecord(curthread, &rpipe->pipe_sel);
				rpipe->pipe_state |= PIPE_SEL;
			}
			if (events & (POLLOUT | POLLWRNORM)) {
				selrecord(curthread, &wpipe->pipe_sel);
				wpipe->pipe_state |= PIPE_SEL;
			}
		}
		if (events & (POLLIN | POLLRDNORM)) {
			lwkt_reltoken(&rpipe_rlock);
			lwkt_reltoken(&rpipe_wlock);
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			lwkt_reltoken(&wpipe_rlock);
			lwkt_reltoken(&wpipe_wlock);
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}
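
/*
 * Userland sketch of the switch above: shutdown(fds[0], SHUT_RD) sets
 * EOF on the read side so a blocked reader returns 0, while
 * shutdown(fds[1], SHUT_WR) makes subsequent writes fail with EPIPE.
 */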
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}
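
/*
 * Note on the cache above: a pipe parked on gd_pipeq keeps its kva buffer
 * and VM object, so when pipe_create() reuses it the pipespace() call
 * reduces to an index reset (the object == NULL / size != npages check
 * does not trigger).
 */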
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}
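
/*
 * Note on the EVFILT_WRITE case above: write-side knotes are hung off the
 * peer's si_note list, since it is the reader draining the FIFO that makes
 * the pipe writable again; filt_pipewrite below likewise computes space
 * from the peer.
 */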
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}