2 * Copyright (c) 1996 John S. Dyson
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
16 * 4. Modifications may be freely made to this file if the above conditions
19 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
20 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
24 * This file contains a high-performance replacement for the socket-based
25 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
26 * all features of sockets, but does do everything that pipes normally
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
33 #include <sys/fcntl.h>
35 #include <sys/filedesc.h>
36 #include <sys/filio.h>
37 #include <sys/ttycom.h>
40 #include <sys/select.h>
41 #include <sys/signalvar.h>
42 #include <sys/sysproto.h>
44 #include <sys/vnode.h>
46 #include <sys/event.h>
47 #include <sys/globaldata.h>
48 #include <sys/module.h>
49 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/socket.h>
54 #include <vm/vm_param.h>
56 #include <vm/vm_object.h>
57 #include <vm/vm_kern.h>
58 #include <vm/vm_extern.h>
60 #include <vm/vm_map.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_zone.h>
64 #include <sys/file2.h>
66 #include <machine/cpufunc.h>
69 * interfaces to the outside world
71 static int pipe_read (struct file
*fp
, struct uio
*uio
,
72 struct ucred
*cred
, int flags
);
73 static int pipe_write (struct file
*fp
, struct uio
*uio
,
74 struct ucred
*cred
, int flags
);
75 static int pipe_close (struct file
*fp
);
76 static int pipe_shutdown (struct file
*fp
, int how
);
77 static int pipe_poll (struct file
*fp
, int events
, struct ucred
*cred
);
78 static int pipe_kqfilter (struct file
*fp
, struct knote
*kn
);
79 static int pipe_stat (struct file
*fp
, struct stat
*sb
, struct ucred
*cred
);
80 static int pipe_ioctl (struct file
*fp
, u_long cmd
, caddr_t data
, struct ucred
*cred
);
82 static struct fileops pipeops
= {
84 .fo_write
= pipe_write
,
85 .fo_ioctl
= pipe_ioctl
,
87 .fo_kqfilter
= pipe_kqfilter
,
89 .fo_close
= pipe_close
,
90 .fo_shutdown
= pipe_shutdown
93 static void filt_pipedetach(struct knote
*kn
);
94 static int filt_piperead(struct knote
*kn
, long hint
);
95 static int filt_pipewrite(struct knote
*kn
, long hint
);
97 static struct filterops pipe_rfiltops
=
98 { 1, NULL
, filt_pipedetach
, filt_piperead
};
99 static struct filterops pipe_wfiltops
=
100 { 1, NULL
, filt_pipedetach
, filt_pipewrite
};
102 MALLOC_DEFINE(M_PIPE
, "pipe", "pipe structures");
105 * Default pipe buffer size(s), this can be kind-of large now because pipe
106 * space is pageable. The pipe code will try to maintain locality of
107 * reference for performance reasons, so small amounts of outstanding I/O
108 * will not wipe the cache.
110 #define MINPIPESIZE (PIPE_SIZE/3)
111 #define MAXPIPESIZE (2*PIPE_SIZE/3)
114 * Limit the number of "big" pipes
116 #define LIMITBIGPIPES 64
117 #define PIPEQ_MAX_CACHE 16 /* per-cpu pipe structure cache */
119 static int pipe_maxbig
= LIMITBIGPIPES
;
120 static int pipe_maxcache
= PIPEQ_MAX_CACHE
;
121 static int pipe_bigcount
;
122 static int pipe_nbig
;
123 static int pipe_bcache_alloc
;
124 static int pipe_bkmem_alloc
;
126 SYSCTL_NODE(_kern
, OID_AUTO
, pipe
, CTLFLAG_RW
, 0, "Pipe operation");
127 SYSCTL_INT(_kern_pipe
, OID_AUTO
, nbig
,
128 CTLFLAG_RD
, &pipe_nbig
, 0, "numer of big pipes allocated");
129 SYSCTL_INT(_kern_pipe
, OID_AUTO
, bigcount
,
130 CTLFLAG_RW
, &pipe_bigcount
, 0, "number of times pipe expanded");
131 SYSCTL_INT(_kern_pipe
, OID_AUTO
, maxcache
,
132 CTLFLAG_RW
, &pipe_maxcache
, 0, "max pipes cached per-cpu");
133 SYSCTL_INT(_kern_pipe
, OID_AUTO
, maxbig
,
134 CTLFLAG_RW
, &pipe_maxbig
, 0, "max number of big pipes");
135 #if !defined(NO_PIPE_SYSCTL_STATS)
136 SYSCTL_INT(_kern_pipe
, OID_AUTO
, bcache_alloc
,
137 CTLFLAG_RW
, &pipe_bcache_alloc
, 0, "pipe buffer from pcpu cache");
138 SYSCTL_INT(_kern_pipe
, OID_AUTO
, bkmem_alloc
,
139 CTLFLAG_RW
, &pipe_bkmem_alloc
, 0, "pipe buffer from kmem");
/*
 * Forward declarations for the internal helpers defined below.
 */
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline int pipelock (struct pipe *cpipe, int catch);
static __inline void pipeunlock (struct pipe *cpipe);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
151 * The pipe system call for the DTYPE_PIPE type of pipes
153 * pipe_ARgs(int dummy)
158 sys_pipe(struct pipe_args
*uap
)
160 struct thread
*td
= curthread
;
161 struct proc
*p
= td
->td_proc
;
162 struct file
*rf
, *wf
;
163 struct pipe
*rpipe
, *wpipe
;
168 rpipe
= wpipe
= NULL
;
169 if (pipe_create(&rpipe
) || pipe_create(&wpipe
)) {
175 error
= falloc(p
, &rf
, &fd1
);
181 uap
->sysmsg_fds
[0] = fd1
;
184 * Warning: once we've gotten past allocation of the fd for the
185 * read-side, we can only drop the read side via fdrop() in order
186 * to avoid races against processes which manage to dup() the read
187 * side while we are blocked trying to allocate the write side.
189 rf
->f_type
= DTYPE_PIPE
;
190 rf
->f_flag
= FREAD
| FWRITE
;
191 rf
->f_ops
= &pipeops
;
193 error
= falloc(p
, &wf
, &fd2
);
195 fsetfd(p
, NULL
, fd1
);
197 /* rpipe has been closed by fdrop(). */
201 wf
->f_type
= DTYPE_PIPE
;
202 wf
->f_flag
= FREAD
| FWRITE
;
203 wf
->f_ops
= &pipeops
;
205 uap
->sysmsg_fds
[1] = fd2
;
207 rpipe
->pipe_peer
= wpipe
;
208 wpipe
->pipe_peer
= rpipe
;
219 * Allocate kva for pipe circular buffer, the space is pageable
220 * This routine will 'realloc' the size of a pipe safely, if it fails
221 * it will retain the old buffer.
222 * If it fails it will return ENOMEM.
225 pipespace(struct pipe
*cpipe
, int size
)
227 struct vm_object
*object
;
231 npages
= round_page(size
) / PAGE_SIZE
;
232 object
= cpipe
->pipe_buffer
.object
;
235 * [re]create the object if necessary and reserve space for it
236 * in the kernel_map. The object and memory are pageable. On
237 * success, free the old resources before assigning the new
240 if (object
== NULL
|| object
->size
!= npages
) {
241 object
= vm_object_allocate(OBJT_DEFAULT
, npages
);
242 buffer
= (caddr_t
)vm_map_min(&kernel_map
);
244 error
= vm_map_find(&kernel_map
, object
, 0,
245 (vm_offset_t
*)&buffer
, size
,
248 VM_PROT_ALL
, VM_PROT_ALL
,
251 if (error
!= KERN_SUCCESS
) {
252 vm_object_deallocate(object
);
255 pipe_free_kmem(cpipe
);
256 cpipe
->pipe_buffer
.object
= object
;
257 cpipe
->pipe_buffer
.buffer
= buffer
;
258 cpipe
->pipe_buffer
.size
= size
;
263 cpipe
->pipe_buffer
.rindex
= 0;
264 cpipe
->pipe_buffer
.windex
= 0;
269 * Initialize and allocate VM and memory for pipe, pulling the pipe from
270 * our per-cpu cache if possible. For now make sure it is sized for the
271 * smaller PIPE_SIZE default.
274 pipe_create(struct pipe
**cpipep
)
276 globaldata_t gd
= mycpu
;
280 if ((cpipe
= gd
->gd_pipeq
) != NULL
) {
281 gd
->gd_pipeq
= cpipe
->pipe_peer
;
283 cpipe
->pipe_peer
= NULL
;
285 cpipe
= kmalloc(sizeof(struct pipe
), M_PIPE
, M_WAITOK
|M_ZERO
);
288 if ((error
= pipespace(cpipe
, PIPE_SIZE
)) != 0)
290 vfs_timestamp(&cpipe
->pipe_ctime
);
291 cpipe
->pipe_atime
= cpipe
->pipe_ctime
;
292 cpipe
->pipe_mtime
= cpipe
->pipe_ctime
;
298 * lock a pipe for I/O, blocking other access
301 pipelock(struct pipe
*cpipe
, int catch)
305 while (cpipe
->pipe_state
& PIPE_LOCK
) {
306 cpipe
->pipe_state
|= PIPE_LWANT
;
307 error
= tsleep(cpipe
, (catch ? PCATCH
: 0), "pipelk", 0);
311 cpipe
->pipe_state
|= PIPE_LOCK
;
316 * unlock a pipe I/O lock
319 pipeunlock(struct pipe
*cpipe
)
322 cpipe
->pipe_state
&= ~PIPE_LOCK
;
323 if (cpipe
->pipe_state
& PIPE_LWANT
) {
324 cpipe
->pipe_state
&= ~PIPE_LWANT
;
330 pipeselwakeup(struct pipe
*cpipe
)
333 if (cpipe
->pipe_state
& PIPE_SEL
) {
334 cpipe
->pipe_state
&= ~PIPE_SEL
;
335 selwakeup(&cpipe
->pipe_sel
);
337 if ((cpipe
->pipe_state
& PIPE_ASYNC
) && cpipe
->pipe_sigio
)
338 pgsigio(cpipe
->pipe_sigio
, SIGIO
, 0);
339 KNOTE(&cpipe
->pipe_sel
.si_note
, 0);
343 * MPALMOSTSAFE (acquires mplock)
346 pipe_read(struct file
*fp
, struct uio
*uio
, struct ucred
*cred
, int fflags
)
352 u_int size
; /* total bytes available */
353 u_int rindex
; /* contiguous bytes available */
356 rpipe
= (struct pipe
*) fp
->f_data
;
358 error
= pipelock(rpipe
, 1);
362 if (fflags
& O_FBLOCKING
)
364 else if (fflags
& O_FNONBLOCKING
)
366 else if (fp
->f_flag
& O_NONBLOCK
)
371 while (uio
->uio_resid
) {
372 size
= rpipe
->pipe_buffer
.windex
- rpipe
->pipe_buffer
.rindex
;
374 rindex
= rpipe
->pipe_buffer
.rindex
&
375 (rpipe
->pipe_buffer
.size
- 1);
376 if (size
> rpipe
->pipe_buffer
.size
- rindex
)
377 size
= rpipe
->pipe_buffer
.size
- rindex
;
378 if (size
> (u_int
)uio
->uio_resid
)
379 size
= (u_int
)uio
->uio_resid
;
381 error
= uiomove(&rpipe
->pipe_buffer
.buffer
[rindex
],
385 rpipe
->pipe_buffer
.rindex
+= size
;
388 * If there is no more to read in the pipe, reset
389 * its pointers to the beginning. This improves
392 if (rpipe
->pipe_buffer
.rindex
==
393 rpipe
->pipe_buffer
.windex
) {
394 rpipe
->pipe_buffer
.rindex
= 0;
395 rpipe
->pipe_buffer
.windex
= 0;
400 * detect EOF condition
401 * read returns 0 on EOF, no need to set error
403 if (rpipe
->pipe_state
& PIPE_EOF
)
407 * If the "write-side" has been blocked, wake it up now.
409 if (rpipe
->pipe_state
& PIPE_WANTW
) {
410 rpipe
->pipe_state
&= ~PIPE_WANTW
;
415 * Break if some data was read.
421 * Unlock the pipe buffer for our remaining
422 * processing. We will either break out with an
423 * error or we will sleep and relock to loop.
428 * Handle non-blocking mode operation or
429 * wait for more data.
434 rpipe
->pipe_state
|= PIPE_WANTR
;
435 if ((error
= tsleep(rpipe
, PCATCH
,
436 "piperd", 0)) == 0) {
437 error
= pipelock(rpipe
, 1);
447 vfs_timestamp(&rpipe
->pipe_atime
);
452 * PIPE_WANT processing only makes sense if pipe_busy is 0.
454 size
= rpipe
->pipe_buffer
.windex
- rpipe
->pipe_buffer
.rindex
;
456 if ((rpipe
->pipe_busy
== 0) && (rpipe
->pipe_state
& PIPE_WANT
)) {
457 rpipe
->pipe_state
&= ~(PIPE_WANT
|PIPE_WANTW
);
459 } else if (size
< MINPIPESIZE
) {
461 * Handle write blocking hysteresis.
463 if (rpipe
->pipe_state
& PIPE_WANTW
) {
464 rpipe
->pipe_state
&= ~PIPE_WANTW
;
469 if ((rpipe
->pipe_buffer
.size
- size
) >= PIPE_BUF
)
470 pipeselwakeup(rpipe
);
476 * MPALMOSTSAFE - acquires mplock
479 pipe_write(struct file
*fp
, struct uio
*uio
, struct ucred
*cred
, int fflags
)
484 struct pipe
*wpipe
, *rpipe
;
489 rpipe
= (struct pipe
*) fp
->f_data
;
490 wpipe
= rpipe
->pipe_peer
;
493 * detect loss of pipe read side, issue SIGPIPE if lost.
495 if ((wpipe
== NULL
) || (wpipe
->pipe_state
& PIPE_EOF
)) {
501 if (fflags
& O_FBLOCKING
)
503 else if (fflags
& O_FNONBLOCKING
)
505 else if (fp
->f_flag
& O_NONBLOCK
)
511 * If it is advantageous to resize the pipe buffer, do
514 if ((uio
->uio_resid
> PIPE_SIZE
) &&
515 (pipe_nbig
< pipe_maxbig
) &&
516 (wpipe
->pipe_buffer
.size
<= PIPE_SIZE
) &&
517 (wpipe
->pipe_buffer
.rindex
== wpipe
->pipe_buffer
.windex
) &&
518 (error
= pipelock(wpipe
, 1)) == 0) {
520 * Recheck after lock.
522 if ((pipe_nbig
< pipe_maxbig
) &&
523 (wpipe
->pipe_buffer
.size
<= PIPE_SIZE
) &&
524 (wpipe
->pipe_buffer
.rindex
== wpipe
->pipe_buffer
.windex
)) {
525 if (pipespace(wpipe
, BIG_PIPE_SIZE
) == 0) {
534 * If an early error occured unbusy and return, waking up any pending
539 if ((wpipe
->pipe_busy
== 0) &&
540 (wpipe
->pipe_state
& PIPE_WANT
)) {
541 wpipe
->pipe_state
&= ~(PIPE_WANT
| PIPE_WANTR
);
548 KASSERT(wpipe
->pipe_buffer
.buffer
!= NULL
, ("pipe buffer gone"));
550 orig_resid
= uio
->uio_resid
;
552 while (uio
->uio_resid
) {
553 if (wpipe
->pipe_state
& PIPE_EOF
) {
558 windex
= wpipe
->pipe_buffer
.windex
&
559 (wpipe
->pipe_buffer
.size
- 1);
560 space
= wpipe
->pipe_buffer
.size
-
561 (wpipe
->pipe_buffer
.windex
- wpipe
->pipe_buffer
.rindex
);
563 /* Writes of size <= PIPE_BUF must be atomic. */
564 if ((space
< uio
->uio_resid
) && (orig_resid
<= PIPE_BUF
))
568 * Write to fill, read size handles write hysteresis. Also
569 * additional restrictions can cause select-based non-blocking
573 if ((error
= pipelock(wpipe
,1)) == 0) {
577 * If a process blocked in uiomove, our
578 * value for space might be bad.
580 * XXX will we be ok if the reader has gone
583 if (space
> (wpipe
->pipe_buffer
.size
-
584 (wpipe
->pipe_buffer
.windex
-
585 wpipe
->pipe_buffer
.rindex
))) {
589 windex
= wpipe
->pipe_buffer
.windex
&
590 (wpipe
->pipe_buffer
.size
- 1);
593 * Transfer size is minimum of uio transfer
594 * and free space in pipe buffer.
596 if (space
> (u_int
)uio
->uio_resid
)
597 space
= (u_int
)uio
->uio_resid
;
600 * First segment to transfer is minimum of
601 * transfer size and contiguous space in
602 * pipe buffer. If first segment to transfer
603 * is less than the transfer size, we've got
604 * a wraparound in the buffer.
606 segsize
= wpipe
->pipe_buffer
.size
- windex
;
610 /* Transfer first segment */
613 &wpipe
->pipe_buffer
.buffer
[windex
],
616 if (error
== 0 && segsize
< space
) {
618 * Transfer remaining part now, to
619 * support atomic writes. Wraparound
622 error
= uiomove(&wpipe
->pipe_buffer
.
624 space
- segsize
, uio
);
627 wpipe
->pipe_buffer
.windex
+= space
;
635 * If the "read-side" has been blocked, wake it up now
636 * and yield to let it drain synchronously rather
639 if (wpipe
->pipe_state
& PIPE_WANTR
) {
640 wpipe
->pipe_state
&= ~PIPE_WANTR
;
645 * don't block on non-blocking I/O
653 * We have no more space and have something to offer,
654 * wake up select/poll.
656 pipeselwakeup(wpipe
);
658 wpipe
->pipe_state
|= PIPE_WANTW
;
659 error
= tsleep(wpipe
, PCATCH
, "pipewr", 0);
663 * If read side wants to go away, we just issue a signal
666 if (wpipe
->pipe_state
& PIPE_EOF
) {
675 if ((wpipe
->pipe_busy
== 0) && (wpipe
->pipe_state
& PIPE_WANT
)) {
676 wpipe
->pipe_state
&= ~(PIPE_WANT
| PIPE_WANTR
);
678 } else if (wpipe
->pipe_buffer
.windex
!= wpipe
->pipe_buffer
.rindex
) {
680 * If we have put any characters in the buffer, we wake up
683 if (wpipe
->pipe_state
& PIPE_WANTR
) {
684 wpipe
->pipe_state
&= ~PIPE_WANTR
;
690 * Don't return EPIPE if I/O was successful
692 if ((wpipe
->pipe_buffer
.rindex
== wpipe
->pipe_buffer
.windex
) &&
693 (uio
->uio_resid
== 0) &&
699 vfs_timestamp(&wpipe
->pipe_mtime
);
702 * We have something to offer,
703 * wake up select/poll.
705 if (wpipe
->pipe_buffer
.rindex
!= wpipe
->pipe_buffer
.windex
)
706 pipeselwakeup(wpipe
);
712 * MPALMOSTSAFE - acquires mplock
714 * we implement a very minimal set of ioctls for compatibility with sockets.
717 pipe_ioctl(struct file
*fp
, u_long cmd
, caddr_t data
, struct ucred
*cred
)
723 mpipe
= (struct pipe
*)fp
->f_data
;
728 mpipe
->pipe_state
|= PIPE_ASYNC
;
730 mpipe
->pipe_state
&= ~PIPE_ASYNC
;
735 *(int *)data
= mpipe
->pipe_buffer
.windex
-
736 mpipe
->pipe_buffer
.rindex
;
740 error
= fsetown(*(int *)data
, &mpipe
->pipe_sigio
);
743 *(int *)data
= fgetown(mpipe
->pipe_sigio
);
747 /* This is deprecated, FIOSETOWN should be used instead. */
748 error
= fsetown(-(*(int *)data
), &mpipe
->pipe_sigio
);
752 /* This is deprecated, FIOGETOWN should be used instead. */
753 *(int *)data
= -fgetown(mpipe
->pipe_sigio
);
765 * MPALMOSTSAFE - acquires mplock
768 pipe_poll(struct file
*fp
, int events
, struct ucred
*cred
)
776 rpipe
= (struct pipe
*)fp
->f_data
;
777 wpipe
= rpipe
->pipe_peer
;
778 if (events
& (POLLIN
| POLLRDNORM
)) {
779 if ((rpipe
->pipe_buffer
.windex
!= rpipe
->pipe_buffer
.rindex
) ||
780 (rpipe
->pipe_state
& PIPE_EOF
)) {
781 revents
|= events
& (POLLIN
| POLLRDNORM
);
785 if (events
& (POLLOUT
| POLLWRNORM
)) {
786 if (wpipe
== NULL
|| (wpipe
->pipe_state
& PIPE_EOF
)) {
787 revents
|= events
& (POLLOUT
| POLLWRNORM
);
789 space
= wpipe
->pipe_buffer
.windex
-
790 wpipe
->pipe_buffer
.rindex
;
791 space
= wpipe
->pipe_buffer
.size
- space
;
792 if (space
>= PIPE_BUF
)
793 revents
|= events
& (POLLOUT
| POLLWRNORM
);
797 if ((rpipe
->pipe_state
& PIPE_EOF
) ||
799 (wpipe
->pipe_state
& PIPE_EOF
))
803 if (events
& (POLLIN
| POLLRDNORM
)) {
804 selrecord(curthread
, &rpipe
->pipe_sel
);
805 rpipe
->pipe_state
|= PIPE_SEL
;
808 if (events
& (POLLOUT
| POLLWRNORM
)) {
809 selrecord(curthread
, &wpipe
->pipe_sel
);
810 wpipe
->pipe_state
|= PIPE_SEL
;
818 * MPALMOSTSAFE - acquires mplock
821 pipe_stat(struct file
*fp
, struct stat
*ub
, struct ucred
*cred
)
826 pipe
= (struct pipe
*)fp
->f_data
;
828 bzero((caddr_t
)ub
, sizeof(*ub
));
829 ub
->st_mode
= S_IFIFO
;
830 ub
->st_blksize
= pipe
->pipe_buffer
.size
;
831 ub
->st_size
= pipe
->pipe_buffer
.windex
- pipe
->pipe_buffer
.rindex
;
832 ub
->st_blocks
= (ub
->st_size
+ ub
->st_blksize
- 1) / ub
->st_blksize
;
833 ub
->st_atimespec
= pipe
->pipe_atime
;
834 ub
->st_mtimespec
= pipe
->pipe_mtime
;
835 ub
->st_ctimespec
= pipe
->pipe_ctime
;
837 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
839 * XXX (st_dev, st_ino) should be unique.
846 * MPALMOSTSAFE - acquires mplock
849 pipe_close(struct file
*fp
)
854 cpipe
= (struct pipe
*)fp
->f_data
;
855 fp
->f_ops
= &badfileops
;
857 funsetown(cpipe
->pipe_sigio
);
864 * Shutdown one or both directions of a full-duplex pipe.
866 * MPALMOSTSAFE - acquires mplock
869 pipe_shutdown(struct file
*fp
, int how
)
876 rpipe
= (struct pipe
*)fp
->f_data
;
882 rpipe
->pipe_state
|= PIPE_EOF
;
883 pipeselwakeup(rpipe
);
884 if (rpipe
->pipe_busy
)
892 if (rpipe
&& (wpipe
= rpipe
->pipe_peer
) != NULL
) {
893 wpipe
->pipe_state
|= PIPE_EOF
;
894 pipeselwakeup(wpipe
);
895 if (wpipe
->pipe_busy
)
905 pipe_free_kmem(struct pipe
*cpipe
)
907 if (cpipe
->pipe_buffer
.buffer
!= NULL
) {
908 if (cpipe
->pipe_buffer
.size
> PIPE_SIZE
)
910 kmem_free(&kernel_map
,
911 (vm_offset_t
)cpipe
->pipe_buffer
.buffer
,
912 cpipe
->pipe_buffer
.size
);
913 cpipe
->pipe_buffer
.buffer
= NULL
;
914 cpipe
->pipe_buffer
.object
= NULL
;
922 pipeclose(struct pipe
*cpipe
)
930 pipeselwakeup(cpipe
);
933 * If the other side is blocked, wake it up saying that
934 * we want to close it down.
936 while (cpipe
->pipe_busy
) {
938 cpipe
->pipe_state
|= PIPE_WANT
| PIPE_EOF
;
939 tsleep(cpipe
, 0, "pipecl", 0);
943 * Disconnect from peer
945 if ((ppipe
= cpipe
->pipe_peer
) != NULL
) {
946 pipeselwakeup(ppipe
);
948 ppipe
->pipe_state
|= PIPE_EOF
;
950 KNOTE(&ppipe
->pipe_sel
.si_note
, 0);
951 ppipe
->pipe_peer
= NULL
;
954 if (cpipe
->pipe_kva
) {
955 pmap_qremove(cpipe
->pipe_kva
, XIO_INTERNAL_PAGES
);
956 kmem_free(&kernel_map
, cpipe
->pipe_kva
, XIO_INTERNAL_SIZE
);
961 * free or cache resources
964 if (gd
->gd_pipeqcount
>= pipe_maxcache
||
965 cpipe
->pipe_buffer
.size
!= PIPE_SIZE
967 pipe_free_kmem(cpipe
);
968 kfree(cpipe
, M_PIPE
);
970 cpipe
->pipe_state
= 0;
971 cpipe
->pipe_busy
= 0;
972 cpipe
->pipe_peer
= gd
->gd_pipeq
;
973 gd
->gd_pipeq
= cpipe
;
979 * MPALMOSTSAFE - acquires mplock
982 pipe_kqfilter(struct file
*fp
, struct knote
*kn
)
987 cpipe
= (struct pipe
*)kn
->kn_fp
->f_data
;
989 switch (kn
->kn_filter
) {
991 kn
->kn_fop
= &pipe_rfiltops
;
994 kn
->kn_fop
= &pipe_wfiltops
;
995 cpipe
= cpipe
->pipe_peer
;
997 /* other end of pipe has been closed */
1005 kn
->kn_hook
= (caddr_t
)cpipe
;
1007 SLIST_INSERT_HEAD(&cpipe
->pipe_sel
.si_note
, kn
, kn_selnext
);
1013 filt_pipedetach(struct knote
*kn
)
1015 struct pipe
*cpipe
= (struct pipe
*)kn
->kn_hook
;
1017 SLIST_REMOVE(&cpipe
->pipe_sel
.si_note
, kn
, knote
, kn_selnext
);
1022 filt_piperead(struct knote
*kn
, long hint
)
1024 struct pipe
*rpipe
= (struct pipe
*)kn
->kn_fp
->f_data
;
1025 struct pipe
*wpipe
= rpipe
->pipe_peer
;
1027 kn
->kn_data
= rpipe
->pipe_buffer
.windex
- rpipe
->pipe_buffer
.rindex
;
1029 if ((rpipe
->pipe_state
& PIPE_EOF
) ||
1030 (wpipe
== NULL
) || (wpipe
->pipe_state
& PIPE_EOF
)) {
1031 kn
->kn_flags
|= EV_EOF
;
1034 return (kn
->kn_data
> 0);
1039 filt_pipewrite(struct knote
*kn
, long hint
)
1041 struct pipe
*rpipe
= (struct pipe
*)kn
->kn_fp
->f_data
;
1042 struct pipe
*wpipe
= rpipe
->pipe_peer
;
1045 if ((wpipe
== NULL
) || (wpipe
->pipe_state
& PIPE_EOF
)) {
1047 kn
->kn_flags
|= EV_EOF
;
1050 space
= wpipe
->pipe_buffer
.windex
-
1051 wpipe
->pipe_buffer
.rindex
;
1052 space
= wpipe
->pipe_buffer
.size
- space
;
1053 kn
->kn_data
= space
;
1054 return (kn
->kn_data
>= PIPE_BUF
);