/*-
 * Copyright (c) 1996 John S. Dyson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 *
 * This code has two modes of operation: a small write mode and a large
 * write mode.  The small write mode acts like conventional pipes with
 * a kernel buffer.  If the buffer is less than PIPE_MINDIRECT, then the
 * "normal" pipe buffering is done.  If the buffer is between PIPE_MINDIRECT
 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
 * the receiving process can copy it directly from the pages in the sending
 * process.
 *
 * If the sending process receives a signal, it is possible that it will
 * go away, and certainly its address space can change, because control
 * is returned back to the user-mode side.  In that case, the pipe code
 * arranges to copy the buffer supplied by the user process to a pageable
 * kernel buffer, and the receiving process will grab the data from the
 * pageable kernel buffer.  Since signals don't happen all that often,
 * the copy operation is normally eliminated.
 *
 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
 * happen for small transfers so that the system will not spend all of
 * its time context switching.
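 *
 * As a rough userland illustration (not part of this file), the direct
 * path targets writes of a single large buffer; the threshold is
 * PIPE_MINDIRECT from sys/pipe.h.  A minimal, hedged sketch, assuming a
 * reader is already draining fds[0] (for example in a forked child):
 *
 *	int fds[2];
 *	static char buf[65536];		// well above PIPE_MINDIRECT
 *
 *	if (pipe(fds) == -1)
 *		err(1, "pipe");
 *	if (write(fds[1], buf, sizeof(buf)) == -1)
 *		err(1, "write");
 *
 * Whether the direct path is actually used also depends on the descriptor
 * not being non-blocking and on kernel memory pressure, as described below.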
 *
 * In order to limit the resource use of pipes, two sysctls exist:
 *
 * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable
 * address space available to us in pipe_map.  This value is normally
 * autotuned, but may also be loader tuned.
 *
 * kern.ipc.pipekva - This read-only sysctl tracks the current amount of
 * memory in use by pipes.
 *
 * Based on how large pipekva is relative to maxpipekva, the following
 * will happen:
 *
 * 0% - 50%:
 *     New pipes are given 16K of memory backing, pipes may dynamically
 *     grow to as large as 64K where needed.
 * 50% - 75%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes may NOT grow.
 * 75% - 100%:
 *     New pipes are given 4K (or PAGE_SIZE) of memory backing,
 *     existing pipes will be shrunk down to 4K whenever possible.
 *
 * Resizing may be disabled by setting kern.ipc.piperesizeallowed=0.  If
 * that is set, the only resize that will occur is the 0 -> SMALL_PIPE_SIZE
 * resize which MUST occur for reverse-direction pipes when they are
 * first used.
 *
 * Additional information about the current state of pipes may be obtained
 * from kern.ipc.pipes, kern.ipc.pipefragretry, kern.ipc.pipeallocfail,
 * and kern.ipc.piperesizefail.
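 *
 * These sysctls can be inspected from userland with the standard
 * sysctlbyname(3) interface; for example, a small monitoring sketch that
 * reads the limit and the current usage might look like:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int kva, maxkva;
 *	size_t len = sizeof(int);
 *
 *	sysctlbyname("kern.ipc.pipekva", &kva, &len, NULL, 0);
 *	len = sizeof(int);
 *	sysctlbyname("kern.ipc.maxpipekva", &maxkva, &len, NULL, 0);
 *	printf("pipe KVA: %d of %d bytes\n", kva, maxkva);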
 *
 * Locking rules:  There are two locks present here:  A mutex, used via
 * PIPE_LOCK, and a flag, used via pipelock().  All locking is done via
 * the flag, as mutexes cannot persist over uiomove.  The mutex
 * exists only to guard access to the flag, and is not in itself a
 * locking mechanism.  Also note that there is only a single mutex for
 * both directions of a pipe.
 *
 * As pipelock() may have to sleep before it can acquire the flag, it
 * is important to reread all data after a call to pipelock(); everything
 * in the structure may have changed.
 */
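
/*
 * The usual pattern inside this file is therefore (a sketch, not a
 * complete function): take the pipe mutex, acquire the long-term flag
 * with pipelock(), re-check any state that matters, and release the flag
 * with pipeunlock() before dropping the mutex:
 *
 *	PIPE_LOCK(cpipe);
 *	error = pipelock(cpipe, 1);	// may sleep; state may change
 *	if (error == 0) {
 *		// re-read cpipe->pipe_state, buffer indexes, etc. here
 *		pipeunlock(cpipe);
 *	}
 *	PIPE_UNLOCK(cpipe);
 *
 * uiomove() itself is always done with the mutex dropped, as noted above.
 */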

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/event.h>

#include <security/mac/mac_framework.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

/*
 * Use this define if you want to disable *fancy* VM things.  Expect an
 * approx 30% decrease in transfer rate.  This could be useful for
 * NetBSD or OpenBSD.
 */
/* #define PIPE_NODIRECT */

/*
 * interfaces to the outside world
 */
static fo_rdwr_t	pipe_read;
static fo_rdwr_t	pipe_write;
static fo_truncate_t	pipe_truncate;
static fo_ioctl_t	pipe_ioctl;
static fo_poll_t	pipe_poll;
static fo_kqfilter_t	pipe_kqfilter;
static fo_stat_t	pipe_stat;
static fo_close_t	pipe_close;

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_truncate = pipe_truncate,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_flags = DFLAG_PASSABLE
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

static int amountpipekva;
static int pipefragretry;
static int pipeallocfail;
static int piperesizefail;
static int piperesizeallowed = 1;

SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RDTUN,
	   &maxpipekva, 0, "Pipe KVA limit");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD,
	   &amountpipekva, 0, "Pipe KVA usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipefragretry, CTLFLAG_RD,
	   &pipefragretry, 0, "Pipe allocation retries due to fragmentation");
SYSCTL_INT(_kern_ipc, OID_AUTO, pipeallocfail, CTLFLAG_RD,
	   &pipeallocfail, 0, "Pipe allocation failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizefail, CTLFLAG_RD,
	   &piperesizefail, 0, "Pipe resize failures");
SYSCTL_INT(_kern_ipc, OID_AUTO, piperesizeallowed, CTLFLAG_RW,
	   &piperesizeallowed, 0, "Pipe resizing allowed");

static void pipeinit(void *dummy __unused);
static void pipeclose(struct pipe *cpipe);
static void pipe_free_kmem(struct pipe *cpipe);
static int pipe_create(struct pipe *pipe, int backing);
static __inline int pipelock(struct pipe *cpipe, int catch);
static __inline void pipeunlock(struct pipe *cpipe);
static __inline void pipeselwakeup(struct pipe *cpipe);
#ifndef PIPE_NODIRECT
static int pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio);
static void pipe_destroy_write_buffer(struct pipe *wpipe);
static int pipe_direct_write(struct pipe *wpipe, struct uio *uio);
static void pipe_clone_write_buffer(struct pipe *wpipe);
#endif
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static int pipe_zone_init(void *mem, int size, int flags);
static void pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;

SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL);

static void
pipeinit(void *dummy __unused)
{

	pipe_zone = uma_zcreate("pipe", sizeof(struct pipepair),
	    pipe_zone_ctor, NULL, pipe_zone_init, pipe_zone_fini,
	    UMA_ALIGN_PTR, 0);
	KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
}

static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;

	KASSERT(size == sizeof(*pp), ("pipe_zone_ctor: wrong size"));

	pp = (struct pipepair *)mem;

	/*
	 * We zero both pipe endpoints to make sure all the kmem pointers
	 * are NULL, flag fields are zero'd, etc.  We timestamp both
	 * endpoints with the same time.
	 */
	rpipe = &pp->pp_rpipe;
	bzero(rpipe, sizeof(*rpipe));
	vfs_timestamp(&rpipe->pipe_ctime);
	rpipe->pipe_atime = rpipe->pipe_mtime = rpipe->pipe_ctime;

	wpipe = &pp->pp_wpipe;
	bzero(wpipe, sizeof(*wpipe));
	wpipe->pipe_ctime = rpipe->pipe_ctime;
	wpipe->pipe_atime = wpipe->pipe_mtime = rpipe->pipe_ctime;

	rpipe->pipe_peer = wpipe;
	rpipe->pipe_pair = pp;
	wpipe->pipe_peer = rpipe;
	wpipe->pipe_pair = pp;

	/*
	 * Mark both endpoints as present; they will later get free'd
	 * one at a time.  When both are free'd, then the whole pair
	 * is free'd.
	 */
	rpipe->pipe_present = PIPE_ACTIVE;
	wpipe->pipe_present = PIPE_ACTIVE;

	/*
	 * Eventually, the MAC Framework may initialize the label
	 * in ctor or init, but for now we do it elsewhere to avoid
	 * blocking in ctor or init.
	 */
	return (0);
}

static int
pipe_zone_init(void *mem, int size, int flags)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_init: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
	return (0);
}

static void
pipe_zone_fini(void *mem, int size)
{
	struct pipepair *pp;

	KASSERT(size == sizeof(*pp), ("pipe_zone_fini: wrong size"));

	pp = (struct pipepair *)mem;

	mtx_destroy(&pp->pp_mtx);
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes.  If we fail, let
 * the zone pick up the pieces via pipeclose().
 */
int
pipe(struct thread *td, struct pipe_args *uap)
{
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipepair *pp;
	struct pipe *rpipe, *wpipe;
	int fd, error;

	pp = uma_zalloc(pipe_zone, M_WAITOK);
	/*
	 * The MAC label is shared between the connected endpoints.  As a
	 * result mac_pipe_init() and mac_pipe_create() are called once
	 * for the pair, and not on the endpoints.
	 */
	mac_pipe_create(td->td_ucred, pp);
	rpipe = &pp->pp_rpipe;
	wpipe = &pp->pp_wpipe;

	knlist_init(&rpipe->pipe_sel.si_note, PIPE_MTX(rpipe), NULL, NULL,
	    NULL);
	knlist_init(&wpipe->pipe_sel.si_note, PIPE_MTX(wpipe), NULL, NULL,
	    NULL);

	/* Only the forward direction pipe is backed by default */
	if ((error = pipe_create(rpipe, 1)) != 0 ||
	    (error = pipe_create(wpipe, 0)) != 0) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}

	rpipe->pipe_state |= PIPE_DIRECTOK;
	wpipe->pipe_state |= PIPE_DIRECTOK;

	error = falloc(td, &rf, &fd);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `rf' has been held for us by falloc(). */
	td->td_retval[0] = fd;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	finit(rf, FREAD | FWRITE, DTYPE_PIPE, rpipe, &pipeops);
	error = falloc(td, &wf, &fd);
	if (error) {
		fdclose(fdp, rf, td->td_retval[0], td);
		fdrop(rf, td);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	/* An extra reference on `wf' has been held for us by falloc(). */
	finit(wf, FREAD | FWRITE, DTYPE_PIPE, wpipe, &pipeops);
	fdrop(wf, td);
	td->td_retval[1] = fd;
	fdrop(rf, td);

	return (0);
}
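
/*
 * Userland view, for orientation (a hedged sketch, not part of this file):
 * the two descriptors returned above are conventionally used as a
 * parent/child byte stream.
 *
 *	int fds[2];
 *	pid_t pid;
 *	char msg[] = "hello";
 *	char buf[sizeof(msg)];
 *
 *	if (pipe(fds) == -1)
 *		err(1, "pipe");
 *	if ((pid = fork()) == 0) {		// child: read end
 *		close(fds[1]);
 *		read(fds[0], buf, sizeof(buf));
 *		_exit(0);
 *	}
 *	close(fds[0]);				// parent: write end
 *	write(fds[1], msg, sizeof(msg));
 *	close(fds[1]);
 *	waitpid(pid, NULL, 0);
 *
 * Note that this implementation gives both descriptors FREAD|FWRITE (see
 * the finit() calls above), i.e. these pipes are bidirectional, although
 * the reverse direction is not backed with memory until it is first used.
 */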

/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace_new(cpipe, size)
	struct pipe *cpipe;
	int size;
{
	caddr_t buffer;
	int error, cnt, firstseg;
	static int curfail = 0;
	static struct timeval lastfail;

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)), ("pipespace: pipe mutex locked"));
	KASSERT(!(cpipe->pipe_state & PIPE_DIRECTW),
		("pipespace: resize of direct writes not allowed"));

	cnt = cpipe->pipe_buffer.cnt;
retry:
	size = round_page(size);
	buffer = (caddr_t) vm_map_min(pipe_map);

	error = vm_map_find(pipe_map, NULL, 0,
		(vm_offset_t *) &buffer, size, 1,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error != KERN_SUCCESS) {
		if ((cpipe->pipe_buffer.buffer == NULL) &&
		    (size > SMALL_PIPE_SIZE)) {
			size = SMALL_PIPE_SIZE;
			pipefragretry++;
			goto retry;
		}
		if (cpipe->pipe_buffer.buffer == NULL) {
			pipeallocfail++;
			if (ppsratecheck(&lastfail, &curfail, 1))
				printf("kern.ipc.maxpipekva exceeded; see tuning(7)\n");
		} else {
			piperesizefail++;
		}
		return (ENOMEM);
	}

	/* copy data, then free old resources if we're resizing */
	if (cnt > 0) {
		if (cpipe->pipe_buffer.in <= cpipe->pipe_buffer.out) {
			firstseg = cpipe->pipe_buffer.size - cpipe->pipe_buffer.out;
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, firstseg);
			if ((cnt - firstseg) > 0)
				bcopy(cpipe->pipe_buffer.buffer, &buffer[firstseg],
					cpipe->pipe_buffer.in);
		} else {
			bcopy(&cpipe->pipe_buffer.buffer[cpipe->pipe_buffer.out],
				buffer, cnt);
		}
	}
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = cnt;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = cnt;
	atomic_add_int(&amountpipekva, cpipe->pipe_buffer.size);
	return (0);
}
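
/*
 * To make the rebase above concrete (illustrative numbers only): with an
 * old buffer of size 16384 holding cnt = 6000 bytes at out = 12000 and
 * in = 1616, the data wraps.  firstseg = 16384 - 12000 = 4384 bytes are
 * copied from offset 12000 of the old buffer to offset 0 of the new one,
 * and the remaining cnt - firstseg = 1616 bytes (which sit at offsets
 * 0..in of the old buffer) are copied to offset 4384.  Afterwards the
 * data is contiguous again, so in = cnt and out = 0.
 */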

/*
 * Wrapper for pipespace_new() that performs locking assertions.
 */
static int
pipespace(cpipe, size)
	struct pipe *cpipe;
	int size;
{

	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipespace"));
	return (pipespace_new(cpipe, size));
}

/*
 * lock a pipe for I/O, blocking other access
 */
static __inline int
pipelock(cpipe, catch)
	struct pipe *cpipe;
	int catch;
{
	int error;

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	while (cpipe->pipe_state & PIPE_LOCKFL) {
		cpipe->pipe_state |= PIPE_LWANT;
		error = msleep(cpipe, PIPE_MTX(cpipe),
		    catch ? (PRIBIO | PCATCH) : PRIBIO,
		    "pipelk", 0);
		if (error != 0)
			return (error);
	}
	cpipe->pipe_state |= PIPE_LOCKFL;
	return (0);
}

/*
 * unlock a pipe I/O lock
 */
static __inline void
pipeunlock(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	KASSERT(cpipe->pipe_state & PIPE_LOCKFL,
		("Unlocked pipe passed to pipeunlock"));
	cpipe->pipe_state &= ~PIPE_LOCKFL;
	if (cpipe->pipe_state & PIPE_LWANT) {
		cpipe->pipe_state &= ~PIPE_LWANT;
		wakeup(cpipe);
	}
}

static __inline void
pipeselwakeup(cpipe)
	struct pipe *cpipe;
{

	PIPE_LOCK_ASSERT(cpipe, MA_OWNED);
	if (cpipe->pipe_state & PIPE_SEL) {
		selwakeuppri(&cpipe->pipe_sel, PSOCK);
		if (!SEL_WAITING(&cpipe->pipe_sel))
			cpipe->pipe_state &= ~PIPE_SEL;
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
		pgsigio(&cpipe->pipe_sigio, SIGIO, 0);
	KNOTE_LOCKED(&cpipe->pipe_sel.si_note, 0);
}

/*
 * Initialize and allocate VM and memory for pipe.  The structure
 * will start out zero'd from the ctor, so we just manage the kmem.
 */
static int
pipe_create(pipe, backing)
	struct pipe *pipe;
	int backing;
{
	int error;

	if (backing) {
		if (amountpipekva > maxpipekva / 2)
			error = pipespace_new(pipe, SMALL_PIPE_SIZE);
		else
			error = pipespace_new(pipe, PIPE_SIZE);
	} else {
		/* If we're not backing this pipe, no need to do anything. */
		error = 0;
	}
	return (error);
}

static int
pipe_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	int error;
	u_int size;

	PIPE_LOCK(rpipe);
	error = pipelock(rpipe, 1);

	error = mac_pipe_check_read(active_cred, rpipe->pipe_pair);

	if (amountpipekva > (3 * maxpipekva) / 4) {
		if (!(rpipe->pipe_state & PIPE_DIRECTW) &&
		    (rpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
		    (rpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
		    (piperesizeallowed == 1)) {
			pipespace(rpipe, SMALL_PIPE_SIZE);
		}
	}

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove(
			    &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			if (error)
				break;

			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
#ifndef PIPE_NODIRECT
		/*
		 * Direct copy, bypassing a kernel buffer.
		 */
		} else if ((size = rpipe->pipe_map.cnt) &&
			   (rpipe->pipe_state & PIPE_DIRECTW)) {
			if (size > (u_int) uio->uio_resid)
				size = (u_int) uio->uio_resid;

			error = uiomove_fromphys(rpipe->pipe_map.ms,
			    rpipe->pipe_map.pos, size, uio);
			if (error)
				break;

			rpipe->pipe_map.pos += size;
			rpipe->pipe_map.cnt -= size;
			if (rpipe->pipe_map.cnt == 0) {
				rpipe->pipe_state &= ~PIPE_DIRECTW;
				wakeup(rpipe);
			}
#endif
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = msleep(rpipe, PIPE_MTX(rpipe),
				    PRIBIO | PCATCH, "piperd", 0)) == 0)
					error = pipelock(rpipe, 1);
			}
			if (error)
				break;
		}
	}

	/* XXX: should probably do this before getting any locks. */
	vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	PIPE_UNLOCK(rpipe);
	return (error);
}

#ifndef PIPE_NODIRECT
/*
 * Map the sending processes' buffer into kernel space and wire it.
 * This is similar to a physical write operation.
 */
static int
pipe_build_write_buffer(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	pmap_t pmap;
	u_int size;
	int i, j;
	vm_offset_t addr, endaddr;

	PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
	KASSERT(wpipe->pipe_state & PIPE_DIRECTW,
		("Clone attempt on non-direct write pipe!"));

	size = (u_int) uio->uio_iov->iov_len;
	if (size > wpipe->pipe_buffer.size)
		size = wpipe->pipe_buffer.size;

	pmap = vmspace_pmap(curproc->p_vmspace);
	endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
	addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
	for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
		/*
		 * vm_fault_quick() can sleep.  Consequently,
		 * vm_page_lock_queue() and vm_page_unlock_queue()
		 * should not be performed outside of this loop.
		 */
	race:
		if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
			vm_page_lock_queues();
			for (j = 0; j < i; j++)
				vm_page_unhold(wpipe->pipe_map.ms[j]);
			vm_page_unlock_queues();
			return (EFAULT);
		}
		wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
		    VM_PROT_READ);
		if (wpipe->pipe_map.ms[i] == NULL)
			goto race;
	}

	/*
	 * set up the control block
	 */
	wpipe->pipe_map.npages = i;
	wpipe->pipe_map.pos =
	    ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
	wpipe->pipe_map.cnt = size;

	/*
	 * and update the uio data
	 */
	uio->uio_iov->iov_len -= size;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + size;
	if (uio->uio_iov->iov_len == 0)
		uio->uio_iov++;
	uio->uio_resid -= size;
	uio->uio_offset += size;
	return (0);
}

/*
 * unmap and unwire the process buffer
 */
static void
pipe_destroy_write_buffer(wpipe)
	struct pipe *wpipe;
{
	int i;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	vm_page_lock_queues();
	for (i = 0; i < wpipe->pipe_map.npages; i++) {
		vm_page_unhold(wpipe->pipe_map.ms[i]);
	}
	vm_page_unlock_queues();
	wpipe->pipe_map.npages = 0;
}

/*
 * In the case of a signal, the writing process might go away.  This
 * code copies the data into the circular buffer so that the source
 * pages can be freed without loss of data.
 */
static void
pipe_clone_write_buffer(wpipe)
	struct pipe *wpipe;
{
	struct uio uio;
	struct iovec iov;
	int size;
	int pos;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	size = wpipe->pipe_map.cnt;
	pos = wpipe->pipe_map.pos;

	wpipe->pipe_buffer.in = size;
	wpipe->pipe_buffer.out = 0;
	wpipe->pipe_buffer.cnt = size;
	wpipe->pipe_state &= ~PIPE_DIRECTW;

	PIPE_UNLOCK(wpipe);
	iov.iov_base = wpipe->pipe_buffer.buffer;
	iov.iov_len = size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;
	uiomove_fromphys(wpipe->pipe_map.ms, pos, size, &uio);
	PIPE_LOCK(wpipe);
	pipe_destroy_write_buffer(wpipe);
}

/*
 * This implements the pipe buffer write mechanism.  Note that only
 * a direct write OR a normal pipe write can be pending at any given time.
 * If there are any characters in the pipe buffer, the direct write will
 * be deferred until the receiving process grabs all of the bytes from
 * the pipe buffer.  Then the direct mapping write is set-up.
 */
static int
pipe_direct_write(wpipe, uio)
	struct pipe *wpipe;
	struct uio *uio;
{
	int error;

	PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
	error = pipelock(wpipe, 1);
	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;

	while (wpipe->pipe_state & PIPE_DIRECTW) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdww", 0);
	}
	wpipe->pipe_map.cnt = 0;	/* transfer not ready yet */
	if (wpipe->pipe_buffer.cnt > 0) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		wpipe->pipe_state |= PIPE_WANTW;
		error = msleep(wpipe, PIPE_MTX(wpipe),
		    PRIBIO | PCATCH, "pipdwc", 0);
	}

	wpipe->pipe_state |= PIPE_DIRECTW;

	error = pipe_build_write_buffer(wpipe, uio);
	if (error) {
		wpipe->pipe_state &= ~PIPE_DIRECTW;
	}

	while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
		if (wpipe->pipe_state & PIPE_EOF) {
			pipe_destroy_write_buffer(wpipe);
			pipeselwakeup(wpipe);
			error = EPIPE;
			break;
		}
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		pipeselwakeup(wpipe);
		error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH,
		    "pipdwt", 0);
	}

	if (wpipe->pipe_state & PIPE_EOF)
		error = EPIPE;
	if (wpipe->pipe_state & PIPE_DIRECTW) {
		/*
		 * this bit of trickery substitutes a kernel buffer for
		 * the process that might be going away.
		 */
		pipe_clone_write_buffer(wpipe);
	} else {
		pipe_destroy_write_buffer(wpipe);
	}
	return (error);
}
#endif

static int
pipe_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	int error;
	int desiredsize, orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = fp->f_data;
	wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	error = pipelock(wpipe, 1);

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (EPIPE);
	}

	error = mac_pipe_check_write(active_cred, wpipe->pipe_pair);

	/* Choose a larger size if it's advantageous */
	desiredsize = max(SMALL_PIPE_SIZE, wpipe->pipe_buffer.size);
	while (desiredsize < wpipe->pipe_buffer.cnt + uio->uio_resid) {
		if (piperesizeallowed != 1)
			break;
		if (amountpipekva > maxpipekva / 2)
			break;
		if (desiredsize == BIG_PIPE_SIZE)
			break;
		desiredsize = desiredsize * 2;
	}

	/* Choose a smaller size if we're in an OOM situation */
	if ((amountpipekva > (3 * maxpipekva) / 4) &&
	    (wpipe->pipe_buffer.size > SMALL_PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt <= SMALL_PIPE_SIZE) &&
	    (piperesizeallowed == 1))
		desiredsize = SMALL_PIPE_SIZE;

	/* Resize if the above determined that a new size was necessary */
	if ((desiredsize != wpipe->pipe_buffer.size) &&
	    ((wpipe->pipe_state & PIPE_DIRECTW) == 0)) {
		pipespace(wpipe, desiredsize);
	}
	if (wpipe->pipe_buffer.size == 0) {
		/*
		 * This can only happen for reverse direction use of pipes
		 * in a complete OOM situation.
		 */
		pipeunlock(wpipe);
		PIPE_UNLOCK(rpipe);
		return (ENOMEM);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}
#ifndef PIPE_NODIRECT
		/*
		 * If the transfer is large, we can gain performance if
		 * we do process-to-process copies directly.
		 * If the write is non-blocking, we don't use the
		 * direct write mechanism.
		 *
		 * The direct write mechanism will detect the reader going
		 * away on us.
		 */
		if (uio->uio_segflg == UIO_USERSPACE &&
		    uio->uio_iov->iov_len >= PIPE_MINDIRECT &&
		    wpipe->pipe_buffer.size >= PIPE_MINDIRECT &&
		    (fp->f_flag & FNONBLOCK) == 0) {
			error = pipe_direct_write(wpipe, uio);
			if (error)
				break;
			continue;
		}
#endif
		/*
		 * Pipe buffered writes cannot be coincident with
		 * direct writes.  We wait until the currently executing
		 * direct write is completed before we start filling the
		 * pipe buffer.  We break out if a signal occurs or the
		 * reader goes away.
		 */
		if (wpipe->pipe_state & PIPE_DIRECTW) {
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}
			pipeselwakeup(wpipe);
			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH,
			    "pipbww", 0);
			if (error)
				break;
			continue;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			int size;	/* Transfer size */
			int segsize;	/* first segment to transfer */

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 */
			if (space > uio->uio_resid)
				size = uio->uio_resid;
			else
				size = space;
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size -
				wpipe->pipe_buffer.in;
			if (segsize > size)
				segsize = size;

			/* Transfer first segment */
			error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
					segsize, uio);

			if (error == 0 && segsize < size) {
				KASSERT(wpipe->pipe_buffer.in + segsize ==
					wpipe->pipe_buffer.size,
					("Pipe buffer wraparound disappeared"));
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
				error = uiomove(
				    &wpipe->pipe_buffer.buffer[0],
				    size - segsize, uio);
			}
			if (error == 0) {
				wpipe->pipe_buffer.in += size;
				if (wpipe->pipe_buffer.in >=
				    wpipe->pipe_buffer.size) {
					KASSERT(wpipe->pipe_buffer.in ==
						size - segsize +
						wpipe->pipe_buffer.size,
						("Expected wraparound bad"));
					wpipe->pipe_buffer.in = size - segsize;
				}

				wpipe->pipe_buffer.cnt += size;
				KASSERT(wpipe->pipe_buffer.cnt <=
					wpipe->pipe_buffer.size,
					("Pipe buffer overflow"));
			}
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = msleep(wpipe, PIPE_MTX(rpipe),
			    PRIBIO | PCATCH, "pipewr", 0);
			if (error != 0)
				break;
		}
	}

	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	pipeunlock(wpipe);
	PIPE_UNLOCK(rpipe);
	return (error);
}
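
/*
 * The PIPE_BUF guarantee above is what POSIX relies on for atomic writes:
 * writes of at most PIPE_BUF bytes are never interleaved with writes from
 * other threads or processes.  A hedged userland sketch of the common
 * many-writers/one-reader pattern that depends on this, assuming logfd is
 * the write end of a pipe shared by several writers:
 *
 *	#define RECSZ	128			// <= PIPE_BUF (at least 512)
 *
 *	char rec[RECSZ];
 *	// fill rec with one fixed-size record, then:
 *	if (write(logfd, rec, RECSZ) != RECSZ)	// atomic: no interleaving
 *		err(1, "write");
 *
 * Larger writes may be split; the code above only promises atomicity for
 * the <= PIPE_BUF case.
 */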

static int
pipe_truncate(fp, length, active_cred, td)
	struct file *fp;
	off_t length;
	struct ucred *active_cred;
	struct thread *td;
{

	return (EINVAL);
}

/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(fp, cmd, data, active_cred, td)
	struct file *fp;
	u_long cmd;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *mpipe = fp->f_data;
	int error;

	error = mac_pipe_check_ioctl(active_cred, mpipe->pipe_pair, cmd, data);

	switch (cmd) {

	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		break;

	case FIONREAD:
		if (mpipe->pipe_state & PIPE_DIRECTW)
			*(int *)data = mpipe->pipe_map.cnt;
		else
			*(int *)data = mpipe->pipe_buffer.cnt;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;

	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		break;

	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
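
/*
 * From userland the most commonly used of these is FIONREAD, which reports
 * how many bytes are ready to be read (from either the kernel buffer or a
 * pending direct write, as handled above).  A small sketch:
 *
 *	#include <sys/ioctl.h>
 *
 *	int nready;
 *
 *	if (ioctl(fds[0], FIONREAD, &nready) == -1)
 *		err(1, "ioctl");
 *	printf("%d bytes buffered in the pipe\n", nready);
 */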

static int
pipe_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *rpipe = fp->f_data;
	struct pipe *wpipe;
	int revents = 0;
	int error;

	wpipe = rpipe->pipe_peer;
	PIPE_LOCK(rpipe);

	error = mac_pipe_check_poll(active_cred, rpipe->pipe_pair);

	if (events & (POLLIN | POLLRDNORM))
		if ((rpipe->pipe_state & PIPE_DIRECTW) ||
		    (rpipe->pipe_buffer.cnt > 0) ||
		    (rpipe->pipe_state & PIPE_EOF))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (wpipe->pipe_present != PIPE_ACTIVE ||
		    (wpipe->pipe_state & PIPE_EOF) ||
		    (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
		     (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
			revents |= events & (POLLOUT | POLLWRNORM);

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(td, &rpipe->pipe_sel);
			if (SEL_WAITING(&rpipe->pipe_sel))
				rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(td, &wpipe->pipe_sel);
			if (SEL_WAITING(&wpipe->pipe_sel))
				wpipe->pipe_state |= PIPE_SEL;
		}
	}

	PIPE_UNLOCK(rpipe);
	return (revents);
}
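
/*
 * Userland sees the above through poll(2)/select(2); for example, waiting
 * for a pipe to become readable without blocking in read(2) (a sketch):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fds[0];
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, 5000) > 0) {		// wait up to 5 seconds
 *		if (pfd.revents & POLLHUP)
 *			printf("writer closed the pipe\n");
 *		else if (pfd.revents & POLLIN)
 *			printf("data is ready\n");
 *	}
 */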

/*
 * We shouldn't need locks here as we're doing a read and this should
 * be a natural race.
 */
static int
pipe_stat(fp, ub, active_cred, td)
	struct file *fp;
	struct stat *ub;
	struct ucred *active_cred;
	struct thread *td;
{
	struct pipe *pipe = fp->f_data;
	int error;

	error = mac_pipe_check_stat(active_cred, pipe->pipe_pair);

	bzero(ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = PAGE_SIZE;
	if (pipe->pipe_state & PIPE_DIRECTW)
		ub->st_size = pipe->pipe_map.cnt;
	else
		ub->st_size = pipe->pipe_buffer.cnt;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_rdev, st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}
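
/*
 * This is what fstat(2) on a pipe descriptor reports; a quick userland
 * check of the pending byte count (a sketch):
 *
 *	#include <sys/stat.h>
 *
 *	struct stat sb;
 *
 *	if (fstat(fds[0], &sb) == -1)
 *		err(1, "fstat");
 *	if (S_ISFIFO(sb.st_mode))
 *		printf("%jd bytes queued\n", (intmax_t)sb.st_size);
 */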

static int
pipe_close(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct pipe *cpipe = fp->f_data;

	fp->f_ops = &badfileops;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

static void
pipe_free_kmem(cpipe)
	struct pipe *cpipe;
{

	KASSERT(!mtx_owned(PIPE_MTX(cpipe)),
	    ("pipe_free_kmem: pipe mutex locked"));

	if (cpipe->pipe_buffer.buffer != NULL) {
		atomic_subtract_int(&amountpipekva, cpipe->pipe_buffer.size);
		vm_map_remove(pipe_map,
		    (vm_offset_t)cpipe->pipe_buffer.buffer,
		    (vm_offset_t)cpipe->pipe_buffer.buffer + cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
	}
#ifndef PIPE_NODIRECT
	cpipe->pipe_map.cnt = 0;
	cpipe->pipe_map.pos = 0;
	cpipe->pipe_map.npages = 0;
#endif
}

static void
pipeclose(cpipe)
	struct pipe *cpipe;
{
	struct pipepair *pp;
	struct pipe *ppipe;

	KASSERT(cpipe != NULL, ("pipeclose: cpipe == NULL"));

	PIPE_LOCK(cpipe);
	pipelock(cpipe, 0);
	pp = cpipe->pipe_pair;

	pipeselwakeup(cpipe);

	/*
	 * If the other side is blocked, wake it up saying that
	 * we want to close it down.
	 */
	cpipe->pipe_state |= PIPE_EOF;
	while (cpipe->pipe_busy) {
		wakeup(cpipe);
		cpipe->pipe_state |= PIPE_WANT;
		pipeunlock(cpipe);
		msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0);
		pipelock(cpipe, 0);
	}

	/*
	 * Disconnect from peer, if any.
	 */
	ppipe = cpipe->pipe_peer;
	if (ppipe->pipe_present == PIPE_ACTIVE) {
		pipeselwakeup(ppipe);

		ppipe->pipe_state |= PIPE_EOF;
		wakeup(ppipe);
		KNOTE_LOCKED(&ppipe->pipe_sel.si_note, 0);
	}

	/*
	 * Mark this endpoint as free.  Release kmem resources.  We
	 * don't mark this endpoint as unused until we've finished
	 * doing that, or the pipe might disappear out from under
	 * us.
	 */
	PIPE_UNLOCK(cpipe);
	pipe_free_kmem(cpipe);
	PIPE_LOCK(cpipe);
	cpipe->pipe_present = PIPE_CLOSING;
	pipeunlock(cpipe);

	/*
	 * knlist_clear() may sleep dropping the PIPE_MTX.  Set
	 * PIPE_FINALIZED, which allows the other end to free the
	 * pipe_pair, only after the knotes are completely dismantled.
	 */
	knlist_clear(&cpipe->pipe_sel.si_note, 1);
	cpipe->pipe_present = PIPE_FINALIZED;
	knlist_destroy(&cpipe->pipe_sel.si_note);

	/*
	 * If both endpoints are now closed, release the memory for the
	 * pipe pair.  If not, unlock.
	 */
	if (ppipe->pipe_present == PIPE_FINALIZED) {
		PIPE_UNLOCK(cpipe);
		mac_pipe_destroy(pp);
		uma_zfree(pipe_zone, cpipe->pipe_pair);
	} else
		PIPE_UNLOCK(cpipe);
}

static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = kn->kn_fp->f_data;
	PIPE_LOCK(cpipe);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer->pipe_present != PIPE_ACTIVE) {
			/* other end of pipe has been closed */
			PIPE_UNLOCK(cpipe);
			return (EPIPE);
		}
		cpipe = cpipe->pipe_peer;
		break;
	default:
		PIPE_UNLOCK(cpipe);
		return (EINVAL);
	}

	knlist_add(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;

	PIPE_LOCK(cpipe);
	if (kn->kn_filter == EVFILT_WRITE)
		cpipe = cpipe->pipe_peer;
	knlist_remove(&cpipe->pipe_sel.si_note, kn, 1);
	PIPE_UNLOCK(cpipe);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	int ret;

	PIPE_LOCK(rpipe);
	kn->kn_data = rpipe->pipe_buffer.cnt;
	if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
		kn->kn_data = rpipe->pipe_map.cnt;

	if ((rpipe->pipe_state & PIPE_EOF) ||
	    wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	ret = kn->kn_data > 0;
	PIPE_UNLOCK(rpipe);
	return (ret);
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;

	PIPE_LOCK(rpipe);
	if (wpipe->pipe_present != PIPE_ACTIVE ||
	    (wpipe->pipe_state & PIPE_EOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		PIPE_UNLOCK(rpipe);
		return (1);
	}
	kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
	if (wpipe->pipe_state & PIPE_DIRECTW)
		kn->kn_data = 0;

	PIPE_UNLOCK(rpipe);
	return (kn->kn_data >= PIPE_BUF);
}
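
/*
 * The two filters above back EVFILT_READ/EVFILT_WRITE for pipes.  A hedged
 * userland sketch of waiting for readability with kqueue(2):
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent register");
 *	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
 *		printf("readable: %jd bytes (EOF=%d)\n",
 *		    (intmax_t)kev.data, (kev.flags & EV_EOF) != 0);
 */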