/*
 * Copyright (c) 1996 John S. Dyson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };
MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
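
/*
 * Illustrative sketch (not part of the build): how the unbounded
 * rindex/windex pair used throughout this file yields the fill level of
 * the circular buffer, and how the MINPIPESIZE/MAXPIPESIZE watermarks
 * derived above would bound it.  The EX_* constants are stand-ins for
 * this example only.
 */
#if 0
#include <stdio.h>

#define EX_PIPE_SIZE	16384		/* stand-in for PIPE_SIZE */
#define EX_MINPIPESIZE	(EX_PIPE_SIZE/3)
#define EX_MAXPIPESIZE	(2*EX_PIPE_SIZE/3)

int
main(void)
{
	unsigned int rindex = 0, windex = 0;

	windex += 9000;			/* writer produced 9000 bytes */
	rindex += 4000;			/* reader consumed 4000 bytes */

	/* Indices only grow; their difference is the number of bytes queued. */
	printf("queued %u bytes (low water %d, high water %d)\n",
	       windex - rindex, EX_MINPIPESIZE, EX_MAXPIPESIZE);
	return (0);
}
#endif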
/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	   CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	   CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	   CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	   CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	   CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	   CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");

static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	   CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
	   CTLFLAG_RW, &pipe_mpsafe, 0, "");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	   CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	   CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		KNOTE(&cpipe->pipe_sel.si_note, 0);
	}
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;	/* mark that a waiter exists */
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);	/* only wake if someone waited */
	} else {
		*ipp = 0;
	}
}
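
/*
 * Illustrative userland sketch (not part of the build) of the same
 * serialization pattern pipe_start_uio()/pipe_end_uio() implement: an
 * in-progress flag where -1 means "someone is waiting", using a mutex
 * and condition variable in place of tokens and tsleep()/wakeup().
 * All ex_* names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ex_cond = PTHREAD_COND_INITIALIZER;
static int ex_ip;			/* 0 idle, 1 busy, -1 busy+waiters */

static void
ex_start_uio(void)
{
	pthread_mutex_lock(&ex_lock);
	while (ex_ip) {
		ex_ip = -1;		/* mark that a waiter exists */
		pthread_cond_wait(&ex_cond, &ex_lock);
	}
	ex_ip = 1;
	pthread_mutex_unlock(&ex_lock);
}

static void
ex_end_uio(void)
{
	pthread_mutex_lock(&ex_lock);
	if (ex_ip < 0)			/* only wake if someone waited */
		pthread_cond_broadcast(&ex_cond);
	ex_ip = 0;
	pthread_mutex_unlock(&ex_lock);
}
#endif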
static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;
	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;
	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);
	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
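
/*
 * Userland view of the syscall above (illustrative, not part of the
 * build): sys_pipe() returns the two descriptors through sysmsg_fds[],
 * which libc hands back as fds[0] (read side) and fds[1] (write side).
 */
#if 0
#include <err.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char buf[5];

	if (pipe(fds) < 0)
		err(1, "pipe");
	if (write(fds[1], "ping", 5) != 5)		/* write side */
		err(1, "write");
	if (read(fds[0], buf, sizeof(buf)) != 5)	/* read side */
		err(1, "read");
	return (0);
}
#endif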
/*
 * Allocate kva for pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages;
	int error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1, VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
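
/*
 * Worked example of the npages arithmetic above (illustrative, not part
 * of the build): round_page() rounds the byte count up to a page
 * boundary, so with 4KB pages a request of 16KB+1 bytes reserves five
 * pages.  The ex_* names are stand-ins for this sketch only.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define ex_round_page(x) \
	(((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int
main(void)
{
	unsigned long size = 16384 + 1;

	printf("%lu bytes -> %lu pages\n", size,
	       ex_round_page(size) / EX_PAGE_SIZE);	/* prints 5 */
	return (0);
}
#endif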
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return (0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;
	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu on large reads; check for signals
		 * every so often.
		 */
		if (bigread && --bigcount == 0) {
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);
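
			/*
			 * Worked example of the masking above (buffer size
			 * is a power of two): with size 16384 and an
			 * unbounded rindex of 40000, rindex & (size - 1)
			 * yields offset 7232, and size - 7232 = 9152
			 * contiguous bytes can be copied before the
			 * buffer wraps.
			 */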
			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}
		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif
		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;
		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}
		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * rates.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);
	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (rpipe->pipe_state & PIPE_WANTW) {
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			lwkt_reltoken(&wlock);
			wakeup(rpipe);
		} else {
			lwkt_reltoken(&wlock);
		}
	}

	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is now available in the buffer, wake up any
	 * writers blocked in select/poll.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
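
/*
 * Illustrative sketch (not part of the build) of the bounded-spin idea
 * used in pipe_read() above, expressed with a generic monotonic clock
 * instead of the kernel's tsc_get_target()/tsc_test_target() helpers.
 * The ex_* names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <time.h>

static uint64_t
ex_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec);
}

/* Spin up to delay_ns waiting for *flag to become nonzero. */
static int
ex_poll_for_data(volatile int *flag, uint64_t delay_ns)
{
	uint64_t target = ex_now_ns() + delay_ns;

	while (ex_now_ns() < target) {
		if (*flag)
			return (1);	/* data arrived; skip the sleep */
	}
	return (0);			/* fall back to a real sleep */
}
#endif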
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int segsize;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;
->uio_resid
) {
762 if (wpipe
->pipe_state
& PIPE_WEOF
) {
770 if (bigwrite
&& --bigcount
== 0) {
773 if (CURSIG(curthread
->td_lwp
)) {
779 windex
= wpipe
->pipe_buffer
.windex
&
780 (wpipe
->pipe_buffer
.size
- 1);
781 space
= wpipe
->pipe_buffer
.size
-
782 (wpipe
->pipe_buffer
.windex
- wpipe
->pipe_buffer
.rindex
);
785 /* Writes of size <= PIPE_BUF must be atomic. */
786 if ((space
< uio
->uio_resid
) && (orig_resid
<= PIPE_BUF
))
		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			wpipe->pipe_buffer.windex += space;
		}
		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);
		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
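
/*
 * Userland consequence of the PIPE_BUF rule enforced in pipe_write()
 * above (illustrative, not part of the build): when several processes
 * write to the same pipe, writes of at most PIPE_BUF bytes are never
 * interleaved with each other, so fixed-size records up to that limit
 * need no extra locking.  ex_log_record() is hypothetical.
 */
#if 0
#include <limits.h>
#include <string.h>
#include <unistd.h>

static void
ex_log_record(int pipe_wfd, const char *msg)
{
	char rec[PIPE_BUF];
	size_t len = strlen(msg);

	if (len > sizeof(rec))
		len = sizeof(rec);
	memcpy(rec, msg, len);
	/* <= PIPE_BUF bytes: delivered atomically, never interleaved */
	(void)write(pipe_wfd, rec, len);
}
#endif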
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}
		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}
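
/*
 * Userland view of the checks above (illustrative, not part of the
 * build): poll() reports a pipe's read side readable once any bytes are
 * queued or EOF is reached, and the write side writable while at least
 * PIPE_BUF bytes of space remain.  ex_wait_readable() is hypothetical.
 */
#if 0
#include <poll.h>
#include <stdio.h>

static void
ex_wait_readable(int pipe_rfd)
{
	struct pollfd pfd = { .fd = pipe_rfd, .events = POLLIN };

	if (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLIN)
			printf("data queued or EOF\n");
		if (pfd.revents & POLLHUP)
			printf("peer closed\n");
	}
}
#endif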
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;

	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch (how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}
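
/*
 * Userland view of pipe_shutdown() (illustrative, not part of the
 * build): since these pipes are full-duplex, shutdown(2) can close one
 * direction while leaving the other usable.  ex_half_close() is
 * hypothetical.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void
ex_half_close(int pipefd)
{
	/* Stop our writes; reads on this fd keep working. */
	shutdown(pipefd, SHUT_WR);
}
#endif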
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			  (vm_offset_t)cpipe->pipe_buffer.buffer,
			  cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization).
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			KNOTE(&ppipe->pipe_sel.si_note, 0);
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources,
	 * either returning the pipe structure to the per-cpu cache
	 * or freeing it outright.
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}
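
/*
 * Illustrative sketch (not part of the build) of the per-cpu free-list
 * caching pattern used above: freed objects are threaded onto a small
 * per-cpu list through an otherwise-unused pointer field (pipe_peer in
 * the code above), capped at a maximum depth, so most allocations
 * avoid the general allocator.  All ex_* names are hypothetical.
 */
#if 0
#include <stdlib.h>

struct ex_obj {
	struct ex_obj *next;	/* reused link, like pipe_peer above */
	char payload[64];
};

static struct ex_obj *ex_cache;		/* one of these per cpu */
static int ex_cachecount;
#define EX_MAX_CACHE 16

static struct ex_obj *
ex_alloc(void)
{
	struct ex_obj *obj;

	if ((obj = ex_cache) != NULL) {
		ex_cache = obj->next;
		--ex_cachecount;
		obj->next = NULL;
		return (obj);
	}
	return (calloc(1, sizeof(*obj)));
}

static void
ex_free(struct ex_obj *obj)
{
	if (ex_cachecount >= EX_MAX_CACHE) {
		free(obj);
	} else {
		obj->next = ex_cache;
		ex_cache = obj;
		++ex_cachecount;
	}
}
#endif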
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int space;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
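
/*
 * Userland view of the filters above (illustrative, not part of the
 * build): EVFILT_READ fires once data is queued or EOF is seen, with
 * kn_data returned as the byte count; EVFILT_WRITE fires while at
 * least PIPE_BUF bytes of space remain.  ex_wait_pipe_readable() is
 * hypothetical and omits error handling for brevity.
 */
#if 0
#include <sys/event.h>
#include <stdio.h>

static void
ex_wait_pipe_readable(int pipe_rfd)
{
	struct kevent kev, ret;
	int kq = kqueue();

	EV_SET(&kev, pipe_rfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	if (kevent(kq, &kev, 1, &ret, 1, NULL) > 0) {
		if (ret.flags & EV_EOF)
			printf("write side closed\n");
		else
			printf("%ld bytes readable\n", (long)ret.data);
	}
}
#endif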
);