/*
 * Copyright (c) 2006-2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_syslink.c,v 1.16 2008/10/26 04:29:19 sephe Exp $
 */
/*
 * This module implements the core syslink() system call and provides
 * glue for kernel syslink frontends and backends, creating an intra-host
 * communications infrastructure and DMA transport abstraction.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/alist.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/sysref.h>
#include <sys/syslink.h>
#include <sys/syslink_msg.h>
#include <netinet/in.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include "opt_syslink.h"
/*
 * Syslink Connection abstraction
 */
struct slcommon {
	struct spinlock spin;
	int		refs;
};

struct sldesc {
	TAILQ_HEAD(, slmsg) inq;	/* incoming messages */
	struct slmsg_rb_tree reply_rb_root; /* replies to requests */
	struct spinlock	spin;
	struct sldesc	*peer;		/* peer syslink, if any */
	struct file	*xfp;		/* external file pointer */
	struct slcommon	*common;
	int		flags;
	int		rwaiters;	/* number of threads waiting */
	int		wblocked;	/* blocked waiting for us to drain */
	size_t		cmdbytes;	/* unreplied commands pending */
	size_t		repbytes;	/* undrained replies pending */
	int		(*backend_wblocked)(struct sldesc *, int, sl_proto_t);
	int		(*backend_write)(struct sldesc *, struct slmsg *);
	void		(*backend_reply)(struct sldesc *, struct slmsg *,
					 struct slmsg *);
	void		(*backend_dispose)(struct sldesc *, struct slmsg *);
};

#define SLF_RSHUTDOWN	0x0001
#define SLF_WSHUTDOWN	0x0002
static int syslink_cmd_new(struct syslink_info_new *info, int *result);
static struct sldesc *allocsldesc(struct slcommon *common);
static void setsldescfp(struct sldesc *sl, struct file *fp);
static void shutdownsldesc(struct sldesc *sl, int how);
static void shutdownsldesc2(struct sldesc *sl, int how);
static void sldrop(struct sldesc *sl);
static int syslink_validate_msg(struct syslink_msg *msg, int bytes);
static int syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
				int swapit, int depth);

static int sl_local_mmap(struct slmsg *slmsg, char *base, size_t len);
static void sl_local_munmap(struct slmsg *slmsg);

static int backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_user(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_user(struct sldesc *sl, struct slmsg *slcmd,
			       struct slmsg *slrep);
static void backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg);

static int backend_wblocked_kern(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_kern(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_kern(struct sldesc *sl, struct slmsg *slcmd,
			       struct slmsg *slrep);
static void backend_dispose_kern(struct sldesc *sl, struct slmsg *slmsg);
static void slmsg_put(struct slmsg *slmsg);
/*
 * Objcache memory backend
 *
 * All three object caches return slmsg structures but each is optimized
 * for syslink message buffers of varying sizes.  We use the slightly
 * more complex ctor/dtor API in order to provide ready-to-go slmsg's.
 */
static struct objcache *sl_objcache_big;
static struct objcache *sl_objcache_small;
static struct objcache *sl_objcache_none;

MALLOC_DEFINE(M_SYSLINK, "syslink", "syslink manager");
static boolean_t slmsg_ctor(void *data, void *private, int ocflags);
static void slmsg_dtor(void *data, void *private);

static void
syslinkinit(void *dummy __unused)
{
	size_t n = sizeof(struct slmsg);

	sl_objcache_none = objcache_create_mbacked(M_SYSLINK, n, NULL, 64,
						   slmsg_ctor, slmsg_dtor,
						   &sl_objcache_none);
	sl_objcache_small = objcache_create_mbacked(M_SYSLINK, n, NULL, 64,
						    slmsg_ctor, slmsg_dtor,
						    &sl_objcache_small);
	sl_objcache_big = objcache_create_mbacked(M_SYSLINK, n, NULL, 16,
						  slmsg_ctor, slmsg_dtor,
						  &sl_objcache_big);
}
static boolean_t
slmsg_ctor(void *data, void *private, int ocflags)
{
	struct slmsg *slmsg = data;

	bzero(slmsg, sizeof(*slmsg));

	slmsg->oc = *(struct objcache **)private;
	if (slmsg->oc == sl_objcache_none) {
		slmsg->maxsize = 0;
	} else if (slmsg->oc == sl_objcache_small) {
		slmsg->maxsize = SLMSG_SMALL;
	} else if (slmsg->oc == sl_objcache_big) {
		slmsg->maxsize = SLMSG_BIG;
	} else {
		panic("slmsg_ctor: bad objcache?\n");
	}
	if (slmsg->maxsize) {
		slmsg->msg = kmalloc(slmsg->maxsize,
				     M_SYSLINK, M_WAITOK|M_ZERO);
	}
	xio_init(&slmsg->xio);
	return(TRUE);
}
static void
slmsg_dtor(void *data, void *private)
{
	struct slmsg *slmsg = data;

	if (slmsg->maxsize && slmsg->msg) {
		kfree(slmsg->msg, M_SYSLINK);
		slmsg->msg = NULL;
	}
}

SYSINIT(syslink, SI_BOOT2_MACHDEP, SI_ORDER_ANY, syslinkinit, NULL)
static int rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2);
RB_GENERATE2(slmsg_rb_tree, slmsg, rbnode, rb_slmsg_compare,
	     sysid_t, msg->sm_msgid);
static int syslink_enabled;
SYSCTL_NODE(_kern, OID_AUTO, syslink, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_syslink, OID_AUTO, enabled,
	   CTLFLAG_RW, &syslink_enabled, 0, "Enable SYSLINK");
static size_t syslink_bufsize = 65536;
SYSCTL_UINT(_kern_syslink, OID_AUTO, bufsize,
	    CTLFLAG_RW, &syslink_bufsize, 0, "Maximum buffer size");
/*
 * Fileops API - typically used to glue a userland frontend with a
 * kernel backend.
 */
static int slfileop_read(struct file *fp, struct uio *uio,
			 struct ucred *cred, int flags);
static int slfileop_write(struct file *fp, struct uio *uio,
			  struct ucred *cred, int flags);
static int slfileop_close(struct file *fp);
static int slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred);
static int slfileop_shutdown(struct file *fp, int how);
static int slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data,
			  struct ucred *cred, struct sysmsg *msg);
static int slfileop_kqfilter(struct file *fp, struct knote *kn);

static struct fileops syslinkops = {
	.fo_read =	slfileop_read,
	.fo_write =	slfileop_write,
	.fo_ioctl =	slfileop_ioctl,
	.fo_kqfilter =	slfileop_kqfilter,
	.fo_stat =	slfileop_stat,
	.fo_close =	slfileop_close,
	.fo_shutdown =	slfileop_shutdown
};
/************************************************************************
 *			PRIMARY SYSTEM CALL INTERFACE			*
 ************************************************************************
 *
 * syslink(int cmd, struct syslink_info *info, size_t bytes)
 */
int
sys_syslink(struct syslink_args *uap)
{
	union syslink_info_all info;
	int error = 0;

	/*
	 * System call is under construction and disabled by default.
	 * Superuser access is also required for now, but eventually
	 * will not be needed.
	 */
	if (syslink_enabled == 0)
		return (EAUTH);
	error = priv_check(curthread, PRIV_ROOT);
	if (error)
		return (error);

	/*
	 * Load and validate the info structure.  Unloaded bytes are zeroed
	 * out.  The label field must always be 0-filled, even if not used.
	 */
	bzero(&info, sizeof(info));
	if ((unsigned)uap->bytes <= sizeof(info)) {
		if (uap->bytes)
			error = copyin(uap->info, &info, uap->bytes);
	} else {
		error = EINVAL;
	}
	if (error)
		return (error);

	/*
	 * Process the command
	 */
	switch(uap->cmd) {
	case SYSLINK_CMD_NEW:
		error = syslink_cmd_new(&info.cmd_new, &uap->sysmsg_result);
		break;
	default:
		error = EINVAL;
		break;
	}

	/*
	 * Write the info structure back to userland if requested.
	 */
	if (error == 0 && info.head.wbflag)
		copyout(&info, uap->info, uap->bytes);
	return (error);
}
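
#if 0
/*
 * Illustrative sketch only (not part of the original file): how a userland
 * program might invoke the syscall above to create a linked descriptor
 * pair.  The syslink() wrapper name and the fds[] member layout of struct
 * syslink_info_new are assumptions for illustration, not guarantees.
 */
static int
example_make_syslink_pair(int *fd_frontend, int *fd_backend)
{
	union syslink_info_all info;

	bzero(&info, sizeof(info));
	if (syslink(SYSLINK_CMD_NEW, (struct syslink_info *)&info,
		    sizeof(info)) < 0)
		return (-1);
	*fd_frontend = info.cmd_new.fds[0];	/* assumed layout */
	*fd_backend = info.cmd_new.fds[1];
	return (0);
}
#endif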
/*
 * Create a linked pair of descriptors, like a pipe.
 */
static
int
syslink_cmd_new(struct syslink_info_new *info, int *result)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp1;
	struct file *fp2;
	struct sldesc *sl;
	struct sldesc *slpeer;
	int error;
	int fd1, fd2;

	error = falloc(td->td_lwp, &fp1, &fd1);
	if (error)
		return(error);
	error = falloc(td->td_lwp, &fp2, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(fp1);
		return(error);
	}
	slpeer = allocsldesc(NULL);
	slpeer->backend_wblocked = backend_wblocked_user;
	slpeer->backend_write = backend_write_user;
	slpeer->backend_reply = backend_reply_user;
	slpeer->backend_dispose = backend_dispose_user;
	sl = allocsldesc(slpeer->common);
	sl->peer = slpeer;
	sl->backend_wblocked = backend_wblocked_user;
	sl->backend_write = backend_write_user;
	sl->backend_reply = backend_reply_user;
	sl->backend_dispose = backend_dispose_user;
	slpeer->peer = sl;

	setsldescfp(sl, fp1);
	setsldescfp(slpeer, fp2);

	fsetfd(fdp, fp1, fd1);
	fdrop(fp1);
	fsetfd(fdp, fp2, fd2);
	fdrop(fp2);

	info->head.wbflag = 1;	/* write back */
	info->fds[0] = fd1;
	info->fds[1] = fd2;

	return(0);
}
/************************************************************************
 *			LOW LEVEL SLDESC SUPPORT			*
 ************************************************************************
 */
static
struct sldesc *
allocsldesc(struct slcommon *common)
{
	struct sldesc *sl;

	sl = kmalloc(sizeof(struct sldesc), M_SYSLINK, M_WAITOK|M_ZERO);
	if (common == NULL)
		common = kmalloc(sizeof(*common), M_SYSLINK, M_WAITOK|M_ZERO);
	TAILQ_INIT(&sl->inq);		/* incoming requests */
	RB_INIT(&sl->reply_rb_root);	/* match incoming replies */
	spin_init(&sl->spin);
	sl->common = common;
	++common->refs;
	return(sl);
}
static
void
setsldescfp(struct sldesc *sl, struct file *fp)
{
	sl->xfp = fp;
	fp->f_type = DTYPE_SYSLINK;
	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &syslinkops;
	fp->f_data = sl;
}
/*
 * Red-black tree compare function
 */
static
int
rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2)
{
	if (msg1->msg->sm_msgid < msg2->msg->sm_msgid)
		return(-1);
	if (msg1->msg->sm_msgid == msg2->msg->sm_msgid)
		return(0);
	return(1);
}
static
void
shutdownsldesc(struct sldesc *sl, int how)
{
	struct slmsg *slmsg;
	int rhow;

	shutdownsldesc2(sl, how);

	/*
	 * Return unread and unreplied messages
	 */
	spin_lock(&sl->spin);
	while ((slmsg = TAILQ_FIRST(&sl->inq)) != NULL) {
		TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
		spin_unlock(&sl->spin);
		if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
			sl->repbytes -= slmsg->maxsize;
			slmsg->flags &= ~SLMSGF_ONINQ;
			sl->peer->backend_dispose(sl->peer, slmsg);
		}
		/* leave ONINQ set for commands, it will be cleared below */
		spin_lock(&sl->spin);
	}
	while ((slmsg = RB_ROOT(&sl->reply_rb_root)) != NULL) {
		RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slmsg);
		sl->cmdbytes -= slmsg->maxsize;
		spin_unlock(&sl->spin);
		slmsg->flags &= ~SLMSGF_ONINQ;
		sl->peer->backend_reply(sl->peer, slmsg, NULL);
		spin_lock(&sl->spin);
	}
	spin_unlock(&sl->spin);

	/*
	 * Call shutdown on the peer with the opposite flags
	 */
	rhow = 0;
	switch(how) {
	case SHUT_RD:
		rhow = SHUT_WR;
		break;
	case SHUT_WR:
		rhow = SHUT_RD;
		break;
	case SHUT_RDWR:
		rhow = SHUT_RDWR;
		break;
	}
	shutdownsldesc2(sl->peer, rhow);
}
static
void
shutdownsldesc2(struct sldesc *sl, int how)
{
	spin_lock(&sl->spin);
	switch(how) {
	case SHUT_RD:
		sl->flags |= SLF_RSHUTDOWN;
		break;
	case SHUT_WR:
		sl->flags |= SLF_WSHUTDOWN;
		break;
	case SHUT_RDWR:
		sl->flags |= SLF_RSHUTDOWN | SLF_WSHUTDOWN;
		break;
	}
	spin_unlock(&sl->spin);

	/*
	 * Handle signaling on the user side
	 */
	if (how == SHUT_RD || how == SHUT_RDWR) {
		if (sl->rwaiters)
			wakeup(&sl->rwaiters);
	}
	if (how == SHUT_WR || how == SHUT_RDWR) {
		if (sl->wblocked) {
			sl->wblocked = 0;	/* race ok */
			wakeup(&sl->wblocked);
		}
	}
}
static
void
sldrop(struct sldesc *sl)
{
	struct sldesc *slpeer;

	spin_lock(&sl->common->spin);
	if (--sl->common->refs == 0) {
		spin_unlock(&sl->common->spin);
		if ((slpeer = sl->peer) != NULL) {
			sl->peer = NULL;
			slpeer->peer = NULL;
			slpeer->common = NULL;
			KKASSERT(slpeer->xfp == NULL);
			KKASSERT(TAILQ_EMPTY(&slpeer->inq));
			KKASSERT(RB_EMPTY(&slpeer->reply_rb_root));
			kfree(slpeer, M_SYSLINK);
		}
		KKASSERT(sl->xfp == NULL);
		KKASSERT(TAILQ_EMPTY(&sl->inq));
		KKASSERT(RB_EMPTY(&sl->reply_rb_root));
		kfree(sl->common, M_SYSLINK);
		sl->common = NULL;
		kfree(sl, M_SYSLINK);
	} else {
		spin_unlock(&sl->common->spin);
	}
}
static
void
slmsg_put(struct slmsg *slmsg)
{
	if (slmsg->flags & SLMSGF_HASXIO) {
		slmsg->flags &= ~SLMSGF_HASXIO;
		get_mplock();
		xio_release(&slmsg->xio);
		rel_mplock();
	}
	slmsg->flags &= ~SLMSGF_LINMAP;
	objcache_put(slmsg->oc, slmsg);
}
/************************************************************************
 *				FILEOPS API				*
 ************************************************************************
 *
 * Implement userland fileops.
 */
static
int
slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct sldesc *sl = fp->f_data;		/* fp refed on call */
	struct slmsg *slmsg;
	struct iovec *iov0;
	struct iovec *iov1;
	struct syslink_msg *wmsg;
	int error;
	int nbio;

	/*
	 * Kinda messy.  Figure out the non-blocking state
	 */
	if (flags & O_FBLOCKING)
		nbio = 0;
	else if (flags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Validate the uio.
	 *
	 * iov0 - message buffer
	 * iov1 - DMA buffer or backup buffer
	 */
	if (uio->uio_iovcnt < 1) {
		error = 0;
		goto done2;
	}
	iov0 = &uio->uio_iov[0];
	if (uio->uio_iovcnt > 2) {
		error = EINVAL;
		goto done2;
	}

	/*
	 * Get a message, blocking if necessary.
	 */
	spin_lock(&sl->spin);
	while ((slmsg = TAILQ_FIRST(&sl->inq)) == NULL) {
		if (sl->flags & SLF_RSHUTDOWN) {
			error = 0;
			goto done1;
		}
		if (nbio) {
			error = EAGAIN;
			goto done1;
		}
		++sl->rwaiters;
		error = ssleep(&sl->rwaiters, &sl->spin, PCATCH, "slrmsg", 0);
		--sl->rwaiters;
		if (error)
			goto done1;
	}

	/*
	 * We have a message and still hold the spinlock.  Make sure the
	 * uio has enough room to hold the message.
	 *
	 * Note that replies do not have XIOs.
	 */
	if (slmsg->msgsize > iov0->iov_len) {
		error = EMSGSIZE;
		goto done1;
	}
	if (slmsg->xio.xio_bytes) {
		if (uio->uio_iovcnt != 2) {
			error = EMSGSIZE;
			goto done1;
		}
		iov1 = &uio->uio_iov[1];
		if (slmsg->xio.xio_bytes > iov1->iov_len) {
			error = EMSGSIZE;
			goto done1;
		}
	} else {
		iov1 = NULL;
	}

	/*
	 * Dequeue the message.  Adjust repbytes immediately.  cmdbytes
	 * are adjusted when the command is replied to, not here.
	 */
	TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
	if (slmsg->msg->sm_proto & SM_PROTO_REPLY)
		sl->repbytes -= slmsg->maxsize;
	spin_unlock(&sl->spin);

	/*
	 * Load the message data into the user buffer.
	 *
	 * If receiving a command an XIO may exist specifying a DMA buffer.
	 * For commands, if DMAW is set we have to copy or map the buffer
	 * so the caller can access the data being written.  If DMAR is set
	 * we do not have to copy but we still must map the buffer so the
	 * caller can directly fill in the data being requested.
	 */
	wmsg = slmsg->msg;
	error = uiomove((void *)slmsg->msg, slmsg->msgsize, uio);
	if (error == 0 && slmsg->xio.xio_bytes &&
	    (wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
		if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW) {
			/*
			 * Data being passed to caller or being passed in both
			 * directions, copy or map.
			 */
			if ((flags & O_MAPONREAD) &&
			    (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
				error = sl_local_mmap(slmsg,
						      iov1->iov_base,
						      iov1->iov_len);
				if (error) {
					error = xio_copy_xtou(&slmsg->xio, 0,
							iov1->iov_base,
							slmsg->xio.xio_bytes);
				}
			} else {
				error = xio_copy_xtou(&slmsg->xio, 0,
						      iov1->iov_base,
						      slmsg->xio.xio_bytes);
			}
		} else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR) {
			/*
			 * Data will be passed back to originator, map
			 * the buffer if we can, else use the backup
			 * buffer at the same VA supplied by the caller.
			 */
			if ((flags & O_MAPONREAD) &&
			    (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
				error = sl_local_mmap(slmsg,
						      iov1->iov_base,
						      iov1->iov_len);
				error = 0; /* ignore errors */
			}
		}
	}

	if (error) {
		/*
		 * Requeue the message if we could not read it successfully
		 */
		spin_lock(&sl->spin);
		TAILQ_INSERT_HEAD(&sl->inq, slmsg, tqnode);
		slmsg->flags |= SLMSGF_ONINQ;
		spin_unlock(&sl->spin);
	} else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
		/*
		 * Dispose of any received reply after we've copied it
		 * to userland.  We don't need the slmsg any more.
		 */
		slmsg->flags &= ~SLMSGF_ONINQ;
		sl->peer->backend_dispose(sl->peer, slmsg);
		if (sl->wblocked && sl->repbytes < syslink_bufsize) {
			sl->wblocked = 0;	/* MP race ok here */
			wakeup(&sl->wblocked);
		}
	} else {
		/*
		 * Leave the command in the RB tree but clear ONINQ now
		 * that we have returned it to userland so userland can
		 * reply to it.
		 */
		slmsg->flags &= ~SLMSGF_ONINQ;
	}
	goto done2;
done1:
	spin_unlock(&sl->spin);
done2:
	return(error);
}
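
#if 0
/*
 * Illustrative sketch only (not part of the original file): a userland
 * consumer reading from its syslink descriptor.  Per the conventions
 * enforced above, iov[0] receives the syslink message and an optional
 * iov[1] receives DMA data (or acts as the backup buffer).  This is
 * userland code; names and buffer sizes are arbitrary assumptions.
 */
static ssize_t
example_read_syslink_msg(int fd, void *msgbuf, size_t msglen,
			 void *dmabuf, size_t dmalen)
{
	struct iovec iov[2];

	iov[0].iov_base = msgbuf;	/* syslink message */
	iov[0].iov_len = msglen;
	iov[1].iov_base = dmabuf;	/* DMA or backup buffer */
	iov[1].iov_len = dmalen;
	return (readv(fd, iov, 2));
}
#endif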
/*
 * Userland writes syslink message (optionally with DMA buffer in iov[1]).
 */
static
int
slfileop_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct sldesc *sl = fp->f_data;
	struct slmsg *slmsg;
	struct slmsg *slcmd;
	struct syslink_msg sltmp;
	struct syslink_msg *wmsg;	/* wire message */
	struct iovec *iov0;
	struct iovec *iov1;
	sl_proto_t proto;
	int nbio;
	int error;
	int xflags;

	/*
	 * Kinda messy.  Figure out the non-blocking state
	 */
	if (flags & O_FBLOCKING)
		nbio = 0;
	else if (flags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Validate the uio
	 */
	if (uio->uio_iovcnt < 1) {
		error = 0;
		goto done2;
	}
	iov0 = &uio->uio_iov[0];
	if (iov0->iov_len > SLMSG_BIG) {
		error = EMSGSIZE;
		goto done2;
	}
	if (uio->uio_iovcnt > 2) {
		error = EINVAL;
		goto done2;
	}
	if (uio->uio_iovcnt > 1) {
		iov1 = &uio->uio_iov[1];
		if (iov1->iov_len > XIO_INTERNAL_SIZE) {
			error = EMSGSIZE;
			goto done2;
		}
		if ((intptr_t)iov1->iov_base & PAGE_MASK) {
			error = EINVAL;
			goto done2;
		}
	} else {
		iov1 = NULL;
	}

	/*
	 * Handle the buffer-full case.  slpeer cmdbytes is managed
	 * by the backend function, not us, so if the callback just
	 * directly implements the message and never adjusts cmdbytes,
	 * we will never sleep here.
	 */
	if (sl->flags & SLF_WSHUTDOWN) {
		error = EPIPE;
		goto done2;
	}

	/*
	 * Only commands can block the pipe, not replies.  Otherwise a
	 * deadlock is possible.
	 */
	error = copyin(iov0->iov_base, &sltmp, sizeof(sltmp));
	if (error)
		goto done2;
	if ((proto = sltmp.sm_proto) & SM_PROTO_ENDIAN_REV)
		proto = bswap16(proto);
	error = sl->peer->backend_wblocked(sl->peer, nbio, proto);
	if (error)
		goto done2;

	/*
	 * Allocate a slmsg and load the message.  Note that the bytes
	 * returned to userland only reflects the primary syslink message
	 * and does not include any DMA buffers.
	 */
	if (iov0->iov_len <= SLMSG_SMALL)
		slmsg = objcache_get(sl_objcache_small, M_WAITOK);
	else
		slmsg = objcache_get(sl_objcache_big, M_WAITOK);
	slmsg->msgsize = iov0->iov_len;
	wmsg = slmsg->msg;

	error = uiomove((void *)wmsg, iov0->iov_len, uio);
	if (error)
		goto done1;
	error = syslink_validate_msg(wmsg, slmsg->msgsize);
	if (error)
		goto done1;

	if ((wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
		/*
		 * Install the XIO for commands if any DMA flags are set.
		 *
		 * XIOF_VMLINEAR requires that the XIO represent a
		 * contiguous set of pages associated with a single VM
		 * object (so the reader side can mmap it easily).
		 *
		 * XIOF_VMLINEAR might not be set when the kernel sends
		 * commands to userland so the reader side backs off to
		 * a backup buffer if it isn't set, but we require it
		 * for userland writes.
		 */
		xflags = XIOF_VMLINEAR;
		if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR)
			xflags |= XIOF_READ | XIOF_WRITE;
		else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW)
			xflags |= XIOF_READ;
		if (xflags && iov1) {
			get_mplock();
			error = xio_init_ubuf(&slmsg->xio, iov1->iov_base,
					      iov1->iov_len, xflags);
			rel_mplock();
			if (error)
				goto done1;
			slmsg->flags |= SLMSGF_HASXIO;
		}
		error = sl->peer->backend_write(sl->peer, slmsg);
	} else {
		/*
		 * Replies have to be matched up against received commands.
		 */
		spin_lock(&sl->spin);
		slcmd = slmsg_rb_tree_RB_LOOKUP(&sl->reply_rb_root,
						slmsg->msg->sm_msgid);
		if (slcmd == NULL || (slcmd->flags & SLMSGF_ONINQ)) {
			error = ENOENT;
			spin_unlock(&sl->spin);
			goto done1;
		}
		RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slcmd);
		sl->cmdbytes -= slcmd->maxsize;
		spin_unlock(&sl->spin);

		/*
		 * If the original command specified DMAR, has an xio, and
		 * our write specifies a DMA buffer, then we can do a
		 * copyback.  But if we are linearly mapped and the caller
		 * is using the map base address, then the caller filled in
		 * the data via the direct memory map and no copyback is
		 * needed.
		 */
		if ((slcmd->msg->sm_head.se_cmd & SE_CMDF_DMAR) && iov1 &&
		    (slcmd->flags & SLMSGF_HASXIO) &&
		    ((slcmd->flags & SLMSGF_LINMAP) == 0 ||
		     iov1->iov_base != slcmd->vmbase)) {
			size_t count;

			if (iov1->iov_len > slcmd->xio.xio_bytes)
				count = slcmd->xio.xio_bytes;
			else
				count = iov1->iov_len;
			get_mplock();
			error = xio_copy_utox(&slcmd->xio, 0, iov1->iov_base,
					      count);
			rel_mplock();
		}

		/*
		 * If we had mapped a DMA buffer, remove it
		 */
		if (slcmd->flags & SLMSGF_LINMAP) {
			get_mplock();
			sl_local_munmap(slcmd);
			rel_mplock();
		}

		/*
		 * Reply and handle unblocking
		 */
		sl->peer->backend_reply(sl->peer, slcmd, slmsg);
		if (sl->wblocked && sl->cmdbytes < syslink_bufsize) {
			sl->wblocked = 0;	/* MP race ok here */
			wakeup(&sl->wblocked);
		}

		/*
		 * slmsg has already been dealt with, make sure error is
		 * 0 so we do not double-free it.
		 */
		error = 0;
	}
done1:
	if (error)
		slmsg_put(slmsg);
done2:
	return(error);
}
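
#if 0
/*
 * Illustrative sketch only (not part of the original file): a userland
 * frontend writing a command with an associated DMA buffer.  iov[0]
 * carries the syslink message and iov[1] must be a page-aligned buffer
 * no larger than XIO_INTERNAL_SIZE, as checked above.  This is userland
 * code; names and sizes are assumptions.
 */
static ssize_t
example_write_syslink_cmd(int fd, struct syslink_msg *msg, size_t msglen,
			  void *dmabuf, size_t dmalen)
{
	struct iovec iov[2];

	iov[0].iov_base = msg;		/* validated syslink message */
	iov[0].iov_len = msglen;
	iov[1].iov_base = dmabuf;	/* page-aligned DMA buffer */
	iov[1].iov_len = dmalen;
	return (writev(fd, iov, 2));
}
#endif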
/*
 * Close a syslink descriptor.
 *
 * Disassociate the syslink from the file descriptor and disconnect from
 * any peer.
 */
static
int
slfileop_close(struct file *fp)
{
	struct sldesc *sl;

	/*
	 * Disassociate the file pointer.  Take ownership of the ref on the
	 * sldesc.
	 */
	sl = fp->f_data;
	fp->f_data = NULL;
	fp->f_ops = &badfileops;
	sl->xfp = NULL;

	/*
	 * Shutdown both directions.  The other side will not issue API
	 * calls to us after we've shutdown both directions.
	 */
	shutdownsldesc(sl, SHUT_RDWR);

	/*
	 * Cleanup
	 */
	KKASSERT(sl->cmdbytes == 0);
	KKASSERT(sl->repbytes == 0);
	sldrop(sl);
	return(0);
}
static
int
slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return(EINVAL);
}

static
int
slfileop_shutdown(struct file *fp, int how)
{
	shutdownsldesc((struct sldesc *)fp->f_data, how);
	return(0);
}

static
int
slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data,
	       struct ucred *cred, struct sysmsg *msg)
{
	return(EINVAL);
}

static
int
slfileop_kqfilter(struct file *fp, struct knote *kn)
{
	return(0);
}
/************************************************************************
 *			LOCAL MEMORY MAPPING				*
 ************************************************************************
 *
 * This feature is currently not implemented
 */
#if 1

static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
	return (EOPNOTSUPP);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
	/* empty */
}

#else
static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
	struct vmspace *vms = curproc->p_vmspace;
	vm_offset_t addr = (vm_offset_t)base;
	int error;

	/* XXX check user address range */
	error = vm_map_replace(
			&vms->vm_map,
			(vm_offset_t)base, (vm_offset_t)base + len,
			slmsg->xio.xio_pages[0]->object,
			slmsg->xio.xio_pages[0]->pindex << PAGE_SHIFT,
			VM_PROT_READ|VM_PROT_WRITE,
			VM_PROT_READ|VM_PROT_WRITE,
			MAP_DISABLE_SYNCER);
	if (error == 0) {
		slmsg->flags |= SLMSGF_LINMAP;
		slmsg->vmbase = base;
		slmsg->vmsize = len;
	}
	return (error);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
	if (slmsg->flags & SLMSGF_LINMAP) {
		vm_map_remove(&curproc->p_vmspace->vm_map,
			      slmsg->vmbase,
			      slmsg->vmbase + slmsg->vmsize);
		slmsg->flags &= ~SLMSGF_LINMAP;
	}
}

#endif
/************************************************************************
 *			MESSAGE VALIDATION				*
 ************************************************************************
 *
 * Validate the syslink message.  Check that all headers and elements
 * conform.  Correct the endian if necessary.
 *
 * NOTE: If reverse endian needs to be corrected, SE_CMDF_UNTRANSLATED
 * is recursively flipped on all syslink_elm's in the message.  As the
 * message traverses the mesh, multiple flips may occur.  It is
 * up to the RPC protocol layer to correct opaque data payloads and
 * SE_CMDF_UNTRANSLATED prevents the protocol layer from misinterpreting
 * a command or reply element which has not been endian-corrected.
 */
static
int
syslink_validate_msg(struct syslink_msg *msg, int bytes)
{
	int aligned_reclen;
	int swapit;
	int error;

	/*
	 * The raw message must be properly-aligned.
	 */
	if (bytes & SL_ALIGNMASK)
		return (EINVAL);

	while (bytes) {
		/*
		 * The message must at least contain the msgid, bytes, and
		 * protocol fields.
		 */
		if (bytes < SL_MIN_PAD_SIZE)
			return (EINVAL);

		/*
		 * Fix the endian if it is reversed.
		 */
		if (msg->sm_proto & SM_PROTO_ENDIAN_REV) {
			msg->sm_msgid = bswap64(msg->sm_msgid);
			msg->sm_sessid = bswap64(msg->sm_sessid);
			msg->sm_bytes = bswap16(msg->sm_bytes);
			msg->sm_proto = bswap16(msg->sm_proto);
			msg->sm_rlabel = bswap32(msg->sm_rlabel);
			if (msg->sm_proto & SM_PROTO_ENDIAN_REV)
				return (EINVAL);
			swapit = 1;
		} else {
			swapit = 0;
		}

		/*
		 * Validate the contents.  For PADs, the entire payload is
		 * ignored and the minimum message size can be as small as
		 * SL_MIN_PAD_SIZE.
		 */
		if (msg->sm_proto == SMPROTO_PAD) {
			if (msg->sm_bytes < SL_MIN_PAD_SIZE ||
			    msg->sm_bytes > bytes) {
				return (EINVAL);
			}
			/* ignore the entire payload, it can be garbage */
		} else {
			if (msg->sm_bytes < SL_MIN_MSG_SIZE ||
			    msg->sm_bytes > bytes) {
				return (EINVAL);
			}
			error = syslink_validate_elm(
				    &msg->sm_head,
				    msg->sm_bytes -
					offsetof(struct syslink_msg,
						 sm_head),
				    swapit, SL_MAXDEPTH);
			if (error)
				return (error);
		}

		/*
		 * The aligned payload size must be used to locate the
		 * next syslink_msg in the buffer.
		 */
		aligned_reclen = SL_MSG_ALIGN(msg->sm_bytes);
		bytes -= aligned_reclen;
		msg = (void *)((char *)msg + aligned_reclen);
	}
	return (0);
}
static
int
syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
		     int swapit, int depth)
{
	int aligned_reclen;

	/*
	 * If the buffer isn't big enough to fit the header, stop now!
	 */
	if (bytes < SL_MIN_ELM_SIZE)
		return (EINVAL);

	/*
	 * All syslink_elm headers are recursively endian-adjusted.  Opaque
	 * data payloads are not.
	 */
	if (swapit) {
		elm->se_cmd = bswap16(elm->se_cmd) ^ SE_CMDF_UNTRANSLATED;
		elm->se_bytes = bswap16(elm->se_bytes);
		elm->se_aux = bswap32(elm->se_aux);
	}

	/*
	 * Check element size requirements.
	 */
	if (elm->se_bytes < SL_MIN_ELM_SIZE || elm->se_bytes > bytes)
		return (EINVAL);

	/*
	 * Recursively check structured payloads.  A structured payload may
	 * contain as few as 0 recursive elements.
	 */
	if (elm->se_cmd & SE_CMDF_STRUCTURED) {
		if (depth == 0)
			return (EINVAL);
		bytes -= SL_MIN_ELM_SIZE;
		++elm;
		while (bytes > 0) {
			if (syslink_validate_elm(elm, bytes, swapit, depth - 1))
				return (EINVAL);
			aligned_reclen = SL_MSG_ALIGN(elm->se_bytes);
			elm = (void *)((char *)elm + aligned_reclen);
			bytes -= aligned_reclen;
		}
	}
	return (0);
}
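
#if 0
/*
 * Illustrative sketch only (not part of the original file): how a protocol
 * layer might honor SE_CMDF_UNTRANSLATED when pulling a 32 bit value out
 * of an element's opaque payload.  The payload layout (a value directly
 * following the element header) is an assumption for illustration.
 */
static int
example_elm_get_payload32(struct syslink_elm *elm, u_int32_t *valuep)
{
	u_int32_t v;

	if (elm->se_bytes < SL_MIN_ELM_SIZE + sizeof(v))
		return (EINVAL);
	bcopy((char *)elm + SL_MIN_ELM_SIZE, &v, sizeof(v));

	/*
	 * The element header was already endian-corrected by
	 * syslink_validate_elm().  Opaque payload data was not, so correct
	 * it here while the element is still marked untranslated.
	 */
	if (elm->se_cmd & SE_CMDF_UNTRANSLATED)
		v = bswap32(v);
	*valuep = v;
	return (0);
}
#endif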
/************************************************************************
 *		BACKEND FUNCTIONS - USER DESCRIPTOR			*
 ************************************************************************
 *
 * Peer backend links are primarily used when userland creates a pair
 * of linked descriptors.
 */
/*
 * Do any required blocking / nbio handling for attempts to write to
 * a sldesc associated with a user descriptor.
 */
static
int
backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto)
{
	int error = 0;
	size_t *bytesp = (proto & SM_PROTO_REPLY) ?
			 &sl->repbytes : &sl->cmdbytes;

	/*
	 * Block until sufficient data is drained by the target.  It is
	 * ok to have a MP race against cmdbytes.
	 */
	if (*bytesp >= syslink_bufsize) {
		spin_lock(&sl->spin);
		while (*bytesp >= syslink_bufsize) {
			if (sl->flags & SLF_WSHUTDOWN) {
				error = EPIPE;
				break;
			}
			if (nbio) {
				error = EAGAIN;
				break;
			}
			sl->wblocked = 1;
			error = ssleep(&sl->wblocked, &sl->spin,
				       PCATCH, "slwmsg", 0);
			if (error)
				break;
		}
		spin_unlock(&sl->spin);
	}
	return (error);
}
/*
 * Unconditionally write a syslink message to the sldesc associated with
 * a user descriptor.  Command messages are also placed in a red-black
 * tree so their DMA tag (if any) can be accessed and so they can be
 * linked to any reply message.
 */
static
int
backend_write_user(struct sldesc *sl, struct slmsg *slmsg)
{
	int error;

	spin_lock(&sl->spin);
	if (sl->flags & SLF_RSHUTDOWN) {
		/*
		 * Not accepting new messages
		 */
		error = EPIPE;
	} else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
		/*
		 * Write a reply
		 */
		TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
		sl->repbytes += slmsg->maxsize;
		slmsg->flags |= SLMSGF_ONINQ;
		error = 0;
	} else if (RB_INSERT(slmsg_rb_tree, &sl->reply_rb_root, slmsg)) {
		/*
		 * Write a command, but there was a msgid collision when
		 * we tried to insert it into the RB tree.
		 */
		error = EEXIST;
	} else {
		/*
		 * Write a command, successful insertion into the RB tree.
		 */
		TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
		sl->cmdbytes += slmsg->maxsize;
		slmsg->flags |= SLMSGF_ONINQ;
		error = 0;
	}
	spin_unlock(&sl->spin);
	if (sl->rwaiters)
		wakeup(&sl->rwaiters);
	return (error);
}
/*
 * Our peer is handing a command we previously sent it back to us, along
 * with the reply message (if not NULL).  We just queue the reply to
 * userland and free the command.
 */
static
void
backend_reply_user(struct sldesc *sl, struct slmsg *slcmd, struct slmsg *slrep)
{
	int error;

	slmsg_put(slcmd);
	if (slrep) {
		spin_lock(&sl->spin);
		if ((sl->flags & SLF_RSHUTDOWN) == 0) {
			TAILQ_INSERT_TAIL(&sl->inq, slrep, tqnode);
			sl->repbytes += slrep->maxsize;
			error = 0;
		} else {
			error = EPIPE;
		}
		spin_unlock(&sl->spin);
		if (error)
			sl->peer->backend_dispose(sl->peer, slrep);
		else if (sl->rwaiters)
			wakeup(&sl->rwaiters);
	}
}
static
void
backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg)
{
	slmsg_put(slmsg);
}
/************************************************************************
 *			KERNEL DRIVER OR FILESYSTEM API			*
 ************************************************************************
 */

/*
 * Create a user<->kernel link, returning the user descriptor in *pfd
 * and the kernel descriptor in *kslp.  0 is returned on success, and an
 * error code is returned on failure.
 */
int
syslink_ukbackend(int *pfd, struct sldesc **kslp)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
	struct sldesc *usl;
	struct sldesc *ksl;
	int error;
	int fd;

	*pfd = -1;
	*kslp = NULL;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return(error);
	usl = allocsldesc(NULL);
	usl->backend_wblocked = backend_wblocked_user;
	usl->backend_write = backend_write_user;
	usl->backend_reply = backend_reply_user;
	usl->backend_dispose = backend_dispose_user;

	ksl = allocsldesc(usl->common);
	ksl->peer = usl;
	ksl->backend_wblocked = backend_wblocked_kern;
	ksl->backend_write = backend_write_kern;
	ksl->backend_reply = backend_reply_kern;
	ksl->backend_dispose = backend_dispose_kern;
	usl->peer = ksl;

	setsldescfp(usl, fp);
	fsetfd(fdp, fp, fd);
	fdrop(fp);

	*pfd = fd;
	*kslp = ksl;
	return(0);
}
/*
 * Assign a unique message id, issue a syslink message to userland,
 * and wait for a reply.
 */
int
syslink_kdomsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	struct syslink_msg *msg;
	int error;

	/*
	 * Finish initializing slmsg and post it to the red-black tree for
	 * reply matching.  If the message id is already in use we return
	 * EEXIST, giving the originator the chance to roll a new msgid.
	 */
	msg = slmsg->msg;
	slmsg->msgsize = msg->sm_bytes;
	if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
		return (error);
	msg->sm_msgid = allocsysid();

	/*
	 * Issue the request and wait for a matching reply or failure,
	 * then remove the message from the matching tree and return.
	 */
	error = ksl->peer->backend_write(ksl->peer, slmsg);
	spin_lock(&ksl->spin);
	if (error == 0) {
		while (slmsg->rep == NULL) {
			error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
			/* XXX ignore error for now */
		}
		if (slmsg->rep == (struct slmsg *)-1) {
			error = EIO;
			slmsg->rep = NULL;
		} else {
			error = slmsg->rep->msg->sm_head.se_aux;
		}
	}
	spin_unlock(&ksl->spin);
	return (error);
}
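
#if 0
/*
 * Illustrative sketch only (not part of the original file): a typical
 * synchronous kernel-side exchange using the API above.  The protocol
 * constant and header fields filled in below are assumptions; the real
 * values come from the syslink protocol the backend speaks.
 */
static int
example_kern_rpc(struct sldesc *ksl)
{
	struct slmsg *slmsg;
	struct syslink_msg *msg;
	int error;

	slmsg = syslink_kallocmsg();
	msg = slmsg->msg;
	msg->sm_bytes = sizeof(*msg);		/* header-only message */
	msg->sm_proto = SMPROTO_BCTL;		/* assumed protocol */
	msg->sm_head.se_cmd = 0;		/* assumed command */
	msg->sm_head.se_bytes = sizeof(msg->sm_head);
	error = syslink_kdomsg(ksl, slmsg);	/* blocks until replied */
	syslink_kfreemsg(ksl, slmsg);
	return (error);
}
#endif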
/*
 * Similar to syslink_kdomsg but return immediately instead of
 * waiting for a reply.  The kernel must supply a callback function
 * which will be made in the context of the user process replying
 * to the message.
 */
int
syslink_ksendmsg(struct sldesc *ksl, struct slmsg *slmsg,
		 void (*func)(struct slmsg *, void *, int), void *arg)
{
	struct syslink_msg *msg;
	int error;

	/*
	 * Finish initializing slmsg and post it to the red-black tree for
	 * reply matching.  If the message id is already in use we return
	 * EEXIST, giving the originator the chance to roll a new msgid.
	 */
	msg = slmsg->msg;
	slmsg->msgsize = msg->sm_bytes;
	slmsg->callback_func = func;
	slmsg->callback_data = arg;
	if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
		return (error);
	msg->sm_msgid = allocsysid();

	/*
	 * Issue the request.  If no error occurred the operation will be
	 * in progress, otherwise the operation is considered to have failed
	 * and the caller can deallocate the slmsg.
	 */
	error = ksl->peer->backend_write(ksl->peer, slmsg);
	return (error);
}
int
syslink_kwaitmsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	int error;

	spin_lock(&ksl->spin);
	while (slmsg->rep == NULL) {
		error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
		/* XXX ignore error for now */
	}
	if (slmsg->rep == (struct slmsg *)-1) {
		error = EIO;
		slmsg->rep = NULL;
	} else {
		error = slmsg->rep->msg->sm_head.se_aux;
	}
	spin_unlock(&ksl->spin);
	return (error);
}
struct slmsg *
syslink_kallocmsg(void)
{
	return(objcache_get(sl_objcache_small, M_WAITOK));
}
void
syslink_kfreemsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	struct slmsg *rep;

	if ((rep = slmsg->rep) != NULL) {
		slmsg->rep = NULL;
		ksl->peer->backend_dispose(ksl->peer, rep);
	}
	slmsg->callback_func = NULL;
	slmsg_put(slmsg);
}
void
syslink_kshutdown(struct sldesc *ksl, int how)
{
	shutdownsldesc(ksl, how);
}

void
syslink_kclose(struct sldesc *ksl)
{
	shutdownsldesc(ksl, SHUT_RDWR);
	sldrop(ksl);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_pages(struct slmsg *slmsg, struct vm_page **mbase, int npages)
{
	int xflags;
	int error;

	xflags = XIOF_VMLINEAR;
	if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
		xflags |= XIOF_READ | XIOF_WRITE;
	else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
		xflags |= XIOF_READ;
	error = xio_init_pages(&slmsg->xio, mbase, npages, xflags);
	slmsg->flags |= SLMSGF_HASXIO;
	return (error);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_data(struct slmsg *slmsg, char *base, int bytes)
{
	int xflags;

	xflags = XIOF_VMLINEAR;
	if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
		xflags |= XIOF_READ | XIOF_WRITE;
	else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
		xflags |= XIOF_READ;
	xio_init_kbuf(&slmsg->xio, base, bytes);
	slmsg->xio.xio_flags |= xflags;
	slmsg->flags |= SLMSGF_HASXIO;
	return (0);
}
/************************************************************************
 *			BACKEND FUNCTIONS FOR KERNEL API		*
 ************************************************************************
 *
 * These are the backend functions for a sldesc associated with a kernel
 * descriptor.
 */

/*
 * Our peer wants to write a syslink message to us and is asking us to
 * block if our input queue is full.  We don't implement command reception
 * so don't block right now.
 */
static
int
backend_wblocked_kern(struct sldesc *ksl, int nbio, sl_proto_t proto)
{
	/* never blocks */
	return(0);
}
/*
 * Our peer is writing a request to the kernel.  At the moment we do not
 * accept commands.
 */
static
int
backend_write_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
	return(EOPNOTSUPP);
}
/*
 * Our peer wants to reply to a syslink message we sent it earlier.  The
 * original command (that we passed to our peer), and the peer's reply
 * is specified.  If the peer has failed slrep will be NULL.
 */
static
void
backend_reply_kern(struct sldesc *ksl, struct slmsg *slcmd, struct slmsg *slrep)
{
	int error;

	spin_lock(&ksl->spin);
	if (slrep == NULL) {
		slcmd->rep = (struct slmsg *)-1;
		error = EIO;
	} else {
		slcmd->rep = slrep;
		error = slrep->msg->sm_head.se_aux;
	}
	spin_unlock(&ksl->spin);

	/*
	 * Issue callback or wakeup a synchronous waiter.
	 */
	if (slcmd->callback_func) {
		slcmd->callback_func(slcmd, slcmd->callback_data, error);
	} else {
		wakeup(slcmd);
	}
}
/*
 * Any reply messages we sent to our peer are returned to us for disposal.
 * Since we do not currently accept commands from our peer, there will not
 * be any replies returned to the peer to dispose of.
 */
static
void
backend_dispose_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
	panic("backend_dispose_kern: kernel can't accept commands so it "
	      "certainly did not reply to one!");
}