/*
 * Copyright (c) 2006-2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the core syslink() system call and provides
 * glue for kernel syslink frontends and backends, creating an intra-host
 * communications infrastructure and DMA transport abstraction.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/alist.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/sysref.h>
#include <sys/syslink.h>
#include <sys/syslink_msg.h>
#include <netinet/in.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include "opt_syslink.h"
/*
 * Syslink Connection abstraction
 */
struct slcommon {
	struct spinlock spin;
	int		refs;
};

struct sldesc {
	struct slmsgq	inq;			/* incoming messages */
	struct slmsg_rb_tree reply_rb_root;	/* replies to requests */
	struct spinlock	spin;
	struct sldesc	*peer;			/* peer syslink, if any */
	struct file	*xfp;			/* external file pointer */
	struct slcommon	*common;
	int		flags;			/* SLF_* flags */
	int		rwaiters;		/* number of threads waiting */
	int		wblocked;		/* blocked waiting for us to drain */
	size_t		cmdbytes;		/* unreplied commands pending */
	size_t		repbytes;		/* undrained replies pending */
	int		(*backend_wblocked)(struct sldesc *, int, sl_proto_t);
	int		(*backend_write)(struct sldesc *, struct slmsg *);
	void		(*backend_reply)(struct sldesc *, struct slmsg *,
					 struct slmsg *);
	void		(*backend_dispose)(struct sldesc *, struct slmsg *);
};

#define SLF_RSHUTDOWN	0x0001
#define SLF_WSHUTDOWN	0x0002
static int syslink_cmd_new(struct syslink_info_new *info, int *result);
static struct sldesc *allocsldesc(struct slcommon *common);
static void setsldescfp(struct sldesc *sl, struct file *fp);
static void shutdownsldesc(struct sldesc *sl, int how);
static void shutdownsldesc2(struct sldesc *sl, int how);
static void sldrop(struct sldesc *sl);
static int syslink_validate_msg(struct syslink_msg *msg, int bytes);
static int syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
				int swapit, int depth);

static int sl_local_mmap(struct slmsg *slmsg, char *base, size_t len);
static void sl_local_munmap(struct slmsg *slmsg);

static int backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_user(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_user(struct sldesc *sl, struct slmsg *slcmd,
			       struct slmsg *slrep);
static void backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg);

static int backend_wblocked_kern(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_kern(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_kern(struct sldesc *sl, struct slmsg *slcmd,
			       struct slmsg *slrep);
static void backend_dispose_kern(struct sldesc *sl, struct slmsg *slmsg);
static void slmsg_put(struct slmsg *slmsg);
/*
 * Objcache memory backend
 *
 * All three object caches return slmsg structures but each is optimized
 * for syslink message buffers of varying sizes.  We use the slightly
 * more complex ctor/dtor API in order to provide ready-to-go slmsg's.
 */
static struct objcache *sl_objcache_big;
static struct objcache *sl_objcache_small;
static struct objcache *sl_objcache_none;

MALLOC_DEFINE(M_SYSLINK, "syslink", "syslink manager");

static boolean_t slmsg_ctor(void *data, void *private, int ocflags);
static void slmsg_dtor(void *data, void *private);
static void
syslinkinit(void *dummy __unused)
{
	size_t n = sizeof(struct slmsg);

	sl_objcache_none = objcache_create_mbacked(M_SYSLINK, n, 0, 64,
						   slmsg_ctor, slmsg_dtor,
						   &sl_objcache_none);
	sl_objcache_small = objcache_create_mbacked(M_SYSLINK, n, 0, 64,
						   slmsg_ctor, slmsg_dtor,
						   &sl_objcache_small);
	sl_objcache_big = objcache_create_mbacked(M_SYSLINK, n, 0, 16,
						   slmsg_ctor, slmsg_dtor,
						   &sl_objcache_big);
}
static boolean_t
slmsg_ctor(void *data, void *private, int ocflags)
{
	struct slmsg *slmsg = data;

	bzero(slmsg, sizeof(*slmsg));

	slmsg->oc = *(struct objcache **)private;
	if (slmsg->oc == sl_objcache_none) {
		slmsg->maxsize = 0;
	} else if (slmsg->oc == sl_objcache_small) {
		slmsg->maxsize = SLMSG_SMALL;
	} else if (slmsg->oc == sl_objcache_big) {
		slmsg->maxsize = SLMSG_BIG;
	} else {
		panic("slmsg_ctor: bad objcache?");
	}
	if (slmsg->maxsize) {
		slmsg->msg = kmalloc(slmsg->maxsize,
				     M_SYSLINK, M_WAITOK|M_ZERO);
	}
	xio_init(&slmsg->xio);
	return(TRUE);
}
static void
slmsg_dtor(void *data, void *private)
{
	struct slmsg *slmsg = data;

	if (slmsg->maxsize && slmsg->msg) {
		kfree(slmsg->msg, M_SYSLINK);
		slmsg->msg = NULL;
	}
	slmsg->oc = NULL;
}
SYSINIT(syslink, SI_BOOT2_MACHDEP, SI_ORDER_ANY, syslinkinit, NULL);

static int rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2);
RB_GENERATE2(slmsg_rb_tree, slmsg, rbnode, rb_slmsg_compare,
	     sysid_t, msg->sm_msgid);
/*
 * Sysctl elements
 */
static int syslink_enabled;
SYSCTL_NODE(_kern, OID_AUTO, syslink, CTLFLAG_RW, 0, "Syslink operation");
SYSCTL_INT(_kern_syslink, OID_AUTO, enabled,
	   CTLFLAG_RW, &syslink_enabled, 0, "Enable SYSLINK");
static size_t syslink_bufsize = 65536;
SYSCTL_UINT(_kern_syslink, OID_AUTO, bufsize,
	   CTLFLAG_RW, &syslink_bufsize, 0, "Maximum buffer size");
/*
 * Fileops API - typically used to glue a userland frontend with a
 * kernel backend.
 */
static int slfileop_read(struct file *fp, struct uio *uio,
			 struct ucred *cred, int flags);
static int slfileop_write(struct file *fp, struct uio *uio,
			  struct ucred *cred, int flags);
static int slfileop_close(struct file *fp);
static int slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred);
static int slfileop_shutdown(struct file *fp, int how);
static int slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data,
			  struct ucred *cred, struct sysmsg *msg);
static int slfileop_kqfilter(struct file *fp, struct knote *kn);

static struct fileops syslinkops = {
	.fo_read =	slfileop_read,
	.fo_write =	slfileop_write,
	.fo_ioctl =	slfileop_ioctl,
	.fo_kqfilter =	slfileop_kqfilter,
	.fo_stat =	slfileop_stat,
	.fo_close =	slfileop_close,
	.fo_shutdown =	slfileop_shutdown
};
/************************************************************************
 *			PRIMARY SYSTEM CALL INTERFACE			*
 ************************************************************************
 *
 * syslink(int cmd, struct syslink_info *info, size_t bytes)
 */
int
sys_syslink(struct syslink_args *uap)
{
	union syslink_info_all info;
	int error;

	/*
	 * System call is under construction and disabled by default.
	 * Superuser access is also required for now, but eventually
	 * will not be needed.
	 */
	if (syslink_enabled == 0)
		return (EAUTH);
	error = priv_check(curthread, PRIV_ROOT);
	if (error)
		return (error);

	/*
	 * Load and validate the info structure.  Unloaded bytes are zeroed
	 * out.  The label field must always be 0-filled, even if not used
	 * by the command.
	 */
	bzero(&info, sizeof(info));
	if ((unsigned)uap->bytes <= sizeof(info)) {
		if (uap->bytes)
			error = copyin(uap->info, &info, uap->bytes);
	} else {
		error = EINVAL;
	}
	if (error)
		return (error);

	/*
	 * Process the command
	 */
	switch(uap->cmd) {
	case SYSLINK_CMD_NEW:
		error = syslink_cmd_new(&info.cmd_new, &uap->sysmsg_result);
		break;
	default:
		error = EINVAL;
		break;
	}

	/*
	 * Write back the info structure if the command requested it.
	 */
	if (error == 0 && info.head.wbflag)
		copyout(&info, uap->info, uap->bytes);
	return (error);
}
/*
 * Create a linked pair of descriptors, like a pipe.
 */
static int
syslink_cmd_new(struct syslink_info_new *info, int *result)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp1;
	struct file *fp2;
	struct sldesc *sl;
	struct sldesc *slpeer;
	int error;
	int fd1, fd2;

	error = falloc(td->td_lwp, &fp1, &fd1);
	if (error)
		return(error);
	error = falloc(td->td_lwp, &fp2, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(fp1);
		return(error);
	}
	slpeer = allocsldesc(NULL);
	slpeer->backend_wblocked = backend_wblocked_user;
	slpeer->backend_write = backend_write_user;
	slpeer->backend_reply = backend_reply_user;
	slpeer->backend_dispose = backend_dispose_user;
	sl = allocsldesc(slpeer->common);
	sl->peer = slpeer;
	sl->backend_wblocked = backend_wblocked_user;
	sl->backend_write = backend_write_user;
	sl->backend_reply = backend_reply_user;
	sl->backend_dispose = backend_dispose_user;
	slpeer->peer = sl;

	setsldescfp(sl, fp1);
	setsldescfp(slpeer, fp2);

	fsetfd(fdp, fp1, fd1);
	fdrop(fp1);
	fsetfd(fdp, fp2, fd2);
	fdrop(fp2);

	info->head.wbflag = 1;	/* write back */
	info->fds[0] = fd1;
	info->fds[1] = fd2;

	return(0);
}
/************************************************************************
 *			LOW LEVEL SLDESC SUPPORT			*
 ************************************************************************
 *
 * Allocate a sldesc structure, optionally sharing the slcommon
 * structure of a peer.
 */
static struct sldesc *
allocsldesc(struct slcommon *common)
{
	struct sldesc *sl;

	sl = kmalloc(sizeof(struct sldesc), M_SYSLINK, M_WAITOK|M_ZERO);
	if (common == NULL)
		common = kmalloc(sizeof(*common), M_SYSLINK, M_WAITOK|M_ZERO);
	TAILQ_INIT(&sl->inq);		/* incoming requests */
	RB_INIT(&sl->reply_rb_root);	/* match incoming replies */
	spin_init(&sl->spin);
	sl->common = common;
	++common->refs;
	return(sl);
}

/*
 * Associate a sldesc with a file pointer.
 */
static void
setsldescfp(struct sldesc *sl, struct file *fp)
{
	sl->xfp = fp;
	fp->f_type = DTYPE_SYSLINK;
	fp->f_flag = FREAD | FWRITE;
	fp->f_ops = &syslinkops;
	fp->f_data = sl;
}
/*
 * Red-black tree compare function
 */
static int
rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2)
{
	if (msg1->msg->sm_msgid < msg2->msg->sm_msgid)
		return(-1);
	if (msg1->msg->sm_msgid == msg2->msg->sm_msgid)
		return(0);
	return(1);
}
static void
shutdownsldesc(struct sldesc *sl, int how)
{
	struct slmsg *slmsg;
	int rhow;

	shutdownsldesc2(sl, how);

	/*
	 * Return unread and unreplied messages
	 */
	spin_lock(&sl->spin);
	while ((slmsg = TAILQ_FIRST(&sl->inq)) != NULL) {
		TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
		spin_unlock(&sl->spin);
		if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
			sl->repbytes -= slmsg->maxsize;
			slmsg->flags &= ~SLMSGF_ONINQ;
			sl->peer->backend_dispose(sl->peer, slmsg);
		}
		/* leave ONINQ set for commands, it will be cleared below */
		spin_lock(&sl->spin);
	}
	while ((slmsg = RB_ROOT(&sl->reply_rb_root)) != NULL) {
		RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slmsg);
		sl->cmdbytes -= slmsg->maxsize;
		spin_unlock(&sl->spin);
		slmsg->flags &= ~SLMSGF_ONINQ;
		sl->peer->backend_reply(sl->peer, slmsg, NULL);
		spin_lock(&sl->spin);
	}
	spin_unlock(&sl->spin);

	/*
	 * Call shutdown on the peer with the opposite flags
	 */
	rhow = 0;
	switch(how) {
	case SHUT_RD:
		rhow = SHUT_WR;
		break;
	case SHUT_WR:
		rhow = SHUT_RD;
		break;
	case SHUT_RDWR:
		rhow = SHUT_RDWR;
		break;
	}
	shutdownsldesc2(sl->peer, rhow);
}
static void
shutdownsldesc2(struct sldesc *sl, int how)
{
	spin_lock(&sl->spin);
	switch(how) {
	case SHUT_RD:
		sl->flags |= SLF_RSHUTDOWN;
		break;
	case SHUT_WR:
		sl->flags |= SLF_WSHUTDOWN;
		break;
	case SHUT_RDWR:
		sl->flags |= SLF_RSHUTDOWN | SLF_WSHUTDOWN;
		break;
	}
	spin_unlock(&sl->spin);

	/*
	 * Handle signaling on the user side
	 */
	if (sl->flags & SLF_RSHUTDOWN) {
		if (sl->rwaiters)
			wakeup(&sl->rwaiters);
	}
	if (sl->flags & SLF_WSHUTDOWN) {
		if (sl->wblocked) {
			sl->wblocked = 0;	/* race ok */
			wakeup(&sl->wblocked);
		}
	}
}
static void
sldrop(struct sldesc *sl)
{
	struct sldesc *slpeer;

	spin_lock(&sl->common->spin);
	if (--sl->common->refs == 0) {
		spin_unlock(&sl->common->spin);
		if ((slpeer = sl->peer) != NULL) {
			sl->peer = NULL;
			slpeer->peer = NULL;
			slpeer->common = NULL;
			KKASSERT(slpeer->xfp == NULL);
			KKASSERT(TAILQ_EMPTY(&slpeer->inq));
			KKASSERT(RB_EMPTY(&slpeer->reply_rb_root));
			kfree(slpeer, M_SYSLINK);
		}
		KKASSERT(sl->xfp == NULL);
		KKASSERT(TAILQ_EMPTY(&sl->inq));
		KKASSERT(RB_EMPTY(&sl->reply_rb_root));
		kfree(sl->common, M_SYSLINK);
		sl->common = NULL;
		kfree(sl, M_SYSLINK);
	} else {
		spin_unlock(&sl->common->spin);
	}
}
static void
slmsg_put(struct slmsg *slmsg)
{
	if (slmsg->flags & SLMSGF_HASXIO) {
		slmsg->flags &= ~SLMSGF_HASXIO;
		get_mplock();
		xio_release(&slmsg->xio);
		rel_mplock();
	}
	slmsg->flags &= ~SLMSGF_LINMAP;
	objcache_put(slmsg->oc, slmsg);
}
/************************************************************************
 *				FILEOPS API				*
 ************************************************************************
 *
 * Implement userland fileops.
 */
static int
slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct sldesc *sl = fp->f_data;		/* fp refed on call */
	struct slmsg *slmsg;
	struct iovec *iov0;
	struct iovec *iov1;
	struct syslink_msg *wmsg;
	int error;
	int nbio;

	/*
	 * Kinda messy.  Figure out the non-blocking state
	 */
	if (flags & O_FBLOCKING)
		nbio = 0;
	else if (flags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Validate the uio.
	 *
	 * iov0 - message buffer
	 * iov1 - DMA buffer or backup buffer
	 */
	if (uio->uio_iovcnt < 1) {
		error = 0;
		goto done2;
	}
	iov0 = &uio->uio_iov[0];
	if (uio->uio_iovcnt > 2) {
		error = EINVAL;
		goto done2;
	}

	/*
	 * Get a message, blocking if necessary.
	 */
	spin_lock(&sl->spin);
	while ((slmsg = TAILQ_FIRST(&sl->inq)) == NULL) {
		if (sl->flags & SLF_RSHUTDOWN) {
			error = 0;
			goto done1;
		}
		if (nbio) {
			error = EAGAIN;
			goto done1;
		}
		++sl->rwaiters;
		error = ssleep(&sl->rwaiters, &sl->spin, PCATCH, "slrmsg", 0);
		--sl->rwaiters;
		if (error)
			goto done1;
	}
	wmsg = slmsg->msg;

	/*
	 * We have a message and still hold the spinlock.  Make sure the
	 * uio has enough room to hold the message.
	 *
	 * Note that replies do not have XIOs.
	 */
	if (slmsg->msgsize > iov0->iov_len) {
		error = ENOSPC;
		goto done1;
	}
	if (slmsg->xio.xio_bytes) {
		if (uio->uio_iovcnt != 2) {
			error = ENOSPC;
			goto done1;
		}
		iov1 = &uio->uio_iov[1];
		if (slmsg->xio.xio_bytes > iov1->iov_len) {
			error = ENOSPC;
			goto done1;
		}
	} else {
		iov1 = NULL;
	}

	/*
	 * Dequeue the message.  Adjust repbytes immediately.  cmdbytes
	 * are adjusted when the command is replied to, not here.
	 */
	TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
	if (slmsg->msg->sm_proto & SM_PROTO_REPLY)
		sl->repbytes -= slmsg->maxsize;
	spin_unlock(&sl->spin);

	/*
	 * Load the message data into the user buffer.
	 *
	 * If receiving a command an XIO may exist specifying a DMA buffer.
	 * For commands, if DMAW is set we have to copy or map the buffer
	 * so the caller can access the data being written.  If DMAR is set
	 * we do not have to copy but we still must map the buffer so the
	 * caller can directly fill in the data being requested.
	 */
	error = uiomove((void *)slmsg->msg, slmsg->msgsize, uio);
	if (error == 0 && slmsg->xio.xio_bytes &&
	    (wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
		if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW) {
			/*
			 * Data being passed to caller or being passed in both
			 * directions, copy or map.
			 */
			get_mplock();
			if ((flags & O_MAPONREAD) &&
			    (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
				error = sl_local_mmap(slmsg,
						      iov1->iov_base,
						      iov1->iov_len);
				if (error)
					error = xio_copy_xtou(&slmsg->xio, 0,
							    iov1->iov_base,
							    slmsg->xio.xio_bytes);
			} else {
				error = xio_copy_xtou(&slmsg->xio, 0,
						      iov1->iov_base,
						      slmsg->xio.xio_bytes);
			}
			rel_mplock();
		} else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR) {
			/*
			 * Data will be passed back to originator, map
			 * the buffer if we can, else use the backup
			 * buffer at the same VA supplied by the caller.
			 */
			get_mplock();
			if ((flags & O_MAPONREAD) &&
			    (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
				error = sl_local_mmap(slmsg,
						      iov1->iov_base,
						      iov1->iov_len);
				error = 0; /* ignore errors */
			}
			rel_mplock();
		}
	}

	/*
	 * Clean up.
	 */
	if (error) {
		/*
		 * Requeue the message if we could not read it successfully
		 */
		spin_lock(&sl->spin);
		TAILQ_INSERT_HEAD(&sl->inq, slmsg, tqnode);
		slmsg->flags |= SLMSGF_ONINQ;
		spin_unlock(&sl->spin);
	} else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
		/*
		 * Dispose of any received reply after we've copied it
		 * to userland.  We don't need the slmsg any more.
		 */
		slmsg->flags &= ~SLMSGF_ONINQ;
		sl->peer->backend_dispose(sl->peer, slmsg);
		if (sl->wblocked && sl->repbytes < syslink_bufsize) {
			sl->wblocked = 0;	/* MP race ok here */
			wakeup(&sl->wblocked);
		}
	} else {
		/*
		 * Leave the command in the RB tree but clear ONINQ now
		 * that we have returned it to userland so userland can
		 * reply to it.
		 */
		slmsg->flags &= ~SLMSGF_ONINQ;
	}
	return (error);
done1:
	spin_unlock(&sl->spin);
done2:
	return (error);
}
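
/*
 * Illustrative userland read (sketch only, not part of this file):
 * iov[0] receives the syslink message proper and iov[1] receives (or
 * maps, with O_MAPONREAD) any associated DMA payload, matching the
 * iovec protocol described above.  Buffer sizes are assumptions.
 */
#if 0
	char msgbuf[SLMSG_BIG];
	char dmabuf[65536];		/* page-aligned in practice */
	struct iovec iov[2];
	ssize_t n;

	iov[0].iov_base = msgbuf;
	iov[0].iov_len = sizeof(msgbuf);
	iov[1].iov_base = dmabuf;
	iov[1].iov_len = sizeof(dmabuf);
	n = readv(fd, iov, 2);		/* fd from SYSLINK_CMD_NEW */
#endif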
/*
 * Userland writes syslink message (optionally with DMA buffer in iov[1]).
 */
static int
slfileop_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct sldesc *sl = fp->f_data;
	struct slmsg *slmsg;
	struct slmsg *slcmd;
	struct syslink_msg sltmp;
	struct syslink_msg *wmsg;	/* wire message */
	struct iovec *iov0;
	struct iovec *iov1;
	sl_proto_t proto;
	size_t count;
	int nbio;
	int error;
	int xflags;

	/*
	 * Kinda messy.  Figure out the non-blocking state
	 */
	if (flags & O_FBLOCKING)
		nbio = 0;
	else if (flags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Validate the uio
	 */
	if (uio->uio_iovcnt < 1) {
		error = 0;
		goto done2;
	}
	iov0 = &uio->uio_iov[0];
	if (iov0->iov_len > SLMSG_BIG) {
		error = EFBIG;
		goto done2;
	}
	if (uio->uio_iovcnt > 2) {
		error = EFBIG;
		goto done2;
	}
	if (uio->uio_iovcnt > 1) {
		iov1 = &uio->uio_iov[1];
		if (iov1->iov_len > XIO_INTERNAL_SIZE) {
			error = EFBIG;
			goto done2;
		}
		if ((intptr_t)iov1->iov_base & PAGE_MASK) {
			error = EINVAL;
			goto done2;
		}
	} else {
		iov1 = NULL;
	}

	/*
	 * Handle the buffer-full case.  slpeer cmdbytes is managed
	 * by the backend function, not us, so if the callback just
	 * directly implements the message and never adjusts cmdbytes,
	 * we will never sleep here.
	 */
	if (sl->flags & SLF_WSHUTDOWN) {
		error = EPIPE;
		goto done2;
	}

	/*
	 * Only commands can block the pipe, not replies.  Otherwise a
	 * deadlock is possible.
	 */
	error = copyin(iov0->iov_base, &sltmp, sizeof(sltmp));
	if (error)
		goto done2;
	if ((proto = sltmp.sm_proto) & SM_PROTO_ENDIAN_REV)
		proto = bswap16(proto);
	error = sl->peer->backend_wblocked(sl->peer, nbio, proto);
	if (error)
		goto done2;

	/*
	 * Allocate a slmsg and load the message.  Note that the bytes
	 * returned to userland only reflects the primary syslink message
	 * and does not include any DMA buffers.
	 */
	if (iov0->iov_len <= SLMSG_SMALL)
		slmsg = objcache_get(sl_objcache_small, M_WAITOK);
	else
		slmsg = objcache_get(sl_objcache_big, M_WAITOK);
	slmsg->msgsize = iov0->iov_len;
	wmsg = slmsg->msg;

	error = uiomove((void *)wmsg, iov0->iov_len, uio);
	if (error)
		goto done1;
	error = syslink_validate_msg(wmsg, slmsg->msgsize);
	if (error)
		goto done1;

	if ((wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
		/*
		 * Install the XIO for commands if any DMA flags are set.
		 *
		 * XIOF_VMLINEAR requires that the XIO represent a
		 * contiguous set of pages associated with a single VM
		 * object (so the reader side can mmap it easily).
		 *
		 * XIOF_VMLINEAR might not be set when the kernel sends
		 * commands to userland so the reader side backs off to
		 * a backup buffer if it isn't set, but we require it
		 * for userland writes.
		 */
		xflags = XIOF_VMLINEAR;
		if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR)
			xflags |= XIOF_READ | XIOF_WRITE;
		else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW)
			xflags |= XIOF_READ;
		else
			xflags = 0;
		if (xflags && iov1) {
			get_mplock();
			error = xio_init_ubuf(&slmsg->xio, iov1->iov_base,
					      iov1->iov_len, xflags);
			rel_mplock();
			if (error)
				goto done1;
			slmsg->flags |= SLMSGF_HASXIO;
		}
		error = sl->peer->backend_write(sl->peer, slmsg);
	} else {
		/*
		 * Replies have to be matched up against received commands.
		 */
		spin_lock(&sl->spin);
		slcmd = slmsg_rb_tree_RB_LOOKUP(&sl->reply_rb_root,
						slmsg->msg->sm_msgid);
		if (slcmd == NULL || (slcmd->flags & SLMSGF_ONINQ)) {
			error = ENOENT;
			spin_unlock(&sl->spin);
			goto done1;
		}
		RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slcmd);
		sl->cmdbytes -= slcmd->maxsize;
		spin_unlock(&sl->spin);

		/*
		 * If the original command specified DMAR, has an xio, and
		 * our write specifies a DMA buffer, then we can do a
		 * copyback.  But if we are linearly mapped and the caller
		 * is using the map base address, then the caller filled in
		 * the data via the direct memory map and no copyback is
		 * needed.
		 */
		if ((slcmd->msg->sm_head.se_cmd & SE_CMDF_DMAR) && iov1 &&
		    (slcmd->flags & SLMSGF_HASXIO) &&
		    ((slcmd->flags & SLMSGF_LINMAP) == 0 ||
		     iov1->iov_base != slcmd->vmbase)
		) {
			if (iov1->iov_len > slcmd->xio.xio_bytes)
				count = slcmd->xio.xio_bytes;
			else
				count = iov1->iov_len;
			get_mplock();
			error = xio_copy_utox(&slcmd->xio, 0, iov1->iov_base,
					      count);
			rel_mplock();
		}

		/*
		 * If we had mapped a DMA buffer, remove it
		 */
		if (slcmd->flags & SLMSGF_LINMAP) {
			get_mplock();
			sl_local_munmap(slcmd);
			rel_mplock();
		}

		/*
		 * Reply and handle unblocking
		 */
		sl->peer->backend_reply(sl->peer, slcmd, slmsg);
		if (sl->wblocked && sl->cmdbytes < syslink_bufsize) {
			sl->wblocked = 0;	/* MP race ok here */
			wakeup(&sl->wblocked);
		}

		/*
		 * slmsg has already been dealt with, make sure error is
		 * 0 so we do not double-free it.
		 */
		error = 0;
	}
done1:
	if (error)
		slmsg_put(slmsg);
done2:
	return (error);
}
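
/*
 * Illustrative userland reply (sketch only, not part of this file):
 * after reading a command, userland writes a reply carrying the same
 * sm_msgid so it matches the pending command in the RB tree.  The exact
 * reply encoding is protocol-defined; this sketch assumes both the
 * SM_PROTO_REPLY and SE_CMDF_REPLY bits are set.
 */
#if 0
	struct syslink_msg *rep = (struct syslink_msg *)msgbuf;

	rep->sm_proto |= SM_PROTO_REPLY;
	rep->sm_head.se_cmd |= SE_CMDF_REPLY;
	write(fd, rep, rep->sm_bytes);
#endif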
/*
 * Close a syslink descriptor.
 *
 * Disassociate the syslink from the file descriptor and disconnect from
 * any peer.
 */
static int
slfileop_close(struct file *fp)
{
	struct sldesc *sl;

	/*
	 * Disassociate the file pointer.  Take ownership of the ref on the
	 * sldesc.
	 */
	sl = fp->f_data;
	fp->f_data = NULL;
	fp->f_ops = &badfileops;
	sl->xfp = NULL;

	/*
	 * Shutdown both directions.  The other side will not issue API
	 * calls to us after we've shutdown both directions.
	 */
	shutdownsldesc(sl, SHUT_RDWR);

	/*
	 * Cleanup
	 */
	KKASSERT(sl->cmdbytes == 0);
	KKASSERT(sl->repbytes == 0);
	sldrop(sl);
	return(0);
}
static int
slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return(EINVAL);
}

static int
slfileop_shutdown(struct file *fp, int how)
{
	shutdownsldesc((struct sldesc *)fp->f_data, how);
	return(0);
}

static int
slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data,
	       struct ucred *cred, struct sysmsg *msg)
{
	return(EINVAL);
}

static int
slfileop_kqfilter(struct file *fp, struct knote *kn)
{
	return(0);
}
/************************************************************************
 *			LOCAL MEMORY MAPPING				*
 ************************************************************************
 *
 * This feature is currently not implemented
 */
static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
	return (EOPNOTSUPP);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
}

#if 0
/*
 * Compiled-out prototype implementation: map the command's XIO pages
 * directly into the calling process at the caller-supplied VA.
 */
static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
	struct vmspace *vms = curproc->p_vmspace;
	int error;

	/* XXX check user address range */
	error = vm_map_replace(
			&vms->vm_map,
			(vm_offset_t)base, (vm_offset_t)base + len,
			slmsg->xio.xio_pages[0]->object,
			slmsg->xio.xio_pages[0]->pindex << PAGE_SHIFT,
			VM_PROT_READ|VM_PROT_WRITE,
			VM_PROT_READ|VM_PROT_WRITE,
			MAP_DISABLE_SYNCER);
	if (error == 0) {
		slmsg->flags |= SLMSGF_LINMAP;
		slmsg->vmbase = base;
		slmsg->vmsize = len;
	}
	return (error);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
	if (slmsg->flags & SLMSGF_LINMAP) {
		vm_map_remove(&curproc->p_vmspace->vm_map,
			      (vm_offset_t)slmsg->vmbase,
			      (vm_offset_t)slmsg->vmbase + slmsg->vmsize);
		slmsg->flags &= ~SLMSGF_LINMAP;
	}
}
#endif
/************************************************************************
 *			MESSAGE VALIDATION				*
 ************************************************************************
 *
 * Validate the syslink message.  Check that all headers and elements
 * conform.  Correct the endian if necessary.
 *
 * NOTE: If reverse endian needs to be corrected, SE_CMDF_UNTRANSLATED
 * is recursively flipped on all syslink_elm's in the message.  As the
 * message traverses the mesh, multiple flips may occur.  It is
 * up to the RPC protocol layer to correct opaque data payloads and
 * SE_CMDF_UNTRANSLATED prevents the protocol layer from misinterpreting
 * a command or reply element which has not been endian-corrected.
 */
static int
syslink_validate_msg(struct syslink_msg *msg, int bytes)
{
	int aligned_reclen;
	int swapit;
	int error;

	/*
	 * The raw message must be properly-aligned.
	 */
	if (bytes & SL_ALIGNMASK)
		return (EINVAL);

	while (bytes) {
		/*
		 * The message must at least contain the msgid, bytes, and
		 * protocol fields.
		 */
		if (bytes < SL_MIN_PAD_SIZE)
			return (EINVAL);

		/*
		 * Fix the endian if it is reversed.
		 */
		if (msg->sm_proto & SM_PROTO_ENDIAN_REV) {
			msg->sm_msgid = bswap64(msg->sm_msgid);
			msg->sm_sessid = bswap64(msg->sm_sessid);
			msg->sm_bytes = bswap16(msg->sm_bytes);
			msg->sm_proto = bswap16(msg->sm_proto);
			msg->sm_rlabel = bswap32(msg->sm_rlabel);
			if (msg->sm_proto & SM_PROTO_ENDIAN_REV)
				return (EINVAL);
			swapit = 1;
		} else {
			swapit = 0;
		}

		/*
		 * Validate the contents.  For PADs, the entire payload is
		 * ignored and the minimum message size can be as small as
		 * 8 bytes.
		 */
		if (msg->sm_proto == SMPROTO_PAD) {
			if (msg->sm_bytes < SL_MIN_PAD_SIZE ||
			    msg->sm_bytes > bytes) {
				return (EINVAL);
			}
			/* ignore the entire payload, it can be garbage */
		} else {
			if (msg->sm_bytes < SL_MIN_MSG_SIZE ||
			    msg->sm_bytes > bytes) {
				return (EINVAL);
			}
			error = syslink_validate_elm(
				    &msg->sm_head,
				    msg->sm_bytes -
					offsetof(struct syslink_msg,
						 sm_head),
				    swapit, SL_MAXDEPTH);
			if (error)
				return (error);
		}

		/*
		 * The aligned payload size must be used to locate the
		 * next syslink_msg in the buffer.
		 */
		aligned_reclen = SL_MSG_ALIGN(msg->sm_bytes);
		bytes -= aligned_reclen;
		msg = (void *)((char *)msg + aligned_reclen);
	}
	return (0);
}
static int
syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
		     int swapit, int depth)
{
	int aligned_reclen;

	/*
	 * If the buffer isn't big enough to fit the header, stop now!
	 */
	if (bytes < SL_MIN_ELM_SIZE)
		return (EINVAL);

	/*
	 * All syslink_elm headers are recursively endian-adjusted.  Opaque
	 * data payloads are not.
	 */
	if (swapit) {
		elm->se_cmd = bswap16(elm->se_cmd) ^ SE_CMDF_UNTRANSLATED;
		elm->se_bytes = bswap16(elm->se_bytes);
		elm->se_aux = bswap32(elm->se_aux);
	}

	/*
	 * Check element size requirements.
	 */
	if (elm->se_bytes < SL_MIN_ELM_SIZE || elm->se_bytes > bytes)
		return (EINVAL);

	/*
	 * Recursively check structured payloads.  A structured payload may
	 * contain as few as 0 recursive elements.
	 */
	if (elm->se_cmd & SE_CMDF_STRUCTURED) {
		if (depth == 0)
			return (EINVAL);
		bytes -= SL_MIN_ELM_SIZE;
		++elm;
		while (bytes > 0) {
			if (syslink_validate_elm(elm, bytes, swapit, depth - 1))
				return (EINVAL);
			aligned_reclen = SL_MSG_ALIGN(elm->se_bytes);
			elm = (void *)((char *)elm + aligned_reclen);
			bytes -= aligned_reclen;
		}
	}
	return (0);
}
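
/*
 * Illustrative sketch (not part of this file): the smallest record the
 * validator above accepts is a PAD message, which only needs a sane
 * sm_bytes/sm_proto pair and whose payload is ignored entirely.
 */
#if 0
	struct syslink_msg pad;

	bzero(&pad, sizeof(pad));
	pad.sm_bytes = SL_MIN_PAD_SIZE;
	pad.sm_proto = SMPROTO_PAD;
	KKASSERT(syslink_validate_msg(&pad, SL_MSG_ALIGN(pad.sm_bytes)) == 0);
#endif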
/************************************************************************
 *		BACKEND FUNCTIONS - USER DESCRIPTOR			*
 ************************************************************************
 *
 * Peer backend links are primarily used when userland creates a pair
 * of linked descriptors.
 */

/*
 * Do any required blocking / nbio handling for attempts to write to
 * a sldesc associated with a user descriptor.
 */
static int
backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto)
{
	int error = 0;
	size_t *bytesp = (proto & SM_PROTO_REPLY) ? &sl->repbytes :
						    &sl->cmdbytes;

	/*
	 * Block until sufficient data is drained by the target.  It is
	 * ok to have a MP race against cmdbytes.
	 */
	if (*bytesp >= syslink_bufsize) {
		spin_lock(&sl->spin);
		while (*bytesp >= syslink_bufsize) {
			if (sl->flags & SLF_WSHUTDOWN) {
				error = EPIPE;
				break;
			}
			if (nbio) {
				error = EAGAIN;
				break;
			}
			sl->wblocked = 1;
			error = ssleep(&sl->wblocked, &sl->spin,
				       PCATCH, "slwmsg", 0);
			if (error)
				break;
		}
		spin_unlock(&sl->spin);
	}
	return (error);
}
/*
 * Unconditionally write a syslink message to the sldesc associated with
 * a user descriptor.  Command messages are also placed in a red-black
 * tree so their DMA tag (if any) can be accessed and so they can be
 * linked to any reply message.
 */
static int
backend_write_user(struct sldesc *sl, struct slmsg *slmsg)
{
	int error;

	spin_lock(&sl->spin);
	if (sl->flags & SLF_RSHUTDOWN) {
		/*
		 * Not accepting new messages
		 */
		error = EPIPE;
	} else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
		/*
		 * Write a reply
		 */
		TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
		sl->repbytes += slmsg->maxsize;
		slmsg->flags |= SLMSGF_ONINQ;
		error = 0;
	} else if (RB_INSERT(slmsg_rb_tree, &sl->reply_rb_root, slmsg)) {
		/*
		 * Write a command, but there was a msgid collision when
		 * we tried to insert it into the RB tree.
		 */
		error = EEXIST;
	} else {
		/*
		 * Write a command, successful insertion into the RB tree.
		 */
		TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
		sl->cmdbytes += slmsg->maxsize;
		slmsg->flags |= SLMSGF_ONINQ;
		error = 0;
	}
	spin_unlock(&sl->spin);
	if (sl->rwaiters)
		wakeup(&sl->rwaiters);
	return (error);
}
/*
 * Our peer is handing back a command we previously sent it, along
 * with the reply message (if not NULL).  We just queue the reply to
 * userland and dispose of the command.
 */
static void
backend_reply_user(struct sldesc *sl, struct slmsg *slcmd, struct slmsg *slrep)
{
	int error;

	slmsg_put(slcmd);
	if (slrep) {
		spin_lock(&sl->spin);
		if ((sl->flags & SLF_RSHUTDOWN) == 0) {
			TAILQ_INSERT_TAIL(&sl->inq, slrep, tqnode);
			sl->repbytes += slrep->maxsize;
			error = 0;
		} else {
			error = EPIPE;
		}
		spin_unlock(&sl->spin);
		if (error)
			sl->peer->backend_dispose(sl->peer, slrep);
		else if (sl->rwaiters)
			wakeup(&sl->rwaiters);
	}
}

static void
backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg)
{
	slmsg_put(slmsg);
}
/************************************************************************
 *		KERNEL DRIVER OR FILESYSTEM API				*
 ************************************************************************/

/*
 * Create a user<->kernel link, returning the user descriptor in *pfd
 * and the kernel descriptor in *kslp.  0 is returned on success, and an
 * error code is returned on failure.
 */
int
syslink_ukbackend(int *pfd, struct sldesc **kslp)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *fp;
	struct sldesc *usl;
	struct sldesc *ksl;
	int error;
	int fd;

	*pfd = -1;
	*kslp = NULL;

	error = falloc(td->td_lwp, &fp, &fd);
	if (error)
		return(error);
	usl = allocsldesc(NULL);
	usl->backend_wblocked = backend_wblocked_user;
	usl->backend_write = backend_write_user;
	usl->backend_reply = backend_reply_user;
	usl->backend_dispose = backend_dispose_user;

	ksl = allocsldesc(usl->common);
	ksl->peer = usl;
	ksl->backend_wblocked = backend_wblocked_kern;
	ksl->backend_write = backend_write_kern;
	ksl->backend_reply = backend_reply_kern;
	ksl->backend_dispose = backend_dispose_kern;
	usl->peer = ksl;

	setsldescfp(usl, fp);
	fsetfd(fdp, fp, fd);
	fdrop(fp);

	*pfd = fd;
	*kslp = ksl;
	return(0);
}
/*
 * Assign a unique message id, issue a syslink message to userland,
 * and wait for a reply.
 */
int
syslink_kdomsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	struct syslink_msg *msg;
	int error;

	/*
	 * Finish initializing slmsg and post it to the red-black tree for
	 * reply matching.  If the message id is already in use we return
	 * EEXIST, giving the originator the chance to roll a new msgid.
	 */
	msg = slmsg->msg;
	slmsg->msgsize = msg->sm_bytes;
	if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
		return (error);
	msg->sm_msgid = allocsysid();

	/*
	 * Issue the request and wait for a matching reply or failure,
	 * then remove the message from the matching tree and return.
	 */
	error = ksl->peer->backend_write(ksl->peer, slmsg);
	spin_lock(&ksl->spin);
	if (error == 0) {
		while (slmsg->rep == NULL) {
			error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
			/* XXX ignore error for now */
		}
		if (slmsg->rep == (struct slmsg *)-1) {
			error = EIO;
			slmsg->rep = NULL;
		} else {
			error = slmsg->rep->msg->sm_head.se_aux;
		}
	}
	spin_unlock(&ksl->spin);
	return (error);
}
/*
 * Similar to syslink_kdomsg but returns immediately instead of
 * waiting for a reply.  The kernel must supply a callback function
 * which will be made in the context of the user process replying
 * to the message.
 */
int
syslink_ksendmsg(struct sldesc *ksl, struct slmsg *slmsg,
		 void (*func)(struct slmsg *, void *, int), void *arg)
{
	struct syslink_msg *msg;
	int error;

	/*
	 * Finish initializing slmsg and post it to the red-black tree for
	 * reply matching.  If the message id is already in use we return
	 * EEXIST, giving the originator the chance to roll a new msgid.
	 */
	msg = slmsg->msg;
	slmsg->msgsize = msg->sm_bytes;
	slmsg->callback_func = func;
	slmsg->callback_data = arg;
	if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
		return (error);
	msg->sm_msgid = allocsysid();

	/*
	 * Issue the request.  If no error occurred the operation will be
	 * in progress, otherwise the operation is considered to have failed
	 * and the caller can deallocate the slmsg.
	 */
	error = ksl->peer->backend_write(ksl->peer, slmsg);
	return (error);
}
int
syslink_kwaitmsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	int error;

	spin_lock(&ksl->spin);
	while (slmsg->rep == NULL) {
		error = ssleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
		/* XXX ignore error for now */
	}
	if (slmsg->rep == (struct slmsg *)-1) {
		error = EIO;
		slmsg->rep = NULL;
	} else {
		error = slmsg->rep->msg->sm_head.se_aux;
	}
	spin_unlock(&ksl->spin);
	return (error);
}
struct slmsg *
syslink_kallocmsg(void)
{
	return(objcache_get(sl_objcache_small, M_WAITOK));
}

void
syslink_kfreemsg(struct sldesc *ksl, struct slmsg *slmsg)
{
	struct slmsg *rep;

	if ((rep = slmsg->rep) != NULL) {
		slmsg->rep = NULL;
		ksl->peer->backend_dispose(ksl->peer, rep);
	}
	slmsg->callback_func = NULL;
	slmsg_put(slmsg);
}

void
syslink_kshutdown(struct sldesc *ksl, int how)
{
	shutdownsldesc(ksl, how);
}

void
syslink_kclose(struct sldesc *ksl)
{
	shutdownsldesc(ksl, SHUT_RDWR);
	sldrop(ksl);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_pages(struct slmsg *slmsg, struct vm_page **mbase, int npages)
{
	int xflags;
	int error;

	xflags = XIOF_VMLINEAR;
	if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
		xflags |= XIOF_READ | XIOF_WRITE;
	else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
		xflags |= XIOF_READ;
	error = xio_init_pages(&slmsg->xio, mbase, npages, xflags);
	slmsg->flags |= SLMSGF_HASXIO;
	return (error);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_data(struct slmsg *slmsg, char *base, int bytes)
{
	int xflags;

	xflags = XIOF_VMLINEAR;
	if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
		xflags |= XIOF_READ | XIOF_WRITE;
	else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
		xflags |= XIOF_READ;
	xio_init_kbuf(&slmsg->xio, base, bytes);
	slmsg->xio.xio_flags |= xflags;
	slmsg->flags |= SLMSGF_HASXIO;
	return(0);
}
/************************************************************************
 *		BACKEND FUNCTIONS FOR KERNEL API			*
 ************************************************************************
 *
 * These are the backend functions for a sldesc associated with a kernel
 * descriptor.
 */

/*
 * Our peer wants to write a syslink message to us and is asking us to
 * block if our input queue is full.  We don't implement command reception
 * so don't block right now.
 */
static int
backend_wblocked_kern(struct sldesc *ksl, int nbio, sl_proto_t proto)
{
	/* never blocks */
	return(0);
}

/*
 * Our peer is writing a request to the kernel.  At the moment we do not
 * accept commands.
 */
static int
backend_write_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
	return(EOPNOTSUPP);
}

/*
 * Our peer wants to reply to a syslink message we sent it earlier.  The
 * original command (that we passed to our peer), and the peer's reply
 * is specified.  If the peer has failed slrep will be NULL.
 */
static void
backend_reply_kern(struct sldesc *ksl, struct slmsg *slcmd, struct slmsg *slrep)
{
	int error;

	spin_lock(&ksl->spin);
	if (slrep == NULL) {
		slcmd->rep = (struct slmsg *)-1;
		error = EIO;
	} else {
		slcmd->rep = slrep;
		error = slrep->msg->sm_head.se_aux;
	}
	spin_unlock(&ksl->spin);

	/*
	 * Issue callback or wakeup a synchronous waiter.
	 */
	if (slcmd->callback_func) {
		slcmd->callback_func(slcmd, slcmd->callback_data, error);
	} else {
		wakeup(slcmd);
	}
}

/*
 * Any reply messages we sent to our peer are returned to us for disposal.
 * Since we do not currently accept commands from our peer, there will not
 * be any replies returned to the peer to dispose of.
 */
static void
backend_dispose_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
	panic("backend_dispose_kern: kernel can't accept commands so it "
	      "certainly did not reply to one!");
}