/*
 * Copyright (c) 2006-2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_syslink.c,v 1.15 2007/08/13 17:47:19 dillon Exp $
 */
/*
 * This module implements the core syslink() system call and provides
 * glue for kernel syslink frontends and backends, creating an intra-host
 * communications infrastructure and DMA transport abstraction.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/alist.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/xio.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/tree.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/sysref.h>
#include <sys/syslink.h>
#include <sys/syslink_msg.h>
#include <netinet/in.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include "opt_syslink.h"
/*
 * Syslink Connection abstraction
 */
TAILQ_HEAD(slmsgq, slmsg);

struct slcommon {
        struct spinlock spin;
        int     refs;
};

struct sldesc {
        struct slmsgq   inq;                /* incoming messages */
        struct slmsg_rb_tree reply_rb_root; /* replies to requests */
        struct spinlock spin;
        struct sldesc   *peer;          /* peer syslink, if any */
        struct file     *xfp;           /* external file pointer */
        struct slcommon *common;
        int     flags;
        int     rwaiters;               /* number of threads waiting */
        int     wblocked;               /* blocked waiting for us to drain */
        size_t  cmdbytes;               /* unreplied commands pending */
        size_t  repbytes;               /* undrained replies pending */
        int     (*backend_wblocked)(struct sldesc *, int, sl_proto_t);
        int     (*backend_write)(struct sldesc *, struct slmsg *);
        void    (*backend_reply)(struct sldesc *, struct slmsg *,
                                 struct slmsg *);
        void    (*backend_dispose)(struct sldesc *, struct slmsg *);
};

#define SLF_RSHUTDOWN   0x0001
#define SLF_WSHUTDOWN   0x0002
static int syslink_cmd_new(struct syslink_info_new *info, int *result);
static struct sldesc *allocsldesc(struct slcommon *common);
static void setsldescfp(struct sldesc *sl, struct file *fp);
static void shutdownsldesc(struct sldesc *sl, int how);
static void shutdownsldesc2(struct sldesc *sl, int how);
static void sldrop(struct sldesc *sl);
static int syslink_validate_msg(struct syslink_msg *msg, int bytes);
static int syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
                                int swapit, int depth);

static int sl_local_mmap(struct slmsg *slmsg, char *base, size_t len);
static void sl_local_munmap(struct slmsg *slmsg);

static int backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_user(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_user(struct sldesc *sl, struct slmsg *slcmd,
                               struct slmsg *slrep);
static void backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg);

static int backend_wblocked_kern(struct sldesc *sl, int nbio, sl_proto_t proto);
static int backend_write_kern(struct sldesc *sl, struct slmsg *slmsg);
static void backend_reply_kern(struct sldesc *sl, struct slmsg *slcmd,
                               struct slmsg *slrep);
static void backend_dispose_kern(struct sldesc *sl, struct slmsg *slmsg);
static void slmsg_put(struct slmsg *slmsg);
/*
 * Objcache memory backend
 *
 * All three object caches return slmsg structures but each is optimized
 * for syslink message buffers of varying sizes.  We use the slightly
 * more complex ctor/dtor API in order to provide ready-to-go slmsg's.
 */
static struct objcache *sl_objcache_big;
static struct objcache *sl_objcache_small;
static struct objcache *sl_objcache_none;

MALLOC_DEFINE(M_SYSLINK, "syslink", "syslink manager");

static boolean_t slmsg_ctor(void *data, void *private, int ocflags);
static void slmsg_dtor(void *data, void *private);
static void
syslinkinit(void *dummy __unused)
{
        size_t n = sizeof(struct slmsg);

        sl_objcache_none = objcache_create_mbacked(M_SYSLINK, n, 0, 64,
                                                   slmsg_ctor, slmsg_dtor,
                                                   &sl_objcache_none);
        sl_objcache_small = objcache_create_mbacked(M_SYSLINK, n, 0, 64,
                                                   slmsg_ctor, slmsg_dtor,
                                                   &sl_objcache_small);
        sl_objcache_big = objcache_create_mbacked(M_SYSLINK, n, 0, 16,
                                                   slmsg_ctor, slmsg_dtor,
                                                   &sl_objcache_big);
}
static boolean_t
slmsg_ctor(void *data, void *private, int ocflags)
{
        struct slmsg *slmsg = data;

        bzero(slmsg, sizeof(*slmsg));

        slmsg->oc = *(struct objcache **)private;
        if (slmsg->oc == sl_objcache_none) {
                slmsg->maxsize = 0;
        } else if (slmsg->oc == sl_objcache_small) {
                slmsg->maxsize = SLMSG_SMALL;
        } else if (slmsg->oc == sl_objcache_big) {
                slmsg->maxsize = SLMSG_BIG;
        } else {
                panic("slmsg_ctor: bad objcache?");
        }
        if (slmsg->maxsize) {
                slmsg->msg = kmalloc(slmsg->maxsize,
                                     M_SYSLINK, M_WAITOK|M_ZERO);
        }
        xio_init(&slmsg->xio);
        return(TRUE);
}
static void
slmsg_dtor(void *data, void *private)
{
        struct slmsg *slmsg = data;

        if (slmsg->maxsize && slmsg->msg) {
                kfree(slmsg->msg, M_SYSLINK);
                slmsg->msg = NULL;
        }
        slmsg->oc = NULL;
}
SYSINIT(syslink, SI_BOOT2_MACHDEP, SI_ORDER_ANY, syslinkinit, NULL)

static int rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2);
RB_GENERATE2(slmsg_rb_tree, slmsg, rbnode, rb_slmsg_compare,
             sysid_t, msg->sm_msgid);
/*
 * Kernel tunables.
 */
static int syslink_enabled;
SYSCTL_NODE(_kern, OID_AUTO, syslink, CTLFLAG_RW, 0, "Syslink operation");
SYSCTL_INT(_kern_syslink, OID_AUTO, enabled,
            CTLFLAG_RW, &syslink_enabled, 0, "Enable SYSLINK");
static size_t syslink_bufsize = 65536;
SYSCTL_UINT(_kern_syslink, OID_AUTO, bufsize,
            CTLFLAG_RW, &syslink_bufsize, 0, "Maximum buffer size");
/*
 * Fileops API - typically used to glue a userland frontend with a
 * kernel backend.
 */
static int slfileop_read(struct file *fp, struct uio *uio,
                         struct ucred *cred, int flags);
static int slfileop_write(struct file *fp, struct uio *uio,
                         struct ucred *cred, int flags);
static int slfileop_close(struct file *fp);
static int slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred);
static int slfileop_shutdown(struct file *fp, int how);
static int slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data,
                         struct ucred *cred);
static int slfileop_poll(struct file *fp, int events, struct ucred *cred);
static int slfileop_kqfilter(struct file *fp, struct knote *kn);
static struct fileops syslinkops = {
    .fo_read =          slfileop_read,
    .fo_write =         slfileop_write,
    .fo_ioctl =         slfileop_ioctl,
    .fo_poll =          slfileop_poll,
    .fo_kqfilter =      slfileop_kqfilter,
    .fo_stat =          slfileop_stat,
    .fo_close =         slfileop_close,
    .fo_shutdown =      slfileop_shutdown
};
/************************************************************************
 *                      PRIMARY SYSTEM CALL INTERFACE                   *
 ************************************************************************
 *
 * syslink(int cmd, struct syslink_info *info, size_t bytes)
 */
int
sys_syslink(struct syslink_args *uap)
{
        union syslink_info_all info;
        int error;

        /*
         * System call is under construction and disabled by default.
         * Superuser access is also required for now, but eventually
         * will not be needed.
         */
        if (syslink_enabled == 0)
                return (EAUTH);
        error = suser(curthread);
        if (error)
                return (error);

        /*
         * Load and validate the info structure.  Unloaded bytes are zeroed
         * out.  The label field must always be 0-filled, even if not used.
         */
        bzero(&info, sizeof(info));
        error = 0;
        if ((unsigned)uap->bytes <= sizeof(info)) {
                if (uap->bytes)
                        error = copyin(uap->info, &info, uap->bytes);
        } else {
                error = EINVAL;
        }
        if (error)
                return (error);

        /*
         * Process the command
         */
        switch(uap->cmd) {
        case SYSLINK_CMD_NEW:
                error = syslink_cmd_new(&info.cmd_new, &uap->sysmsg_result);
                break;
        default:
                error = EINVAL;
                break;
        }
        if (error == 0 && info.head.wbflag)
                copyout(&info, uap->info, uap->bytes);
        return (error);
}
/*
 * Create a linked pair of descriptors, like a pipe.
 */
static int
syslink_cmd_new(struct syslink_info_new *info, int *result)
{
        struct proc *p = curproc;
        struct file *fp1;
        struct file *fp2;
        struct sldesc *sl;
        struct sldesc *slpeer;
        int error;
        int fd1;
        int fd2;

        error = falloc(p, &fp1, &fd1);
        if (error)
                return(error);
        error = falloc(p, &fp2, &fd2);
        if (error) {
                fsetfd(p, NULL, fd1);
                fdrop(fp1);
                return(error);
        }
        slpeer = allocsldesc(NULL);
        slpeer->backend_wblocked = backend_wblocked_user;
        slpeer->backend_write = backend_write_user;
        slpeer->backend_reply = backend_reply_user;
        slpeer->backend_dispose = backend_dispose_user;
        sl = allocsldesc(slpeer->common);
        sl->peer = slpeer;
        sl->backend_wblocked = backend_wblocked_user;
        sl->backend_write = backend_write_user;
        sl->backend_reply = backend_reply_user;
        sl->backend_dispose = backend_dispose_user;
        slpeer->peer = sl;

        setsldescfp(sl, fp1);
        setsldescfp(slpeer, fp2);

        fsetfd(p, fp1, fd1);
        fdrop(fp1);
        fsetfd(p, fp2, fd2);
        fdrop(fp2);

        info->head.wbflag = 1;  /* write back */
        info->fds[0] = fd1;
        info->fds[1] = fd2;

        return(0);
}
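/*
 * Illustrative userland sketch (hypothetical code, not part of this
 * module): obtaining a linked descriptor pair via SYSLINK_CMD_NEW.  The
 * fds[] writeback assumes the struct syslink_info_new layout from
 * <sys/syslink.h>.
 *
 *      union syslink_info_all info;
 *
 *      bzero(&info, sizeof(info));
 *      if (syslink(SYSLINK_CMD_NEW, &info.head, sizeof(info)) < 0)
 *              err(1, "syslink");
 *      // info.cmd_new.fds[0] and info.cmd_new.fds[1] are now a
 *      // connected pair, analogous to pipe(2) or socketpair(2).
 */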
/************************************************************************
 *                      LOW LEVEL SLDESC SUPPORT                        *
 ************************************************************************
 */
static struct sldesc *
allocsldesc(struct slcommon *common)
{
        struct sldesc *sl;

        sl = kmalloc(sizeof(struct sldesc), M_SYSLINK, M_WAITOK|M_ZERO);
        if (common == NULL)
                common = kmalloc(sizeof(*common), M_SYSLINK, M_WAITOK|M_ZERO);
        TAILQ_INIT(&sl->inq);           /* incoming requests */
        RB_INIT(&sl->reply_rb_root);    /* match incoming replies */
        spin_init(&sl->spin);
        sl->common = common;
        ++common->refs;
        return(sl);
}
static void
setsldescfp(struct sldesc *sl, struct file *fp)
{
        sl->xfp = fp;
        fp->f_type = DTYPE_SYSLINK;
        fp->f_flag = FREAD | FWRITE;
        fp->f_ops = &syslinkops;
        fp->f_data = sl;
}
/*
 * Red-black tree compare function
 */
static int
rb_slmsg_compare(struct slmsg *msg1, struct slmsg *msg2)
{
        if (msg1->msg->sm_msgid < msg2->msg->sm_msgid)
                return(-1);
        if (msg1->msg->sm_msgid == msg2->msg->sm_msgid)
                return(0);
        return(1);
}
/*
 * Shut down the descriptor in the requested direction(s) and flush any
 * queued messages back to their originator.
 */
static void
shutdownsldesc(struct sldesc *sl, int how)
{
        struct slmsg *slmsg;
        int rhow;

        shutdownsldesc2(sl, how);

        /*
         * Return unread and unreplied messages
         */
        spin_lock_wr(&sl->spin);
        while ((slmsg = TAILQ_FIRST(&sl->inq)) != NULL) {
                TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
                spin_unlock_wr(&sl->spin);
                if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
                        sl->repbytes -= slmsg->maxsize;
                        slmsg->flags &= ~SLMSGF_ONINQ;
                        sl->peer->backend_dispose(sl->peer, slmsg);
                }
                /* leave ONINQ set for commands, it will be cleared below */
                spin_lock_wr(&sl->spin);
        }
        while ((slmsg = RB_ROOT(&sl->reply_rb_root)) != NULL) {
                RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slmsg);
                sl->cmdbytes -= slmsg->maxsize;
                spin_unlock_wr(&sl->spin);
                slmsg->flags &= ~SLMSGF_ONINQ;
                sl->peer->backend_reply(sl->peer, slmsg, NULL);
                spin_lock_wr(&sl->spin);
        }
        spin_unlock_wr(&sl->spin);

        /*
         * Call shutdown on the peer with the opposite flags
         */
        rhow = 0;
        switch(how) {
        case SHUT_RD:
                rhow = SHUT_WR;
                break;
        case SHUT_WR:
                rhow = SHUT_RD;
                break;
        case SHUT_RDWR:
                rhow = SHUT_RDWR;
                break;
        }
        shutdownsldesc2(sl->peer, rhow);
}
static void
shutdownsldesc2(struct sldesc *sl, int how)
{
        spin_lock_wr(&sl->spin);
        switch(how) {
        case SHUT_RD:
                sl->flags |= SLF_RSHUTDOWN;
                break;
        case SHUT_WR:
                sl->flags |= SLF_WSHUTDOWN;
                break;
        case SHUT_RDWR:
                sl->flags |= SLF_RSHUTDOWN | SLF_WSHUTDOWN;
                break;
        }
        spin_unlock_wr(&sl->spin);

        /*
         * Handle signaling on the user side
         */
        if (how == SHUT_RD || how == SHUT_RDWR) {
                if (sl->rwaiters)
                        wakeup(&sl->rwaiters);
        }
        if (how == SHUT_WR || how == SHUT_RDWR) {
                if (sl->wblocked) {
                        sl->wblocked = 0;       /* race ok */
                        wakeup(&sl->wblocked);
                }
        }
}
static void
sldrop(struct sldesc *sl)
{
        struct sldesc *slpeer;

        spin_lock_wr(&sl->common->spin);
        if (--sl->common->refs == 0) {
                spin_unlock_wr(&sl->common->spin);
                if ((slpeer = sl->peer) != NULL) {
                        sl->peer = NULL;
                        slpeer->peer = NULL;
                        slpeer->common = NULL;
                        KKASSERT(slpeer->xfp == NULL);
                        KKASSERT(TAILQ_EMPTY(&slpeer->inq));
                        KKASSERT(RB_EMPTY(&slpeer->reply_rb_root));
                        kfree(slpeer, M_SYSLINK);
                }
                KKASSERT(sl->xfp == NULL);
                KKASSERT(TAILQ_EMPTY(&sl->inq));
                KKASSERT(RB_EMPTY(&sl->reply_rb_root));
                kfree(sl->common, M_SYSLINK);
                sl->common = NULL;
                kfree(sl, M_SYSLINK);
        } else {
                spin_unlock_wr(&sl->common->spin);
        }
}
static void
slmsg_put(struct slmsg *slmsg)
{
        if (slmsg->flags & SLMSGF_HASXIO) {
                slmsg->flags &= ~SLMSGF_HASXIO;
                get_mplock();
                xio_release(&slmsg->xio);
                rel_mplock();
        }
        slmsg->flags &= ~SLMSGF_LINMAP;
        objcache_put(slmsg->oc, slmsg);
}
/************************************************************************
 *                              FILEOPS API                             *
 ************************************************************************
 *
 * Implement userland fileops.
 */
static int
slfileop_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct sldesc *sl = fp->f_data;         /* fp refed on call */
        struct slmsg *slmsg;
        struct iovec *iov0;
        struct iovec *iov1;
        struct syslink_msg *wmsg;
        int error;
        int nbio;

        /*
         * Kinda messy.  Figure out the non-blocking state
         */
        if (flags & O_FBLOCKING)
                nbio = 0;
        else if (flags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        /*
         * Validate the uio.
         *
         * iov0 - message buffer
         * iov1 - DMA buffer or backup buffer
         */
        if (uio->uio_iovcnt < 1) {
                error = EINVAL;
                goto done2;
        }
        iov0 = &uio->uio_iov[0];
        if (uio->uio_iovcnt > 2) {
                error = EINVAL;
                goto done2;
        }

        /*
         * Get a message, blocking if necessary.
         */
        spin_lock_wr(&sl->spin);
        while ((slmsg = TAILQ_FIRST(&sl->inq)) == NULL) {
                if (sl->flags & SLF_RSHUTDOWN) {
                        error = 0;
                        goto done1;
                }
                if (nbio) {
                        error = EAGAIN;
                        goto done1;
                }
                ++sl->rwaiters;
                error = msleep(&sl->rwaiters, &sl->spin, PCATCH, "slrmsg", 0);
                --sl->rwaiters;
                if (error)
                        goto done1;
        }
        wmsg = slmsg->msg;

        /*
         * We have a message and still hold the spinlock.  Make sure the
         * uio has enough room to hold the message.
         *
         * Note that replies do not have XIOs.
         */
        if (slmsg->msgsize > iov0->iov_len) {
                error = ENOSPC;
                goto done1;
        }
        if (slmsg->xio.xio_bytes) {
                if (uio->uio_iovcnt != 2) {
                        error = ENOSPC;
                        goto done1;
                }
                iov1 = &uio->uio_iov[1];
                if (slmsg->xio.xio_bytes > iov1->iov_len) {
                        error = ENOSPC;
                        goto done1;
                }
        } else {
                iov1 = NULL;
        }

        /*
         * Dequeue the message.  Adjust repbytes immediately.  cmdbytes
         * are adjusted when the command is replied to, not here.
         */
        TAILQ_REMOVE(&sl->inq, slmsg, tqnode);
        if (slmsg->msg->sm_proto & SM_PROTO_REPLY)
                sl->repbytes -= slmsg->maxsize;
        spin_unlock_wr(&sl->spin);

        /*
         * Load the message data into the user buffer.
         *
         * If receiving a command an XIO may exist specifying a DMA buffer.
         * For commands, if DMAW is set we have to copy or map the buffer
         * so the caller can access the data being written.  If DMAR is set
         * we do not have to copy but we still must map the buffer so the
         * caller can directly fill in the data being requested.
         */
        error = uiomove((void *)slmsg->msg, slmsg->msgsize, uio);
        if (error == 0 && slmsg->xio.xio_bytes &&
            (wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
                if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW) {
                        /*
                         * Data being passed to caller or being passed in both
                         * directions, copy or map.
                         */
                        get_mplock();
                        if ((flags & O_MAPONREAD) &&
                            (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
                                error = sl_local_mmap(slmsg,
                                                      iov1->iov_base,
                                                      iov1->iov_len);
                                if (error) {
                                        error = xio_copy_xtou(&slmsg->xio, 0,
                                                      iov1->iov_base,
                                                      slmsg->xio.xio_bytes);
                                }
                        } else {
                                error = xio_copy_xtou(&slmsg->xio, 0,
                                                      iov1->iov_base,
                                                      slmsg->xio.xio_bytes);
                        }
                        rel_mplock();
                } else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR) {
                        /*
                         * Data will be passed back to originator, map
                         * the buffer if we can, else use the backup
                         * buffer at the same VA supplied by the caller.
                         */
                        get_mplock();
                        if ((flags & O_MAPONREAD) &&
                            (slmsg->xio.xio_flags & XIOF_VMLINEAR)) {
                                error = sl_local_mmap(slmsg,
                                                      iov1->iov_base,
                                                      iov1->iov_len);
                                error = 0; /* ignore errors */
                        }
                        rel_mplock();
                }
        }

        /*
         * Clean up.
         */
        if (error) {
                /*
                 * Requeue the message if we could not read it successfully
                 */
                spin_lock_wr(&sl->spin);
                TAILQ_INSERT_HEAD(&sl->inq, slmsg, tqnode);
                slmsg->flags |= SLMSGF_ONINQ;
                spin_unlock_wr(&sl->spin);
        } else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
                /*
                 * Dispose of any received reply after we've copied it
                 * to userland.  We don't need the slmsg any more.
                 */
                slmsg->flags &= ~SLMSGF_ONINQ;
                sl->peer->backend_dispose(sl->peer, slmsg);
                if (sl->wblocked && sl->repbytes < syslink_bufsize) {
                        sl->wblocked = 0;       /* MP race ok here */
                        wakeup(&sl->wblocked);
                }
        } else {
                /*
                 * Leave the command in the RB tree but clear ONINQ now
                 * that we have returned it to userland so userland can
                 * reply to it.
                 */
                slmsg->flags &= ~SLMSGF_ONINQ;
        }
        return(error);
done1:
        spin_unlock_wr(&sl->spin);
done2:
        return(error);
}
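/*
 * Illustrative read sketch (hypothetical userland code): iov[0] receives
 * the wire message, iov[1] optionally supplies a page-aligned buffer for
 * any DMA payload attached to a command.
 *
 *      char msgbuf[SLMSG_BIG];
 *      struct iovec iov[2];
 *
 *      iov[0].iov_base = msgbuf;
 *      iov[0].iov_len = sizeof(msgbuf);
 *      iov[1].iov_base = dmabuf;       // page-aligned
 *      iov[1].iov_len = dmalen;
 *      n = readv(fd, iov, 2);
 */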
/*
 * Userland writes syslink message (optionally with DMA buffer in iov[1]).
 */
static int
slfileop_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
        struct sldesc *sl = fp->f_data;
        struct slmsg *slmsg;
        struct slmsg *slcmd;
        struct syslink_msg sltmp;
        struct syslink_msg *wmsg;       /* wire message */
        struct iovec *iov0;
        struct iovec *iov1;
        sl_proto_t proto;
        size_t count;
        int nbio;
        int error;
        int xflags;

        /*
         * Kinda messy.  Figure out the non-blocking state
         */
        if (flags & O_FBLOCKING)
                nbio = 0;
        else if (flags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;

        /*
         * Validate the uio
         */
        if (uio->uio_iovcnt < 1) {
                error = EINVAL;
                goto done2;
        }
        iov0 = &uio->uio_iov[0];
        if (iov0->iov_len > SLMSG_BIG) {
                error = EFBIG;
                goto done2;
        }
        if (uio->uio_iovcnt > 2) {
                error = EFBIG;
                goto done2;
        }
        if (uio->uio_iovcnt > 1) {
                iov1 = &uio->uio_iov[1];
                if (iov1->iov_len > XIO_INTERNAL_SIZE) {
                        error = EFBIG;
                        goto done2;
                }
                if ((intptr_t)iov1->iov_base & PAGE_MASK) {
                        error = EINVAL;
                        goto done2;
                }
        } else {
                iov1 = NULL;
        }

        /*
         * Handle the buffer-full case.  slpeer cmdbytes is managed
         * by the backend function, not us, so if the callback just
         * directly implements the message and never adjusts cmdbytes,
         * we will never sleep here.
         */
        if (sl->flags & SLF_WSHUTDOWN) {
                error = EPIPE;
                goto done2;
        }

        /*
         * Only commands can block the pipe, not replies.  Otherwise a
         * deadlock is possible.
         */
        error = copyin(iov0->iov_base, &sltmp, sizeof(sltmp));
        if (error)
                goto done2;
        if ((proto = sltmp.sm_proto) & SM_PROTO_ENDIAN_REV)
                proto = bswap16(proto);
        error = sl->peer->backend_wblocked(sl->peer, nbio, proto);
        if (error)
                goto done2;

        /*
         * Allocate a slmsg and load the message.  Note that the bytes
         * returned to userland only reflects the primary syslink message
         * and does not include any DMA buffers.
         */
        if (iov0->iov_len <= SLMSG_SMALL)
                slmsg = objcache_get(sl_objcache_small, M_WAITOK);
        else
                slmsg = objcache_get(sl_objcache_big, M_WAITOK);
        slmsg->msgsize = iov0->iov_len;
        wmsg = slmsg->msg;

        error = uiomove((void *)wmsg, iov0->iov_len, uio);
        if (error)
                goto done1;
        error = syslink_validate_msg(wmsg, slmsg->msgsize);
        if (error)
                goto done1;

        if ((wmsg->sm_head.se_cmd & SE_CMDF_REPLY) == 0) {
                /*
                 * Install the XIO for commands if any DMA flags are set.
                 *
                 * XIOF_VMLINEAR requires that the XIO represent a
                 * contiguous set of pages associated with a single VM
                 * object (so the reader side can mmap it easily).
                 *
                 * XIOF_VMLINEAR might not be set when the kernel sends
                 * commands to userland so the reader side backs off to
                 * a backup buffer if it isn't set, but we require it
                 * for userland writes.
                 */
                xflags = XIOF_VMLINEAR;
                if (wmsg->sm_head.se_cmd & SE_CMDF_DMAR)
                        xflags |= XIOF_READ | XIOF_WRITE;
                else if (wmsg->sm_head.se_cmd & SE_CMDF_DMAW)
                        xflags |= XIOF_READ;
                if (xflags && iov1) {
                        get_mplock();
                        error = xio_init_ubuf(&slmsg->xio, iov1->iov_base,
                                              iov1->iov_len, xflags);
                        rel_mplock();
                        if (error)
                                goto done1;
                        slmsg->flags |= SLMSGF_HASXIO;
                }
                error = sl->peer->backend_write(sl->peer, slmsg);
        } else {
                /*
                 * Replies have to be matched up against received commands.
                 */
                spin_lock_wr(&sl->spin);
                slcmd = slmsg_rb_tree_RB_LOOKUP(&sl->reply_rb_root,
                                                slmsg->msg->sm_msgid);
                if (slcmd == NULL || (slcmd->flags & SLMSGF_ONINQ)) {
                        error = ENOENT;
                        spin_unlock_wr(&sl->spin);
                        goto done1;
                }
                RB_REMOVE(slmsg_rb_tree, &sl->reply_rb_root, slcmd);
                sl->cmdbytes -= slcmd->maxsize;
                spin_unlock_wr(&sl->spin);

                /*
                 * If the original command specified DMAR, has an xio, and
                 * our write specifies a DMA buffer, then we can do a
                 * copyback.  But if we are linearly mapped and the caller
                 * is using the map base address, then the caller filled in
                 * the data via the direct memory map and no copyback is
                 * needed.
                 */
                if ((slcmd->msg->sm_head.se_cmd & SE_CMDF_DMAR) && iov1 &&
                    (slcmd->flags & SLMSGF_HASXIO) &&
                    ((slcmd->flags & SLMSGF_LINMAP) == 0 ||
                     iov1->iov_base != slcmd->vmbase)
                ) {
                        if (iov1->iov_len > slcmd->xio.xio_bytes)
                                count = slcmd->xio.xio_bytes;
                        else
                                count = iov1->iov_len;
                        get_mplock();
                        error = xio_copy_utox(&slcmd->xio, 0, iov1->iov_base,
                                              count);
                        rel_mplock();
                }

                /*
                 * If we had mapped a DMA buffer, remove it
                 */
                if (slcmd->flags & SLMSGF_LINMAP) {
                        get_mplock();
                        sl_local_munmap(slcmd);
                        rel_mplock();
                }

                /*
                 * Reply and handle unblocking
                 */
                sl->peer->backend_reply(sl->peer, slcmd, slmsg);
                if (sl->wblocked && sl->cmdbytes < syslink_bufsize) {
                        sl->wblocked = 0;       /* MP race ok here */
                        wakeup(&sl->wblocked);
                }

                /*
                 * slmsg has already been dealt with, make sure error is
                 * 0 so we do not double-free it.
                 */
                error = 0;
        }
done1:
        if (error)
                slmsg_put(slmsg);
done2:
        return(error);
}
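/*
 * Illustrative reply sketch (hypothetical userland code): a command read
 * from the descriptor is replied to by writing a message back with the
 * same sm_msgid and the reply bits set, which is what the RB tree lookup
 * above matches on.  The exact bit handling is an assumption based on
 * the checks in this file.
 *
 *      struct syslink_msg *msg = (void *)msgbuf;       // from readv()
 *
 *      msg->sm_head.se_cmd |= SE_CMDF_REPLY;
 *      msg->sm_proto |= SM_PROTO_REPLY;
 *      iov[0].iov_base = msg;
 *      iov[0].iov_len = msg->sm_bytes;
 *      writev(fd, iov, 1);
 */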
/*
 * Close a syslink descriptor.
 *
 * Disassociate the syslink from the file descriptor and disconnect from
 * any peer.
 */
static int
slfileop_close(struct file *fp)
{
        struct sldesc *sl;

        /*
         * Disassociate the file pointer.  Take ownership of the ref on the
         * sldesc.
         */
        sl = fp->f_data;
        fp->f_data = NULL;
        fp->f_ops = &badfileops;
        sl->xfp = NULL;

        /*
         * Shutdown both directions.  The other side will not issue API
         * calls to us after we've shutdown both directions.
         */
        shutdownsldesc(sl, SHUT_RDWR);

        /*
         * Cleanup
         */
        KKASSERT(sl->cmdbytes == 0);
        KKASSERT(sl->repbytes == 0);
        sldrop(sl);
        return(0);
}
static int
slfileop_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
        return(EINVAL);
}

static int
slfileop_shutdown(struct file *fp, int how)
{
        shutdownsldesc((struct sldesc *)fp->f_data, how);
        return(0);
}

static int
slfileop_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
        return(EINVAL);
}

static int
slfileop_poll(struct file *fp, int events, struct ucred *cred)
{
        return(0);
}

static int
slfileop_kqfilter(struct file *fp, struct knote *kn)
{
        return(0);
}
/************************************************************************
 *                      LOCAL MEMORY MAPPING                            *
 ************************************************************************
 *
 * This feature is currently not implemented
 */
static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
        return (EOPNOTSUPP);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
}

#if 0
static int
sl_local_mmap(struct slmsg *slmsg, char *base, size_t len)
{
        struct vmspace *vms = curproc->p_vmspace;
        vm_offset_t addr = (vm_offset_t)base;
        int error;

        /* XXX check user address range */
        error = vm_map_replace(
                        &vms->vm_map,
                        (vm_offset_t)base, (vm_offset_t)base + len,
                        slmsg->xio.xio_pages[0]->object,
                        slmsg->xio.xio_pages[0]->pindex << PAGE_SHIFT,
                        VM_PROT_READ|VM_PROT_WRITE,
                        VM_PROT_READ|VM_PROT_WRITE,
                        MAP_DISABLE_SYNCER
        );
        if (error == 0) {
                slmsg->flags |= SLMSGF_LINMAP;
                slmsg->vmbase = base;
                slmsg->vmsize = len;
        }
        return (error);
}

static void
sl_local_munmap(struct slmsg *slmsg)
{
        if (slmsg->flags & SLMSGF_LINMAP) {
                vm_map_remove(&curproc->p_vmspace->vm_map,
                              slmsg->vmbase,
                              slmsg->vmbase + slmsg->vmsize);
                slmsg->flags &= ~SLMSGF_LINMAP;
        }
}

#endif
/************************************************************************
 *                      MESSAGE VALIDATION                              *
 ************************************************************************
 *
 * Validate the syslink message.  Check that all headers and elements
 * conform.  Correct the endian if necessary.
 *
 * NOTE: If reverse endian needs to be corrected, SE_CMDF_UNTRANSLATED
 * is recursively flipped on all syslink_elm's in the message.  As the
 * message traverses the mesh, multiple flips may occur.  It is
 * up to the RPC protocol layer to correct opaque data payloads and
 * SE_CMDF_UNTRANSLATED prevents the protocol layer from misinterpreting
 * a command or reply element which has not been endian-corrected.
 */
static int
syslink_validate_msg(struct syslink_msg *msg, int bytes)
{
        int aligned_reclen;
        int swapit;
        int error;

        /*
         * The raw message must be properly-aligned.
         */
        if (bytes & SL_ALIGNMASK)
                return (EINVAL);

        while (bytes) {
                /*
                 * The message must at least contain the msgid, bytes, and
                 * protocol fields.
                 */
                if (bytes < SL_MIN_PAD_SIZE)
                        return (EINVAL);

                /*
                 * Fix the endian if it is reversed.
                 */
                if (msg->sm_proto & SM_PROTO_ENDIAN_REV) {
                        msg->sm_msgid = bswap64(msg->sm_msgid);
                        msg->sm_sessid = bswap64(msg->sm_sessid);
                        msg->sm_bytes = bswap16(msg->sm_bytes);
                        msg->sm_proto = bswap16(msg->sm_proto);
                        msg->sm_rlabel = bswap32(msg->sm_rlabel);
                        if (msg->sm_proto & SM_PROTO_ENDIAN_REV)
                                return (EINVAL);
                        swapit = 1;
                } else {
                        swapit = 0;
                }

                /*
                 * Validate the contents.  For PADs, the entire payload is
                 * ignored and the minimum message size can be as small as
                 * SL_MIN_PAD_SIZE.
                 */
                if (msg->sm_proto == SMPROTO_PAD) {
                        if (msg->sm_bytes < SL_MIN_PAD_SIZE ||
                            msg->sm_bytes > bytes) {
                                return (EINVAL);
                        }
                        /* ignore the entire payload, it can be garbage */
                } else {
                        if (msg->sm_bytes < SL_MIN_MSG_SIZE ||
                            msg->sm_bytes > bytes) {
                                return (EINVAL);
                        }
                        error = syslink_validate_elm(
                                    &msg->sm_head,
                                    msg->sm_bytes -
                                        offsetof(struct syslink_msg,
                                                 sm_head),
                                    swapit, SL_MAXDEPTH);
                        if (error)
                                return (error);
                }

                /*
                 * The aligned payload size must be used to locate the
                 * next syslink_msg in the buffer.
                 */
                aligned_reclen = SL_MSG_ALIGN(msg->sm_bytes);
                bytes -= aligned_reclen;
                msg = (void *)((char *)msg + aligned_reclen);
        }
        return (0);
}
static int
syslink_validate_elm(struct syslink_elm *elm, sl_reclen_t bytes,
                     int swapit, int depth)
{
        int aligned_reclen;

        /*
         * If the buffer isn't big enough to fit the header, stop now!
         */
        if (bytes < SL_MIN_ELM_SIZE)
                return (EINVAL);

        /*
         * All syslink_elm headers are recursively endian-adjusted.  Opaque
         * data payloads are not.
         */
        if (swapit) {
                elm->se_cmd = bswap16(elm->se_cmd) ^ SE_CMDF_UNTRANSLATED;
                elm->se_bytes = bswap16(elm->se_bytes);
                elm->se_aux = bswap32(elm->se_aux);
        }

        /*
         * Check element size requirements.
         */
        if (elm->se_bytes < SL_MIN_ELM_SIZE || elm->se_bytes > bytes)
                return (EINVAL);

        /*
         * Recursively check structured payloads.  A structured payload may
         * contain as few as 0 recursive elements.
         */
        if (elm->se_cmd & SE_CMDF_STRUCTURED) {
                if (depth == 0)
                        return (EINVAL);
                bytes -= SL_MIN_ELM_SIZE;
                ++elm;
                while (bytes > 0) {
                        if (syslink_validate_elm(elm, bytes, swapit, depth - 1))
                                return (EINVAL);
                        aligned_reclen = SL_MSG_ALIGN(elm->se_bytes);
                        elm = (void *)((char *)elm + aligned_reclen);
                        bytes -= aligned_reclen;
                }
        }
        return (0);
}
/************************************************************************
 *                 BACKEND FUNCTIONS - USER DESCRIPTOR                  *
 ************************************************************************
 *
 * Peer backend links are primarily used when userland creates a pair
 * of linked descriptors.
 */

/*
 * Do any required blocking / nbio handling for attempts to write to
 * a sldesc associated with a user descriptor.
 */
static int
backend_wblocked_user(struct sldesc *sl, int nbio, sl_proto_t proto)
{
        int error = 0;
        size_t *bytesp = (proto & SM_PROTO_REPLY) ? &sl->repbytes :
                                                    &sl->cmdbytes;

        /*
         * Block until sufficient data is drained by the target.  It is
         * ok to have a MP race against cmdbytes.
         */
        if (*bytesp >= syslink_bufsize) {
                spin_lock_wr(&sl->spin);
                while (*bytesp >= syslink_bufsize) {
                        if (sl->flags & SLF_WSHUTDOWN) {
                                error = EPIPE;
                                break;
                        }
                        if (nbio) {
                                error = EAGAIN;
                                break;
                        }
                        sl->wblocked = 1;
                        error = msleep(&sl->wblocked, &sl->spin,
                                       PCATCH, "slwmsg", 0);
                        if (error)
                                break;
                }
                spin_unlock_wr(&sl->spin);
        }
        return (error);
}
/*
 * Unconditionally write a syslink message to the sldesc associated with
 * a user descriptor.  Command messages are also placed in a red-black
 * tree so their DMA tag (if any) can be accessed and so they can be
 * linked to any reply message.
 */
static int
backend_write_user(struct sldesc *sl, struct slmsg *slmsg)
{
        int error;

        spin_lock_wr(&sl->spin);
        if (sl->flags & SLF_RSHUTDOWN) {
                /*
                 * Not accepting new messages
                 */
                error = EPIPE;
        } else if (slmsg->msg->sm_proto & SM_PROTO_REPLY) {
                /*
                 * Write a reply
                 */
                TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
                sl->repbytes += slmsg->maxsize;
                slmsg->flags |= SLMSGF_ONINQ;
                error = 0;
        } else if (RB_INSERT(slmsg_rb_tree, &sl->reply_rb_root, slmsg)) {
                /*
                 * Write a command, but there was a msgid collision when
                 * we tried to insert it into the RB tree.
                 */
                error = EEXIST;
        } else {
                /*
                 * Write a command, successful insertion into the RB tree.
                 */
                TAILQ_INSERT_TAIL(&sl->inq, slmsg, tqnode);
                sl->cmdbytes += slmsg->maxsize;
                slmsg->flags |= SLMSGF_ONINQ;
                error = 0;
        }
        spin_unlock_wr(&sl->spin);
        if (sl->rwaiters)
                wakeup(&sl->rwaiters);
        return(error);
}
/*
 * Our peer is replying to a command we previously sent it, passing the
 * original command back to us along with the reply message (if not NULL).
 * We just queue the reply to userland and free the command.
 */
static void
backend_reply_user(struct sldesc *sl, struct slmsg *slcmd, struct slmsg *slrep)
{
        int error;

        slmsg_put(slcmd);
        if (slrep) {
                spin_lock_wr(&sl->spin);
                if ((sl->flags & SLF_RSHUTDOWN) == 0) {
                        TAILQ_INSERT_TAIL(&sl->inq, slrep, tqnode);
                        sl->repbytes += slrep->maxsize;
                        error = 0;
                } else {
                        error = EPIPE;
                }
                spin_unlock_wr(&sl->spin);
                if (error)
                        sl->peer->backend_dispose(sl->peer, slrep);
                else if (sl->rwaiters)
                        wakeup(&sl->rwaiters);
        }
}

static void
backend_dispose_user(struct sldesc *sl, struct slmsg *slmsg)
{
        slmsg_put(slmsg);
}
/************************************************************************
 *                 KERNEL DRIVER OR FILESYSTEM API                      *
 ************************************************************************
 */

/*
 * Create a user<->kernel link, returning the user descriptor in *fdp
 * and the kernel descriptor in *kslp.  0 is returned on success, and an
 * error code is returned on failure.
 */
int
syslink_ukbackend(int *fdp, struct sldesc **kslp)
{
        struct proc *p = curproc;
        struct file *fp;
        struct sldesc *usl;
        struct sldesc *ksl;
        int error;
        int fd;

        *fdp = -1;
        *kslp = NULL;

        error = falloc(p, &fp, &fd);
        if (error)
                return(error);
        usl = allocsldesc(NULL);
        usl->backend_wblocked = backend_wblocked_user;
        usl->backend_write = backend_write_user;
        usl->backend_reply = backend_reply_user;
        usl->backend_dispose = backend_dispose_user;

        ksl = allocsldesc(usl->common);
        ksl->peer = usl;
        ksl->backend_wblocked = backend_wblocked_kern;
        ksl->backend_write = backend_write_kern;
        ksl->backend_reply = backend_reply_kern;
        ksl->backend_dispose = backend_dispose_kern;

        usl->peer = ksl;

        setsldescfp(usl, fp);
        fsetfd(p, fp, fd);
        fdrop(fp);

        *fdp = fd;
        *kslp = ksl;
        return(0);
}
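/*
 * Illustrative kernel-side sketch (hypothetical driver code): create a
 * user<->kernel link, issue one synchronous message, then tear down.
 *
 *      struct sldesc *ksl;
 *      struct slmsg *slmsg;
 *      int fd, error;
 *
 *      error = syslink_ukbackend(&fd, &ksl);   // hand fd to userland
 *      slmsg = syslink_kallocmsg();
 *      // ...fill in slmsg->msg (sm_bytes, sm_proto, elements)...
 *      error = syslink_kdomsg(ksl, slmsg);     // blocks for the reply
 *      syslink_kfreemsg(ksl, slmsg);
 *      syslink_kclose(ksl);
 */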
/*
 * Assign a unique message id, issue a syslink message to userland,
 * and wait for a reply.
 */
int
syslink_kdomsg(struct sldesc *ksl, struct slmsg *slmsg)
{
        struct syslink_msg *msg;
        int error;

        /*
         * Finish initializing slmsg and post it to the red-black tree for
         * reply matching.  If the message id is already in use we return
         * EEXIST, giving the originator the chance to roll a new msgid.
         */
        msg = slmsg->msg;
        slmsg->msgsize = msg->sm_bytes;
        if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
                return (error);
        msg->sm_msgid = allocsysid();

        /*
         * Issue the request and wait for a matching reply or failure,
         * then remove the message from the matching tree and return.
         */
        error = ksl->peer->backend_write(ksl->peer, slmsg);
        spin_lock_wr(&ksl->spin);
        if (error == 0) {
                while (slmsg->rep == NULL) {
                        error = msleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
                        /* XXX ignore error for now */
                }
                if (slmsg->rep == (struct slmsg *)-1) {
                        error = EIO;
                        slmsg->rep = NULL;
                } else {
                        error = slmsg->rep->msg->sm_head.se_aux;
                }
        }
        spin_unlock_wr(&ksl->spin);
        return (error);
}
/*
 * Similar to syslink_kdomsg but return immediately instead of
 * waiting for a reply.  The kernel must supply a callback function
 * which will be made in the context of the user process replying
 * to the message.
 */
int
syslink_ksendmsg(struct sldesc *ksl, struct slmsg *slmsg,
                 void (*func)(struct slmsg *, void *, int), void *arg)
{
        struct syslink_msg *msg;
        int error;

        /*
         * Finish initializing slmsg and post it to the red-black tree for
         * reply matching.  If the message id is already in use we return
         * EEXIST, giving the originator the chance to roll a new msgid.
         */
        msg = slmsg->msg;
        slmsg->msgsize = msg->sm_bytes;
        slmsg->callback_func = func;
        slmsg->callback_data = arg;
        if ((error = syslink_validate_msg(msg, msg->sm_bytes)) != 0)
                return (error);
        msg->sm_msgid = allocsysid();

        /*
         * Issue the request.  If no error occurred the operation will be
         * in progress, otherwise the operation is considered to have failed
         * and the caller can deallocate the slmsg.
         */
        error = ksl->peer->backend_write(ksl->peer, slmsg);
        return (error);
}
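/*
 * Illustrative async sketch (hypothetical names): the callback runs in
 * the context of the user process issuing the reply; alternatively the
 * caller may block later via syslink_kwaitmsg().
 *
 *      static void
 *      my_reply_done(struct slmsg *slmsg, void *arg, int error)
 *      {
 *              // inspect slmsg->rep, then release with syslink_kfreemsg()
 *      }
 *
 *      error = syslink_ksendmsg(ksl, slmsg, my_reply_done, arg);
 */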
/*
 * Wait for a reply to a message previously sent with syslink_ksendmsg().
 */
int
syslink_kwaitmsg(struct sldesc *ksl, struct slmsg *slmsg)
{
        int error;

        spin_lock_wr(&ksl->spin);
        while (slmsg->rep == NULL) {
                error = msleep(slmsg, &ksl->spin, 0, "kwtmsg", 0);
                /* XXX ignore error for now */
        }
        if (slmsg->rep == (struct slmsg *)-1) {
                error = EIO;
                slmsg->rep = NULL;
        } else {
                error = slmsg->rep->msg->sm_head.se_aux;
        }
        spin_unlock_wr(&ksl->spin);
        return (error);
}
struct slmsg *
syslink_kallocmsg(void)
{
        return(objcache_get(sl_objcache_small, M_WAITOK));
}

void
syslink_kfreemsg(struct sldesc *ksl, struct slmsg *slmsg)
{
        struct slmsg *rep;

        if ((rep = slmsg->rep) != NULL) {
                slmsg->rep = NULL;
                ksl->peer->backend_dispose(ksl->peer, rep);
        }
        slmsg->callback_func = NULL;
        slmsg_put(slmsg);
}
void
syslink_kshutdown(struct sldesc *ksl, int how)
{
        shutdownsldesc(ksl, how);
}

void
syslink_kclose(struct sldesc *ksl)
{
        shutdownsldesc(ksl, SHUT_RDWR);
        sldrop(ksl);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_pages(struct slmsg *slmsg, struct vm_page **mbase, int npages)
{
        int xflags;
        int error;

        xflags = XIOF_VMLINEAR;
        if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
                xflags |= XIOF_READ | XIOF_WRITE;
        else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
                xflags |= XIOF_READ;
        error = xio_init_pages(&slmsg->xio, mbase, npages, xflags);
        slmsg->flags |= SLMSGF_HASXIO;
        return (error);
}
/*
 * Associate a DMA buffer with a kernel syslink message prior to it
 * being sent to userland.  The DMA buffer is set up from the point
 * of view of the target.
 */
int
syslink_kdmabuf_data(struct slmsg *slmsg, char *base, int bytes)
{
        int xflags;

        xflags = XIOF_VMLINEAR;
        if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAR)
                xflags |= XIOF_READ | XIOF_WRITE;
        else if (slmsg->msg->sm_head.se_cmd & SE_CMDF_DMAW)
                xflags |= XIOF_READ;
        xio_init_kbuf(&slmsg->xio, base, bytes);
        slmsg->xio.xio_flags |= xflags;
        slmsg->flags |= SLMSGF_HASXIO;
        return(0);
}
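/*
 * Illustrative sketch: attach a kernel buffer as the DMA payload of a
 * command whose se_cmd carries SE_CMDF_DMAW, then issue it.
 *
 *      syslink_kdmabuf_data(slmsg, kbuf, kbytes);
 *      error = syslink_kdomsg(ksl, slmsg);
 */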
/************************************************************************
 *                 BACKEND FUNCTIONS FOR KERNEL API                     *
 ************************************************************************
 *
 * These are the backend functions for a sldesc associated with a kernel
 * descriptor.
 */

/*
 * Our peer wants to write a syslink message to us and is asking us to
 * block if our input queue is full.  We don't implement command reception
 * so don't block right now.
 */
static int
backend_wblocked_kern(struct sldesc *ksl, int nbio, sl_proto_t proto)
{
        return(0);
}
/*
 * Our peer is writing a request to the kernel.  At the moment we do not
 * accept commands.
 */
static int
backend_write_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
        return(EOPNOTSUPP);
}
/*
 * Our peer wants to reply to a syslink message we sent it earlier.  The
 * original command (that we passed to our peer), and the peer's reply
 * is specified.  If the peer has failed slrep will be NULL.
 */
static void
backend_reply_kern(struct sldesc *ksl, struct slmsg *slcmd, struct slmsg *slrep)
{
        int error;

        spin_lock_wr(&ksl->spin);
        if (slrep == NULL) {
                slcmd->rep = (struct slmsg *)-1;
                error = EIO;
        } else {
                slcmd->rep = slrep;
                error = slrep->msg->sm_head.se_aux;
        }
        spin_unlock_wr(&ksl->spin);

        /*
         * Issue callback or wakeup a synchronous waiter.
         */
        if (slcmd->callback_func) {
                slcmd->callback_func(slcmd, slcmd->callback_data, error);
        } else {
                wakeup(slcmd);
        }
}
/*
 * Any reply messages we sent to our peer are returned to us for disposal.
 * Since we do not currently accept commands from our peer, there will not
 * be any replies returned to the peer to dispose of.
 */
static void
backend_dispose_kern(struct sldesc *ksl, struct slmsg *slmsg)
{
        panic("backend_dispose_kern: kernel can't accept commands so it "
              "certainly did not reply to one!");
}