/*	$NetBSD: sys_mqueue.c,v 1.16 2009/04/11 23:05:26 christos Exp $	*/
/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of POSIX message queues.
 * Defined in the Base Definitions volume of IEEE Std 1003.1-2001.
 *
 * Locking
 *
 * The global list of message queues (mqueue_head) and the per-process
 * proc_t::p_mqueue_cnt counter are protected by the mqlist_mtx lock.
 * The message queue itself and its members are protected by mqueue::mq_mtx.
 *
 * Lock order:
 *	mqlist_mtx
 *	  -> mqueue::mq_mtx
 */
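/*
 * Illustrative sketch of that order as used by the open and unlink paths
 * below, which take mqlist_mtx first and only then a particular queue's
 * lock, releasing in the reverse order:
 *
 *	lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
 *	mq = mqueue_lookup(name);	(returns with mq->mq_mtx held)
 *	...
 *	lockmgr(&mq->mq_mtx, LK_RELEASE);
 *	lockmgr(&mqlist_mtx, LK_RELEASE);
 */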
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ucred.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mqueue.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/serialize.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
/* System-wide limits. */
static u_int			mq_open_max = MQ_OPEN_MAX;
static u_int			mq_prio_max = MQ_PRIO_MAX;
static u_int			mq_max_msgsize = 16 * MQ_DEF_MSGSIZE;
static u_int			mq_def_maxmsg = 32;
static u_int			mq_max_maxmsg = 16 * 32;

struct lock			mqlist_mtx;
static LIST_HEAD(, mqueue)	mqueue_head =
	LIST_HEAD_INITIALIZER(mqueue_head);

typedef struct file file_t;	/* XXX: Should we put this in sys/types.h ? */
/* Function prototypes */
static int	mq_stat_fop(file_t *, struct stat *, struct ucred *cred);
static int	mq_close_fop(file_t *);
static int	mq_kqfilter_fop(struct file *fp, struct knote *kn);
static void	mqfilter_read_detach(struct knote *kn);
static void	mqfilter_write_detach(struct knote *kn);
static int	mqfilter_read(struct knote *kn, long hint);
static int	mqfilter_write(struct knote *kn, long hint);

/* Some time-related utility functions */
static int	tstohz(const struct timespec *ts);
/* File operations vector */
static struct fileops mqops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_stat = mq_stat_fop,
	.fo_close = mq_close_fop,
	.fo_kqfilter = mq_kqfilter_fop,
	.fo_shutdown = badfo_shutdown
};

/* Define a new malloc type for message queues */
MALLOC_DECLARE(M_MQBUF);
MALLOC_DEFINE(M_MQBUF, "mqueues", "Buffers to message queues");
/*
 * Initialize POSIX message queue subsystem.
 */
void
mqueue_sysinit(void)
{
	lockinit(&mqlist_mtx, "mqlist_mtx", 0, LK_CANRECURSE);
}

/*
 * Free the message.
 */
static void
mqueue_freemsg(struct mq_msg *msg, const size_t size)
{
	kfree(msg, M_MQBUF);
}
/*
 * Destroy the message queue.
 */
static void
mqueue_destroy(struct mqueue *mq)
{
	struct mq_msg *msg;
	size_t msz;
	u_int i;

	/* Note MQ_PQSIZE + 1. */
	for (i = 0; i < MQ_PQSIZE + 1; i++) {
		while ((msg = TAILQ_FIRST(&mq->mq_head[i])) != NULL) {
			TAILQ_REMOVE(&mq->mq_head[i], msg, msg_queue);
			msz = sizeof(struct mq_msg) + msg->msg_len;
			mqueue_freemsg(msg, msz);
		}
	}
	lockuninit(&mq->mq_mtx);
	kfree(mq, M_MQBUF);
}
/*
 * Look up a file name in the global list of message queues.
 *  => locks the message queue, if found
 */
static void *
mqueue_lookup(char *name)
{
	struct mqueue *mq;

	KKASSERT(lockstatus(&mqlist_mtx, curthread));

	LIST_FOREACH(mq, &mqueue_head, mq_list) {
		if (strncmp(mq->mq_name, name, MQ_NAMELEN) == 0) {
			lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
			return mq;
		}
	}

	return NULL;
}
/*
 * mqueue_get: get the mqueue from the descriptor.
 *  => locks the message queue, if found.
 *  => holds a reference on the file descriptor.
 */
static int
mqueue_get(struct lwp *l, mqd_t mqd, file_t **fpr)
{
	struct mqueue *mq;
	file_t *fp;

	fp = holdfp(curproc->p_fd, (int)mqd, -1);	/* XXX: Why -1 ? */
	if (__predict_false(fp == NULL))
		return EBADF;

	if (__predict_false(fp->f_type != DTYPE_MQUEUE)) {
		fdrop(fp);
		return EBADF;
	}
	mq = fp->f_data;
	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

	*fpr = fp;
	return 0;
}
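/*
 * Typical caller pattern (for illustration; see the mq_*() syscalls below):
 * on success the queue is returned locked and the file is referenced, so
 * the caller must release both when done.
 *
 *	error = mqueue_get(l, mqdes, &fp);
 *	if (error)
 *		return error;
 *	mq = fp->f_data;
 *	...
 *	lockmgr(&mq->mq_mtx, LK_RELEASE);
 *	fdrop(fp);
 */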
/*
 * mqueue_linear_insert: perform linear insert according to the message
 * priority into the reserved queue (MQ_PQRESQ).  Reserved queue is a
 * sorted list used only when mq_prio_max is increased via sysctl.
 */
static inline void
mqueue_linear_insert(struct mqueue *mq, struct mq_msg *msg)
{
	struct mq_msg *mit;

	TAILQ_FOREACH(mit, &mq->mq_head[MQ_PQRESQ], msg_queue) {
		if (msg->msg_prio > mit->msg_prio)
			break;
	}
	if (mit == NULL) {
		TAILQ_INSERT_TAIL(&mq->mq_head[MQ_PQRESQ], msg, msg_queue);
	} else {
		TAILQ_INSERT_BEFORE(mit, msg, msg_queue);
	}
}
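/*
 * Worked example (for illustration): if the reserved queue currently holds
 * messages with priorities 40, 35 and 33 (kept in descending order), a new
 * message with priority 36 stops the scan at the "35" entry and is inserted
 * before it, giving 40, 36, 35, 33.  Equal priorities keep FIFO order.
 */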
/*
 * Compute number of ticks in the specified amount of time.
 */
static int
tstohz(const struct timespec *ts)
{
	struct timeval tv;

	/*
	 * usec has great enough resolution for hz, so convert to a
	 * timeval and use tvtohz_high() below.
	 */
	TIMESPEC_TO_TIMEVAL(&tv, ts);
	return tvtohz_high(&tv);	/* XXX Why _high() and not _low() ? */
}
/*
 * Convert an absolute timespec timeout to a relative tick count.
 * Used by mq_timedreceive(), mq_timedsend().
 */
static int
abstimeout2timo(struct timespec *ts, int *timo)
{
	struct timespec tsd;
	int error;

	error = itimespecfix(ts);
	if (error) {
		return error;
	}
	getnanotime(&tsd);
	timespecsub(ts, &tsd);
	if (ts->tv_sec < 0 || (ts->tv_sec == 0 && ts->tv_nsec <= 0)) {
		return ETIMEDOUT;
	}
	*timo = tstohz(ts);
	KKASSERT(*timo != 0);

	return 0;
}
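/*
 * Worked example (for illustration, assuming hz = 100): an absolute timeout
 * roughly two seconds in the future leaves ts at about { 2, 0 } after the
 * timespecsub() above, so *timo comes out near 200 ticks, which the callers
 * in mq_timedsend()/mq_timedreceive() then pass to lksleep().
 */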
static int
mq_stat_fop(file_t *fp, struct stat *st, struct ucred *cred)
{
	struct mqueue *mq = fp->f_data;

	(void)memset(st, 0, sizeof(*st));

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
	st->st_mode = mq->mq_mode;
	st->st_uid = mq->mq_euid;
	st->st_gid = mq->mq_egid;
	st->st_atimespec = mq->mq_atime;
	st->st_mtimespec = mq->mq_mtime;
	/*st->st_ctimespec = st->st_birthtimespec = mq->mq_btime;*/
	st->st_uid = fp->f_cred->cr_uid;
	st->st_gid = fp->f_cred->cr_svgid;
	lockmgr(&mq->mq_mtx, LK_RELEASE);

	return 0;
}
static struct filterops mqfiltops_read =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, mqfilter_read_detach, mqfilter_read };
static struct filterops mqfiltops_write =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, mqfilter_write_detach, mqfilter_write };

static int
mq_kqfilter_fop(struct file *fp, struct knote *kn)
{
	struct mqueue *mq = fp->f_data;
	struct klist *klist;

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &mqfiltops_read;
		kn->kn_hook = (caddr_t)mq;
		klist = &mq->mq_rkq.ki_note;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &mqfiltops_write;
		kn->kn_hook = (caddr_t)mq;
		klist = &mq->mq_wkq.ki_note;
		break;
	default:
		lockmgr(&mq->mq_mtx, LK_RELEASE);
		return (EOPNOTSUPP);
	}

	knote_insert(klist, kn);
	lockmgr(&mq->mq_mtx, LK_RELEASE);

	return (0);
}
static void
mqfilter_read_detach(struct knote *kn)
{
	struct mqueue *mq = (struct mqueue *)kn->kn_hook;

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
	struct klist *klist = &mq->mq_rkq.ki_note;
	knote_remove(klist, kn);
	lockmgr(&mq->mq_mtx, LK_RELEASE);
}

static void
mqfilter_write_detach(struct knote *kn)
{
	struct mqueue *mq = (struct mqueue *)kn->kn_hook;

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
	struct klist *klist = &mq->mq_wkq.ki_note;
	knote_remove(klist, kn);
	lockmgr(&mq->mq_mtx, LK_RELEASE);
}
static int
mqfilter_read(struct knote *kn, long hint)
{
	struct mqueue *mq = (struct mqueue *)kn->kn_hook;
	struct mq_attr *mqattr;
	int ready = 0;

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
	mqattr = &mq->mq_attrib;
	/* Ready for receiving, if there are messages in the queue */
	if (mqattr->mq_curmsgs)
		ready = 1;
	lockmgr(&mq->mq_mtx, LK_RELEASE);

	return (ready);
}

static int
mqfilter_write(struct knote *kn, long hint)
{
	struct mqueue *mq = (struct mqueue *)kn->kn_hook;
	struct mq_attr *mqattr;
	int ready = 0;

	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
	mqattr = &mq->mq_attrib;
	/* Ready for sending, if the message queue is not full */
	if (mqattr->mq_curmsgs < mqattr->mq_maxmsg)
		ready = 1;
	lockmgr(&mq->mq_mtx, LK_RELEASE);

	return (ready);
}
static int
mq_close_fop(file_t *fp)
{
	struct proc *p = curproc;
	struct mqueue *mq = fp->f_data;
	bool destroy;

	lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
	lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

	/* Decrease the counters */
	p->p_mqueue_cnt--;
	mq->mq_refcnt--;

	/* Remove notification if registered for this process */
	if (mq->mq_notify_proc == p)
		mq->mq_notify_proc = NULL;

	/*
	 * If this is the last reference and mqueue is marked for unlink,
	 * remove and later destroy the message queue.
	 */
	if (mq->mq_refcnt == 0 && (mq->mq_attrib.mq_flags & MQ_UNLINK)) {
		LIST_REMOVE(mq, mq_list);
		destroy = true;
	} else
		destroy = false;

	lockmgr(&mq->mq_mtx, LK_RELEASE);
	lockmgr(&mqlist_mtx, LK_RELEASE);

	if (destroy)
		mqueue_destroy(mq);

	return 0;
}
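/*
 * Teardown summary: a queue is destroyed either here, when the last
 * descriptor is closed on a queue already marked MQ_UNLINK, or in
 * sys_mq_unlink() when the name is removed while no descriptor references
 * it; otherwise it simply stays on mqueue_head.
 */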
/*
 * General mqueue system calls.
 */

int
sys_mq_open(struct mq_open_args *uap)
{
	/* {
		syscallarg(const char *) name;
		syscallarg(int) oflag;
		syscallarg(mode_t) mode;
		syscallarg(struct mq_attr) attr;
	} */
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct mqueue *mq, *mq_new = NULL;
	file_t *fp;
	char *name;
	int mqd, error, oflag;

	/* Check access mode flags */
	oflag = SCARG(uap, oflag);
	if ((oflag & O_ACCMODE) == (O_WRONLY | O_RDWR)) {
		return EINVAL;
	}

	/* Get the name from the user-space */
	name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO | M_NULLOK);
	if (name == NULL)
		return (ENOMEM);
	error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
	if (error) {
		kfree(name, M_MQBUF);
		return error;
	}
	if (oflag & O_CREAT) {
		struct mq_attr attr;
		u_int i;

		/* Check the limit */
		if (p->p_mqueue_cnt == mq_open_max) {
			kfree(name, M_MQBUF);
			return EMFILE;
		}

		/* Empty name is invalid */
		if (name[0] == '\0') {
			kfree(name, M_MQBUF);
			return EINVAL;
		}

		/* Check for mqueue attributes */
		if (SCARG(uap, attr)) {
			error = copyin(SCARG(uap, attr), &attr,
			    sizeof(struct mq_attr));
			if (error) {
				kfree(name, M_MQBUF);
				return error;
			}
			if (attr.mq_maxmsg <= 0 ||
			    attr.mq_maxmsg > mq_max_maxmsg ||
			    attr.mq_msgsize <= 0 ||
			    attr.mq_msgsize > mq_max_msgsize) {
				kfree(name, M_MQBUF);
				return EINVAL;
			}
			attr.mq_curmsgs = 0;
		} else {
			memset(&attr, 0, sizeof(struct mq_attr));
			attr.mq_maxmsg = mq_def_maxmsg;
			attr.mq_msgsize =
			    MQ_DEF_MSGSIZE - sizeof(struct mq_msg);
		}

		/*
		 * Allocate new mqueue, initialize data structures,
		 * copy the name, attributes and set the flag.
		 */
		mq_new = kmalloc(sizeof(struct mqueue), M_MQBUF,
		    M_WAITOK | M_ZERO | M_NULLOK);
		if (mq_new == NULL) {
			kfree(name, M_MQBUF);
			return (ENOMEM);
		}

		lockinit(&mq_new->mq_mtx, "mq_new->mq_mtx", 0, LK_CANRECURSE);
		for (i = 0; i < (MQ_PQSIZE + 1); i++) {
			TAILQ_INIT(&mq_new->mq_head[i]);
		}

		strlcpy(mq_new->mq_name, name, MQ_NAMELEN);
		memcpy(&mq_new->mq_attrib, &attr, sizeof(struct mq_attr));

		/*CTASSERT((O_MASK & (MQ_UNLINK | MQ_RECEIVE)) == 0);*/
		/* mq_new->mq_attrib.mq_flags = (O_MASK & oflag); */
		mq_new->mq_attrib.mq_flags = oflag;

		/* Store mode and effective UID with GID */
		mq_new->mq_mode = ((SCARG(uap, mode) &
		    ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
		mq_new->mq_euid = td->td_ucred->cr_uid;
		mq_new->mq_egid = td->td_ucred->cr_svgid;
	}
	/* Allocate file structure and descriptor */
	error = falloc(td->td_lwp, &fp, &mqd);
	if (error) {
		if (mq_new)
			mqueue_destroy(mq_new);
		kfree(name, M_MQBUF);
		return error;
	}
	fp->f_type = DTYPE_MQUEUE;
	fp->f_flag = FFLAGS(oflag) & (FREAD | FWRITE);
	fp->f_ops = &mqops;

	/* Look up the mqueue with this name */
	lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
	mq = mqueue_lookup(name);
	if (mq) {
		int acc_mode;

		KKASSERT(lockstatus(&mq->mq_mtx, curthread));

		/* Fail if the mqueue is marked as unlinking */
		if (mq->mq_attrib.mq_flags & MQ_UNLINK) {
			error = EACCES;
			goto exit;
		}
		/* Fail if O_EXCL is set, and mqueue already exists */
		if ((oflag & O_CREAT) && (oflag & O_EXCL)) {
			error = EEXIST;
			goto exit;
		}

		/*
		 * Check the permissions.  Note the difference between
		 * VREAD/VWRITE and FREAD/FWRITE.
		 */
		acc_mode = 0;
		if (fp->f_flag & FREAD) {
			acc_mode |= VREAD;
		}
		if (fp->f_flag & FWRITE) {
			acc_mode |= VWRITE;
		}
		if (vaccess(VNON, mq->mq_mode, mq->mq_euid, mq->mq_egid,
		    acc_mode, td->td_ucred)) {
			error = EACCES;
			goto exit;
		}
	} else {
		/* Fail if the mqueue does not exist and we are not creating it */
		if ((oflag & O_CREAT) == 0) {
			lockmgr(&mqlist_mtx, LK_RELEASE);
			KKASSERT(mq_new == NULL);
			fsetfd(fdp, NULL, mqd);
			fp->f_ops = &badfileops;
			fdrop(fp);
			kfree(name, M_MQBUF);
			return ENOENT;
		}

		/* Check the limit */
		if (p->p_mqueue_cnt == mq_open_max) {
			error = EMFILE;
			goto exit;
		}

		/* Insert the queue into the list */
		mq = mq_new;
		lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
		LIST_INSERT_HEAD(&mqueue_head, mq, mq_list);
		mq_new = NULL;
		getnanotime(&mq->mq_btime);
		mq->mq_atime = mq->mq_mtime = mq->mq_btime;
	}

	/* Increase the counters, and make descriptor ready */
	p->p_mqueue_cnt++;
	mq->mq_refcnt++;
	fp->f_data = mq;
exit:
	lockmgr(&mq->mq_mtx, LK_RELEASE);
	lockmgr(&mqlist_mtx, LK_RELEASE);

	if (mq_new)
		mqueue_destroy(mq_new);
	if (error) {
		fsetfd(fdp, NULL, mqd);
		fp->f_ops = &badfileops;
	} else {
		fsetfd(fdp, fp, mqd);
		uap->sysmsg_result = mqd;
	}
	fdrop(fp);
	kfree(name, M_MQBUF);

	return error;
}
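/*
 * Name resolution summary for the function above (for illustration):
 *
 *	name exists,  marked MQ_UNLINK	-> EACCES
 *	name exists,  O_CREAT|O_EXCL	-> EEXIST
 *	name exists,  otherwise		-> attach, vaccess() checked
 *	name missing, O_CREAT		-> create with given mode/attributes
 *	name missing, no O_CREAT	-> ENOENT
 */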
int
sys_mq_close(struct mq_close_args *uap)
{
	return sys_close((void *)uap);
}
/*
 * Primary mq_receive1() function.
 */
int
mq_receive1(struct lwp *l, mqd_t mqdes, void *msg_ptr, size_t msg_len,
    unsigned *msg_prio, struct timespec *ts, ssize_t *mlen)
{
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_msg *msg = NULL;
	struct mq_attr *mqattr;
	u_int idx;
	int error;

	/* Get the message queue */
	error = mqueue_get(l, mqdes, &fp);
	if (error) {
		return error;
	}
	mq = fp->f_data;
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto error;
	}
	getnanotime(&mq->mq_atime);
	mqattr = &mq->mq_attrib;

	/* Check the message size limits */
	if (msg_len < mqattr->mq_msgsize) {
		error = EMSGSIZE;
		goto error;
	}

	/* Check if queue is empty */
	while (mqattr->mq_curmsgs == 0) {
		int t;

		if (mqattr->mq_flags & O_NONBLOCK) {
			error = EAGAIN;
			goto error;
		}
		if (ts) {
			error = abstimeout2timo(ts, &t);
			if (error)
				goto error;
		} else
			t = 0;
		/*
		 * Block until someone sends the message.
		 * While doing this, notification should not be sent.
		 */
		mqattr->mq_flags |= MQ_RECEIVE;
		error = lksleep(&mq->mq_send_cv, &mq->mq_mtx, PCATCH, "mqsend", t);
		mqattr->mq_flags &= ~MQ_RECEIVE;
		if (error || (mqattr->mq_flags & MQ_UNLINK)) {
			error = (error == EWOULDBLOCK) ? ETIMEDOUT : EINTR;
			goto error;
		}
	}

	/*
	 * Find the highest priority message, and remove it from the queue.
	 * At first, reserved queue is checked, bitmap is next.
	 */
	msg = TAILQ_FIRST(&mq->mq_head[MQ_PQRESQ]);
	if (__predict_true(msg == NULL)) {
		idx = ffs(mq->mq_bitmap);
		msg = TAILQ_FIRST(&mq->mq_head[idx]);
		KKASSERT(msg != NULL);
	} else {
		idx = MQ_PQRESQ;
	}
	TAILQ_REMOVE(&mq->mq_head[idx], msg, msg_queue);

	/* Unmark the bit, if last message. */
	if (__predict_true(idx) && TAILQ_EMPTY(&mq->mq_head[idx])) {
		KKASSERT((MQ_PQSIZE - idx) == msg->msg_prio);
		mq->mq_bitmap &= ~(1 << --idx);
	}

	/* Decrement the counter and signal waiter, if any */
	mqattr->mq_curmsgs--;
	wakeup_one(&mq->mq_recv_cv);

	/* Ready for sending now */
	KNOTE(&mq->mq_wkq.ki_note, 0);
error:
	lockmgr(&mq->mq_mtx, LK_RELEASE);
	fdrop(fp);
	if (error)
		return error;

	/*
	 * Copy the data to the user-space.
	 * Note: POSIX requires that no message be removed from the queue on
	 * failure; since the message was already dequeued, a copyout() error
	 * here violates that.
	 */
	*mlen = msg->msg_len;
	error = copyout(msg->msg_ptr, msg_ptr, msg->msg_len);
	if (error == 0 && msg_prio)
		error = copyout(&msg->msg_prio, msg_prio, sizeof(unsigned));
	mqueue_freemsg(msg, sizeof(struct mq_msg) + msg->msg_len);

	return error;
}
int
sys_mq_receive(struct mq_receive_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned *) msg_prio;
	} */
	ssize_t mlen;
	int error;

	error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), 0, &mlen);
	if (error == 0)
		uap->sysmsg_result = mlen;

	return error;
}
int
sys_mq_timedreceive(struct mq_timedreceive_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned *) msg_prio;
		syscallarg(const struct timespec *) abs_timeout;
	} */
	int error;
	ssize_t mlen;
	struct timespec ts, *tsp;

	/* Get and convert time value */
	if (SCARG(uap, abs_timeout)) {
		error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
		if (error)
			return error;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp, &mlen);
	if (error == 0)
		uap->sysmsg_result = mlen;

	return error;
}
/*
 * Primary mq_send1() function.
 */
int
mq_send1(struct lwp *l, mqd_t mqdes, const char *msg_ptr, size_t msg_len,
    unsigned msg_prio, struct timespec *ts)
{
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_msg *msg;
	struct mq_attr *mqattr;
	struct proc *notify = NULL;
	/*ksiginfo_t ksi;*/
	size_t size;
	int error;

	/* Check the priority range */
	if (msg_prio >= mq_prio_max)
		return EINVAL;

	/* Allocate a new message */
	size = sizeof(struct mq_msg) + msg_len;
	if (size > mq_max_msgsize)
		return EMSGSIZE;

	msg = kmalloc(size, M_MQBUF, M_WAITOK | M_NULLOK);
	if (msg == NULL)
		return (ENOMEM);

	/* Get the data from user-space */
	error = copyin(msg_ptr, msg->msg_ptr, msg_len);
	if (error) {
		mqueue_freemsg(msg, size);
		return error;
	}
	msg->msg_len = msg_len;
	msg->msg_prio = msg_prio;

	/* Get the mqueue */
	error = mqueue_get(l, mqdes, &fp);
	if (error) {
		mqueue_freemsg(msg, size);
		return error;
	}
	mq = fp->f_data;
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto error;
	}
	getnanotime(&mq->mq_mtime);
	mqattr = &mq->mq_attrib;

	/* Check the message size limit */
	if (msg_len <= 0 || msg_len > mqattr->mq_msgsize) {
		error = EMSGSIZE;
		goto error;
	}

	/* Check if queue is full */
	while (mqattr->mq_curmsgs >= mqattr->mq_maxmsg) {
		int t;

		if (mqattr->mq_flags & O_NONBLOCK) {
			error = EAGAIN;
			goto error;
		}
		if (ts) {
			error = abstimeout2timo(ts, &t);
			if (error)
				goto error;
		} else
			t = 0;
		/* Block until queue becomes available */
		error = lksleep(&mq->mq_recv_cv, &mq->mq_mtx, PCATCH, "mqrecv", t);
		if (error || (mqattr->mq_flags & MQ_UNLINK)) {
			error = (error == EWOULDBLOCK) ? ETIMEDOUT : error;
			goto error;
		}
	}
	KKASSERT(mq->mq_attrib.mq_curmsgs < mq->mq_attrib.mq_maxmsg);

	/*
	 * Insert message into the queue, according to the priority.
	 * Note the difference between index and priority.
	 */
	if (__predict_true(msg_prio < MQ_PQSIZE)) {
		u_int idx = MQ_PQSIZE - msg_prio;

		KKASSERT(idx != MQ_PQRESQ);
		TAILQ_INSERT_TAIL(&mq->mq_head[idx], msg, msg_queue);
		mq->mq_bitmap |= (1 << --idx);
	} else {
		mqueue_linear_insert(mq, msg);
	}
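	/*
	 * Illustrative mapping for the common (msg_prio < MQ_PQSIZE) case
	 * handled above:
	 *
	 *	msg_prio	queue index	bitmap bit
	 *	0		MQ_PQSIZE	MQ_PQSIZE - 1
	 *	MQ_PQSIZE - 1	1		0
	 *
	 * Higher priority thus means a lower index and a lower bit, so the
	 * ffs(mq->mq_bitmap) lookup in mq_receive1() finds the highest
	 * priority non-empty queue first.  Priorities of MQ_PQSIZE and above
	 * go to the sorted reserved queue via mqueue_linear_insert().
	 */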
	/* Check for the notify */
	if (mqattr->mq_curmsgs == 0 && mq->mq_notify_proc &&
	    (mqattr->mq_flags & MQ_RECEIVE) == 0 &&
	    mq->mq_sig_notify.sigev_notify == SIGEV_SIGNAL) {
		/* Initialize the signal */
		/*KSI_INIT(&ksi);*/
		/*ksi.ksi_signo = mq->mq_sig_notify.sigev_signo;*/
		/*ksi.ksi_code = SI_MESGQ;*/
		/*ksi.ksi_value = mq->mq_sig_notify.sigev_value;*/
		/* Unregister the process */
		notify = mq->mq_notify_proc;
		mq->mq_notify_proc = NULL;
	}

	/* Increment the counter and signal waiter, if any */
	mqattr->mq_curmsgs++;
	wakeup_one(&mq->mq_send_cv);

	/* Ready for receiving now */
	KNOTE(&mq->mq_rkq.ki_note, 0);
error:
	if (error) {
		lockmgr(&mq->mq_mtx, LK_RELEASE);
		fdrop(fp);
		mqueue_freemsg(msg, size);
	} else if (notify) {
		PHOLD(notify);
		lockmgr(&mq->mq_mtx, LK_RELEASE);
		fdrop(fp);
		/* Send the notify, if needed */
		/*kpsignal(notify, &ksi, NULL);*/
		ksignal(notify, mq->mq_sig_notify.sigev_signo);
		PRELE(notify);
	} else {
		lockmgr(&mq->mq_mtx, LK_RELEASE);
		fdrop(fp);
	}
	return error;
}
int
sys_mq_send(struct mq_send_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned) msg_prio;
	} */

	return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), 0);
}
int
sys_mq_timedsend(struct mq_timedsend_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const char *) msg_ptr;
		syscallarg(size_t) msg_len;
		syscallarg(unsigned) msg_prio;
		syscallarg(const struct timespec *) abs_timeout;
	} */
	struct timespec ts, *tsp;
	int error;

	/* Get and convert time value */
	if (SCARG(uap, abs_timeout)) {
		error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
		if (error)
			return error;
		tsp = &ts;
	} else {
		tsp = NULL;
	}

	return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
	    SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp);
}
int
sys_mq_notify(struct mq_notify_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const struct sigevent *) notification;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct sigevent sig;
	int error;

	if (SCARG(uap, notification)) {
		/* Get the signal from user-space */
		error = copyin(SCARG(uap, notification), &sig,
		    sizeof(struct sigevent));
		if (error)
			return error;
		if (sig.sigev_notify == SIGEV_SIGNAL &&
		    (sig.sigev_signo <= 0 || sig.sigev_signo >= NSIG))
			return EINVAL;
	}

	error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;

	if (SCARG(uap, notification)) {
		/* Register notification: set the signal and target process */
		if (mq->mq_notify_proc == NULL) {
			memcpy(&mq->mq_sig_notify, &sig,
			    sizeof(struct sigevent));
			mq->mq_notify_proc = curproc;
		} else {
			/* Fail if someone else already registered */
			error = EBUSY;
		}
	} else {
		/* Unregister the notification */
		mq->mq_notify_proc = NULL;
	}
	lockmgr(&mq->mq_mtx, LK_RELEASE);
	fdrop(fp);

	return error;
}
int
sys_mq_getattr(struct mq_getattr_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(struct mq_attr *) mqstat;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_attr attr;
	int error;

	/* Get the message queue */
	error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;
	memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
	lockmgr(&mq->mq_mtx, LK_RELEASE);
	fdrop(fp);

	return copyout(&attr, SCARG(uap, mqstat), sizeof(struct mq_attr));
}
int
sys_mq_setattr(struct mq_setattr_args *uap)
{
	/* {
		syscallarg(mqd_t) mqdes;
		syscallarg(const struct mq_attr *) mqstat;
		syscallarg(struct mq_attr *) omqstat;
	} */
	file_t *fp = NULL;
	struct mqueue *mq;
	struct mq_attr attr;
	int error, nonblock;

	error = copyin(SCARG(uap, mqstat), &attr, sizeof(struct mq_attr));
	if (error)
		return error;
	nonblock = (attr.mq_flags & O_NONBLOCK);

	/* Get the message queue */
	error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
	if (error)
		return error;
	mq = fp->f_data;

	/* Copy the old attributes, if needed */
	if (SCARG(uap, omqstat)) {
		memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
	}

	/* Ignore everything, except O_NONBLOCK */
	if (nonblock)
		mq->mq_attrib.mq_flags |= O_NONBLOCK;
	else
		mq->mq_attrib.mq_flags &= ~O_NONBLOCK;

	lockmgr(&mq->mq_mtx, LK_RELEASE);
	fdrop(fp);

	/*
	 * Copy the data to the user-space.
	 * Note: POSIX requires that the new attributes not be applied on
	 * failure; since O_NONBLOCK was already changed above, a copyout()
	 * error here violates that.
	 */
	if (SCARG(uap, omqstat))
		error = copyout(&attr, SCARG(uap, omqstat),
		    sizeof(struct mq_attr));

	return error;
}
int
sys_mq_unlink(struct mq_unlink_args *uap)
{
	/* {
		syscallarg(const char *) name;
	} */
	struct thread *td = curthread;
	struct mqueue *mq;
	char *name;
	int error, refcnt = 0;

	/* Get the name from the user-space */
	name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO | M_NULLOK);
	if (name == NULL)
		return (ENOMEM);
	error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
	if (error) {
		kfree(name, M_MQBUF);
		return error;
	}

	/* Look up the queue with this name */
	lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
	mq = mqueue_lookup(name);
	if (mq == NULL) {
		error = ENOENT;
		goto error;
	}

	/* Check the permissions */
	if (td->td_ucred->cr_uid != mq->mq_euid &&
	    priv_check(td, PRIV_ROOT) != 0) {
		lockmgr(&mq->mq_mtx, LK_RELEASE);
		error = EACCES;
		goto error;
	}

	/* Mark the message queue as unlinking before dropping the locks */
	mq->mq_attrib.mq_flags |= MQ_UNLINK;

	/* Wake up all waiters, if any */
	wakeup(&mq->mq_send_cv);
	wakeup(&mq->mq_recv_cv);

	KNOTE(&mq->mq_rkq.ki_note, 0);
	KNOTE(&mq->mq_wkq.ki_note, 0);

	refcnt = mq->mq_refcnt;
	if (refcnt == 0)
		LIST_REMOVE(mq, mq_list);

	lockmgr(&mq->mq_mtx, LK_RELEASE);
error:
	lockmgr(&mqlist_mtx, LK_RELEASE);

	/*
	 * If there are no references, destroy the message queue;
	 * otherwise, the last mq_close() will do that.
	 */
	if (error == 0 && refcnt == 0)
		mqueue_destroy(mq);

	kfree(name, M_MQBUF);
	return error;
}
/*
 * SysCtl.
 */
SYSCTL_NODE(_kern, OID_AUTO, mqueue,
    CTLFLAG_RW, 0, "Message queue options");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_open_max,
    CTLFLAG_RW, &mq_open_max, 0,
    "Maximum number of message queue descriptors per process");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_prio_max,
    CTLFLAG_RW, &mq_prio_max, 0,
    "Maximum priority of a message");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_max_msgsize,
    CTLFLAG_RW, &mq_max_msgsize, 0,
    "Maximum allowed size of a message");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_def_maxmsg,
    CTLFLAG_RW, &mq_def_maxmsg, 0,
    "Default maximum message count");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_max_maxmsg,
    CTLFLAG_RW, &mq_max_maxmsg, 0,
    "Maximum allowed message count");

SYSINIT(sys_mqueue_init, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, mqueue_sysinit, NULL);
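/*
 * Userland usage sketch (for illustration only, not part of the kernel
 * build; the queue name and buffer size are arbitrary examples):
 *
 *	#include <mqueue.h>
 *	#include <fcntl.h>
 *
 *	struct mq_attr attr;
 *	unsigned prio;
 *	char buf[16384];		(attr.mq_msgsize <= sizeof(buf) assumed)
 *	mqd_t mqd;
 *
 *	mqd = mq_open("/example", O_CREAT | O_RDWR, 0600, NULL);
 *	mq_getattr(mqd, &attr);
 *	mq_send(mqd, "hello", 5, 1);
 *	mq_receive(mqd, buf, attr.mq_msgsize, &prio);
 *	mq_close(mqd);
 *	mq_unlink("/example");
 *
 * These calls map onto sys_mq_open(), sys_mq_getattr(), sys_mq_send(),
 * sys_mq_receive(), sys_mq_close() and sys_mq_unlink() above.
 */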