/*	$NetBSD: sys_mqueue.c,v 1.16 2009/04/11 23:05:26 christos Exp $	*/

/*
 * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of POSIX message queues.
 * Defined in the Base Definitions volume of IEEE Std 1003.1-2001.
 *
 * Locking
 *
 * The global list of message queues (mqueue_head) and the
 * proc_t::p_mqueue_cnt counter are protected by the mqlist_mtx lock.
 * The message queue itself and its members are protected by mqueue::mq_mtx.
 *
 * Lock order:
 *	mqlist_mtx
 *	  -> mqueue::mq_mtx
 */
#include <stdbool.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ucred.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mqueue.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/serialize.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
/* System-wide limits. */
static u_int mq_open_max = MQ_OPEN_MAX;
static u_int mq_prio_max = MQ_PRIO_MAX;
static u_int mq_max_msgsize = 16 * MQ_DEF_MSGSIZE;
static u_int mq_def_maxmsg = 32;
static u_int mq_max_maxmsg = 16 * 32;

struct lock mqlist_mtx;
static LIST_HEAD(, mqueue) mqueue_head =
    LIST_HEAD_INITIALIZER(mqueue_head);

typedef struct file file_t;	/* XXX: Should we put this in sys/types.h ? */
/* Function prototypes */
static int mq_stat_fop(file_t *, struct stat *, struct ucred *cred);
static int mq_close_fop(file_t *);
static int mq_kqfilter_fop(struct file *fp, struct knote *kn);
static void mqfilter_read_detach(struct knote *kn);
static void mqfilter_write_detach(struct knote *kn);
static int mqfilter_read(struct knote *kn, long hint);
static int mqfilter_write(struct knote *kn, long hint);

/* Some time-related utility functions */
static int itimespecfix(struct timespec *ts);
static int tstohz(const struct timespec *ts);
/* File operations vector */
static struct fileops mqops = {
        .fo_read = badfo_readwrite,
        .fo_write = badfo_readwrite,
        .fo_ioctl = badfo_ioctl,
        .fo_stat = mq_stat_fop,
        .fo_close = mq_close_fop,
        .fo_kqfilter = mq_kqfilter_fop,
        .fo_shutdown = badfo_shutdown
};
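/*
 * Note: plain read(2)/write(2) and ioctl(2) on a message queue descriptor
 * are rejected through the badfo_* stubs; data transfer goes through the
 * mq_send()/mq_receive() system calls below, while fstat(2), close(2) and
 * kqueue(2) are serviced through this vector.
 */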
/* Define a new malloc type for message queues */
MALLOC_DECLARE(M_MQBUF);
MALLOC_DEFINE(M_MQBUF, "mqueues", "Buffers to message queues");
/*
 * Initialize POSIX message queue subsystem.
 */
void
mqueue_sysinit(void)
{
        lockinit(&mqlist_mtx, "mqlist_mtx", 0, LK_CANRECURSE);
}
/*
 * Free the message.
 */
static void
mqueue_freemsg(struct mq_msg *msg, const size_t size)
{
        kfree(msg, M_MQBUF);
}
/*
 * Destroy the message queue.
 */
static void
mqueue_destroy(struct mqueue *mq)
{
        struct mq_msg *msg;
        size_t msz;
        u_int i;

        /* Note MQ_PQSIZE + 1. */
        for (i = 0; i < MQ_PQSIZE + 1; i++) {
                while ((msg = TAILQ_FIRST(&mq->mq_head[i])) != NULL) {
                        TAILQ_REMOVE(&mq->mq_head[i], msg, msg_queue);
                        msz = sizeof(struct mq_msg) + msg->msg_len;
                        mqueue_freemsg(msg, msz);
                }
        }
        lockuninit(&mq->mq_mtx);
        kfree(mq, M_MQBUF);
}
/*
 * Look up a file name in the global list of message queues.
 * => locks the message queue on success
 */
static void *
mqueue_lookup(char *name)
{
        struct mqueue *mq;

        KKASSERT(lockstatus(&mqlist_mtx, curthread));

        LIST_FOREACH(mq, &mqueue_head, mq_list) {
                if (strncmp(mq->mq_name, name, MQ_NAMELEN) == 0) {
                        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
                        return mq;
                }
        }

        return NULL;
}
/*
 * mqueue_get: get the mqueue from the descriptor.
 * => locks the message queue, if found.
 * => holds a reference on the file descriptor.
 */
static int
mqueue_get(struct lwp *l, mqd_t mqd, file_t **fpr)
{
        struct mqueue *mq;
        file_t *fp;

        fp = holdfp(curproc->p_fd, (int)mqd, -1);	/* XXX: Why -1 ? */
        if (__predict_false(fp == NULL))
                return EBADF;

        if (__predict_false(fp->f_type != DTYPE_MQUEUE)) {
                fdrop(fp);
                return EBADF;
        }
        mq = fp->f_data;
        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

        *fpr = fp;
        return 0;
}
/*
 * mqueue_linear_insert: perform linear insert according to the message
 * priority into the reserved queue (MQ_PQRESQ).  Reserved queue is a
 * sorted list used only when mq_prio_max is increased via sysctl.
 */
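/*
 * For example, with a reserved queue holding priorities { 9, 7, 7, 3 },
 * a new priority-7 message is placed after the existing 7s (the loop
 * below breaks only on a strictly higher priority), so FIFO order is
 * preserved among messages of equal priority.
 */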
static inline void
mqueue_linear_insert(struct mqueue *mq, struct mq_msg *msg)
{
        struct mq_msg *mit;

        TAILQ_FOREACH(mit, &mq->mq_head[MQ_PQRESQ], msg_queue) {
                if (msg->msg_prio > mit->msg_prio)
                        break;
        }
        if (mit == NULL) {
                TAILQ_INSERT_TAIL(&mq->mq_head[MQ_PQRESQ], msg, msg_queue);
        } else {
                TAILQ_INSERT_BEFORE(mit, msg, msg_queue);
        }
}
/*
 * Validate input.
 */
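/*
 * Note: a non-zero timeout shorter than one clock tick (nstick ns) is
 * rounded up to a full tick, so it does not later collapse into a zero
 * tick count (which the sleep calls would treat as "no timeout").
 */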
static int
itimespecfix(struct timespec *ts)
{
        if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000)
                return (EINVAL);
        if (ts->tv_sec == 0 && ts->tv_nsec != 0 && ts->tv_nsec < nstick)
                ts->tv_nsec = nstick;
        return (0);
}
/*
 * Compute number of ticks in the specified amount of time.
 */
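/*
 * The _high() variant presumably rounds the conversion up, which keeps the
 * result non-zero for any positive interval; abstimeout2timo() below
 * asserts exactly that.
 */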
static int
tstohz(const struct timespec *ts)
{
        struct timeval tv;

        /*
         * usec has great enough resolution for hz, so convert to a
         * timeval and use tvtohz_high().
         */
        TIMESPEC_TO_TIMEVAL(&tv, ts);
        return tvtohz_high(&tv);	/* XXX Why _high() and not _low() ? */
}
/*
 * Convert an absolute timeout (struct timespec) into a tick count.
 * Used by mq_timedreceive(), mq_timedsend().
 */
static int
abstimeout2timo(struct timespec *ts, int *timo)
{
        struct timespec tsd;
        int error;

        error = itimespecfix(ts);
        if (error) {
                return error;
        }
        getnanotime(&tsd);
        timespecsub(ts, &tsd);
        if (ts->tv_sec < 0 || (ts->tv_sec == 0 && ts->tv_nsec <= 0)) {
                return ETIMEDOUT;
        }
        *timo = tstohz(ts);
        KKASSERT(*timo != 0);

        return 0;
}
static int
mq_stat_fop(file_t *fp, struct stat *st, struct ucred *cred)
{
        struct mqueue *mq = fp->f_data;

        (void)memset(st, 0, sizeof(*st));

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
        st->st_mode = mq->mq_mode;
        st->st_uid = mq->mq_euid;
        st->st_gid = mq->mq_egid;
        st->st_atimespec = mq->mq_atime;
        st->st_mtimespec = mq->mq_mtime;
        /*st->st_ctimespec = st->st_birthtimespec = mq->mq_btime;*/
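        /*
         * Note: the following two assignments override the owner's
         * mq_euid/mq_egid stored above with the credentials attached to
         * the open file itself.
         */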
        st->st_uid = fp->f_cred->cr_uid;
        st->st_gid = fp->f_cred->cr_svgid;
        lockmgr(&mq->mq_mtx, LK_RELEASE);

        return 0;
}
static struct filterops mqfiltops_read =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, mqfilter_read_detach, mqfilter_read };
static struct filterops mqfiltops_write =
        { FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, mqfilter_write_detach, mqfilter_write };
static int
mq_kqfilter_fop(struct file *fp, struct knote *kn)
{
        struct mqueue *mq = fp->f_data;
        struct klist *klist;

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &mqfiltops_read;
                kn->kn_hook = (caddr_t)mq;
                klist = &mq->mq_rkq.ki_note;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &mqfiltops_write;
                kn->kn_hook = (caddr_t)mq;
                klist = &mq->mq_wkq.ki_note;
                break;
        default:
                lockmgr(&mq->mq_mtx, LK_RELEASE);
                return (EOPNOTSUPP);
        }

        knote_insert(klist, kn);
        lockmgr(&mq->mq_mtx, LK_RELEASE);

        return (0);
}
static void
mqfilter_read_detach(struct knote *kn)
{
        struct mqueue *mq = (struct mqueue *)kn->kn_hook;

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
        struct klist *klist = &mq->mq_rkq.ki_note;
        knote_remove(klist, kn);
        lockmgr(&mq->mq_mtx, LK_RELEASE);
}
static void
mqfilter_write_detach(struct knote *kn)
{
        struct mqueue *mq = (struct mqueue *)kn->kn_hook;

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
        struct klist *klist = &mq->mq_wkq.ki_note;
        knote_remove(klist, kn);
        lockmgr(&mq->mq_mtx, LK_RELEASE);
}
static int
mqfilter_read(struct knote *kn, long hint)
{
        struct mqueue *mq = (struct mqueue *)kn->kn_hook;
        struct mq_attr *mqattr;
        int ready = 0;

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
        mqattr = &mq->mq_attrib;
        /* Ready for receiving, if there are messages in the queue */
        if (mqattr->mq_curmsgs)
                ready = 1;
        lockmgr(&mq->mq_mtx, LK_RELEASE);

        return (ready);
}
static int
mqfilter_write(struct knote *kn, long hint)
{
        struct mqueue *mq = (struct mqueue *)kn->kn_hook;
        struct mq_attr *mqattr;
        int ready = 0;

        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
        mqattr = &mq->mq_attrib;
        /* Ready for sending, if the message queue is not full */
        if (mqattr->mq_curmsgs < mqattr->mq_maxmsg)
                ready = 1;
        lockmgr(&mq->mq_mtx, LK_RELEASE);

        return (ready);
}
static int
mq_close_fop(file_t *fp)
{
        struct proc *p = curproc;
        struct mqueue *mq = fp->f_data;
        bool destroy;

        lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
        lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);

        /* Decrease the counters */
        p->p_mqueue_cnt--;
        mq->mq_refcnt--;

        /* Remove notification if registered for this process */
        if (mq->mq_notify_proc == p)
                mq->mq_notify_proc = NULL;

        /*
         * If this is the last reference and mqueue is marked for unlink,
         * remove and later destroy the message queue.
         */
        if (mq->mq_refcnt == 0 && (mq->mq_attrib.mq_flags & MQ_UNLINK)) {
                LIST_REMOVE(mq, mq_list);
                destroy = true;
        } else
                destroy = false;

        lockmgr(&mq->mq_mtx, LK_RELEASE);
        lockmgr(&mqlist_mtx, LK_RELEASE);

        if (destroy)
                mqueue_destroy(mq);

        return 0;
}
/*
 * General mqueue system calls.
 */

int
sys_mq_open(struct mq_open_args *uap)
{
        /* {
                syscallarg(const char *) name;
                syscallarg(int) oflag;
                syscallarg(mode_t) mode;
                syscallarg(struct mq_attr) attr;
        } */
        struct thread *td = curthread;
        struct proc *p = td->td_proc;
        struct filedesc *fdp = p->p_fd;
        struct mqueue *mq, *mq_new = NULL;
        file_t *fp;
        char *name;
        int mqd, error, oflag;

        /* Check access mode flags */
        oflag = SCARG(uap, oflag);
        if ((oflag & O_ACCMODE) == (O_WRONLY | O_RDWR)) {
                return EINVAL;
        }

        /* Get the name from the user-space */
        name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO | M_NULLOK);
        if (name == NULL)
                return (ENOMEM);
        error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
        if (error) {
                kfree(name, M_MQBUF);
                return error;
        }
        if (oflag & O_CREAT) {
                struct mq_attr attr;
                u_int i;

                /* Check the limit */
                if (p->p_mqueue_cnt == mq_open_max) {
                        kfree(name, M_MQBUF);
                        return EMFILE;
                }

                /* Empty name is invalid */
                if (name[0] == '\0') {
                        kfree(name, M_MQBUF);
                        return EINVAL;
                }

                /* Check for mqueue attributes */
                if (SCARG(uap, attr)) {
                        error = copyin(SCARG(uap, attr), &attr,
                            sizeof(struct mq_attr));
                        if (error) {
                                kfree(name, M_MQBUF);
                                return error;
                        }
                        if (attr.mq_maxmsg <= 0 ||
                            attr.mq_maxmsg > mq_max_maxmsg ||
                            attr.mq_msgsize <= 0 ||
                            attr.mq_msgsize > mq_max_msgsize) {
                                kfree(name, M_MQBUF);
                                return EINVAL;
                        }
                        attr.mq_curmsgs = 0;
                } else {
                        memset(&attr, 0, sizeof(struct mq_attr));
                        attr.mq_maxmsg = mq_def_maxmsg;
                        attr.mq_msgsize =
                            MQ_DEF_MSGSIZE - sizeof(struct mq_msg);
                }

                /*
                 * Allocate new mqueue, initialize data structures,
                 * copy the name, attributes and set the flag.
                 */
                mq_new = kmalloc(sizeof(struct mqueue), M_MQBUF,
                    M_WAITOK | M_ZERO | M_NULLOK);
                if (mq_new == NULL) {
                        kfree(name, M_MQBUF);
                        return (ENOMEM);
                }

                lockinit(&mq_new->mq_mtx, "mq_new->mq_mtx", 0, LK_CANRECURSE);
                for (i = 0; i < (MQ_PQSIZE + 1); i++) {
                        TAILQ_INIT(&mq_new->mq_head[i]);
                }

                strlcpy(mq_new->mq_name, name, MQ_NAMELEN);
                memcpy(&mq_new->mq_attrib, &attr, sizeof(struct mq_attr));

                /*CTASSERT((O_MASK & (MQ_UNLINK | MQ_RECEIVE)) == 0);*/
                /* mq_new->mq_attrib.mq_flags = (O_MASK & oflag); */
                mq_new->mq_attrib.mq_flags = oflag;
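                /*
                 * Note: storing the raw oflag means mq_attrib.mq_flags later
                 * carries both open flags (e.g. O_NONBLOCK) and the internal
                 * MQ_UNLINK/MQ_RECEIVE state bits; the commented-out O_MASK
                 * variant above would have masked what gets stored.
                 */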
                /* Store mode and effective UID with GID */
                mq_new->mq_mode = ((SCARG(uap, mode) &
                    ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT;
                mq_new->mq_euid = td->td_ucred->cr_uid;
                mq_new->mq_egid = td->td_ucred->cr_svgid;
        }

        /* Allocate file structure and descriptor */
        error = falloc(td->td_lwp, &fp, &mqd);
        if (error) {
                if (mq_new)
                        mqueue_destroy(mq_new);
                kfree(name, M_MQBUF);
                return error;
        }
        fp->f_type = DTYPE_MQUEUE;
        fp->f_flag = FFLAGS(oflag) & (FREAD | FWRITE);
        fp->f_ops = &mqops;
        /* Look up an mqueue with this name */
        lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
        mq = mqueue_lookup(name);
        if (mq) {
                int acc_mode;

                KKASSERT(lockstatus(&mq->mq_mtx, curthread));

                /* Check if mqueue is not marked as unlinking */
                if (mq->mq_attrib.mq_flags & MQ_UNLINK) {
                        error = EACCES;
                        goto exit;
                }
                /* Fail if O_EXCL is set and the mqueue already exists */
                if ((oflag & O_CREAT) && (oflag & O_EXCL)) {
                        error = EEXIST;
                        goto exit;
                }

                /*
                 * Check the permissions.  Note the difference between
                 * VREAD/VWRITE and FREAD/FWRITE.
                 */
                acc_mode = 0;
                if (fp->f_flag & FREAD) {
                        acc_mode |= VREAD;
                }
                if (fp->f_flag & FWRITE) {
                        acc_mode |= VWRITE;
                }
                if (vaccess(VNON, mq->mq_mode, mq->mq_euid, mq->mq_egid,
                    acc_mode, td->td_ucred)) {
                        error = EACCES;
                        goto exit;
                }
        } else {
                /* Fail if the mqueue does not exist and we are not creating it */
                if ((oflag & O_CREAT) == 0) {
                        lockmgr(&mqlist_mtx, LK_RELEASE);
                        KKASSERT(mq_new == NULL);
                        fsetfd(fdp, NULL, mqd);
                        fp->f_ops = &badfileops;
                        fdrop(fp);
                        kfree(name, M_MQBUF);
                        return ENOENT;
                }

                /* Check the limit */
                if (p->p_mqueue_cnt == mq_open_max) {
                        error = EMFILE;
                        goto exit;
                }

                /* Insert the queue into the list */
                mq = mq_new;
                lockmgr(&mq->mq_mtx, LK_EXCLUSIVE);
                LIST_INSERT_HEAD(&mqueue_head, mq, mq_list);
                mq_new = NULL;
                getnanotime(&mq->mq_btime);
                mq->mq_atime = mq->mq_mtime = mq->mq_btime;
        }
        /* Increase the counters, and make descriptor ready */
        p->p_mqueue_cnt++;
        mq->mq_refcnt++;
        fp->f_data = mq;
exit:
        lockmgr(&mq->mq_mtx, LK_RELEASE);
        lockmgr(&mqlist_mtx, LK_RELEASE);

        if (mq_new)
                mqueue_destroy(mq_new);
        if (error) {
                fsetfd(fdp, NULL, mqd);
                fp->f_ops = &badfileops;
        } else {
                fsetfd(fdp, fp, mqd);
                uap->sysmsg_result = mqd;
        }
        fdrop(fp);
        kfree(name, M_MQBUF);

        return error;
}
int
sys_mq_close(struct mq_close_args *uap)
{
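        /*
         * Note: the actual mqueue cleanup (reference counting and deferred
         * destruction of unlinked queues) is done by mq_close_fop() via the
         * fileops close hook once the descriptor's last reference is dropped.
         */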
        return sys_close((void *)uap);
}
/*
 * Primary mq_receive1() function.
 */
int
mq_receive1(struct lwp *l, mqd_t mqdes, void *msg_ptr, size_t msg_len,
    unsigned *msg_prio, struct timespec *ts, ssize_t *mlen)
{
        file_t *fp = NULL;
        struct mqueue *mq;
        struct mq_msg *msg = NULL;
        struct mq_attr *mqattr;
        u_int idx;
        int error;

        /* Get the message queue */
        error = mqueue_get(l, mqdes, &fp);
        if (error) {
                return error;
        }
        mq = fp->f_data;
        if ((fp->f_flag & FREAD) == 0) {
                error = EBADF;
                goto error;
        }
        getnanotime(&mq->mq_atime);
        mqattr = &mq->mq_attrib;

        /* Check the message size limits */
        if (msg_len < mqattr->mq_msgsize) {
                error = EMSGSIZE;
                goto error;
        }

        /* Check if queue is empty */
        while (mqattr->mq_curmsgs == 0) {
                int t;

                if (mqattr->mq_flags & O_NONBLOCK) {
                        error = EAGAIN;
                        goto error;
                }
                if (ts) {
                        error = abstimeout2timo(ts, &t);
                        if (error)
                                goto error;
                } else
                        t = 0;
                /*
                 * Block until someone sends a message.
                 * While doing this, notification should not be sent.
                 */
                mqattr->mq_flags |= MQ_RECEIVE;
                error = lksleep(&mq->mq_send_cv, &mq->mq_mtx, PCATCH, "mqsend", t);
                mqattr->mq_flags &= ~MQ_RECEIVE;
                if (error || (mqattr->mq_flags & MQ_UNLINK)) {
                        error = (error == EWOULDBLOCK) ? ETIMEDOUT : EINTR;
                        goto error;
                }
        }

        /*
         * Find the highest priority message and remove it from the queue.
         * The reserved queue is checked first, then the bitmap.
         */
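        /*
         * mq_bitmap tracks which fixed-priority queues are non-empty: a
         * message of priority p lives in mq_head[MQ_PQSIZE - p] and sets
         * bit (MQ_PQSIZE - p - 1), so ffs() (lowest set bit) yields the
         * highest-priority non-empty queue.
         */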
        msg = TAILQ_FIRST(&mq->mq_head[MQ_PQRESQ]);
        if (__predict_true(msg == NULL)) {
                idx = ffs(mq->mq_bitmap);
                msg = TAILQ_FIRST(&mq->mq_head[idx]);
                KKASSERT(msg != NULL);
        } else {
                idx = MQ_PQRESQ;
        }
        TAILQ_REMOVE(&mq->mq_head[idx], msg, msg_queue);

        /* Unmark the bit, if last message. */
        if (__predict_true(idx) && TAILQ_EMPTY(&mq->mq_head[idx])) {
                KKASSERT((MQ_PQSIZE - idx) == msg->msg_prio);
                mq->mq_bitmap &= ~(1 << --idx);
        }

        /* Decrement the counter and signal waiter, if any */
        mqattr->mq_curmsgs--;
        wakeup_one(&mq->mq_recv_cv);

        /* Ready for sending now */
        KNOTE(&mq->mq_wkq.ki_note, 0);
error:
        lockmgr(&mq->mq_mtx, LK_RELEASE);
        fdrop(fp);
        if (error)
                return error;

        /*
         * Copy the data to the user-space.
         * Note: POSIX requires that no message be removed from the queue on
         * failure; since the message has already been dequeued, a failing
         * copyout() below violates that requirement.
         */
        *mlen = msg->msg_len;
        error = copyout(msg->msg_ptr, msg_ptr, msg->msg_len);
        if (error == 0 && msg_prio)
                error = copyout(&msg->msg_prio, msg_prio, sizeof(unsigned));
        mqueue_freemsg(msg, sizeof(struct mq_msg) + msg->msg_len);

        return error;
}
int
sys_mq_receive(struct mq_receive_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(char *) msg_ptr;
                syscallarg(size_t) msg_len;
                syscallarg(unsigned *) msg_prio;
        } */
        ssize_t mlen;
        int error;

        error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
            SCARG(uap, msg_len), SCARG(uap, msg_prio), 0, &mlen);
        if (error == 0)
                uap->sysmsg_result = mlen;

        return error;
}
int
sys_mq_timedreceive(struct mq_timedreceive_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(char *) msg_ptr;
                syscallarg(size_t) msg_len;
                syscallarg(unsigned *) msg_prio;
                syscallarg(const struct timespec *) abs_timeout;
        } */
        int error;
        ssize_t mlen;
        struct timespec ts, *tsp;

        /* Get and convert time value */
        if (SCARG(uap, abs_timeout)) {
                error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
                if (error)
                        return error;
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        error = mq_receive1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
            SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp, &mlen);
        if (error == 0)
                uap->sysmsg_result = mlen;

        return error;
}
/*
 * Primary mq_send1() function.
 */
int
mq_send1(struct lwp *l, mqd_t mqdes, const char *msg_ptr, size_t msg_len,
    unsigned msg_prio, struct timespec *ts)
{
        file_t *fp = NULL;
        struct mqueue *mq;
        struct mq_msg *msg;
        struct mq_attr *mqattr;
        struct proc *notify = NULL;
        /*ksiginfo_t ksi;*/
        size_t size;
        int error;

        /* Check the priority range */
        if (msg_prio >= mq_prio_max)
                return EINVAL;

        /* Allocate a new message */
        size = sizeof(struct mq_msg) + msg_len;
        if (size > mq_max_msgsize)
                return EMSGSIZE;

        msg = kmalloc(size, M_MQBUF, M_WAITOK | M_NULLOK);
        if (msg == NULL)
                return (ENOMEM);

        /* Get the data from user-space */
        error = copyin(msg_ptr, msg->msg_ptr, msg_len);
        if (error) {
                mqueue_freemsg(msg, size);
                return error;
        }
        msg->msg_len = msg_len;
        msg->msg_prio = msg_prio;

        /* Get the mqueue */
        error = mqueue_get(l, mqdes, &fp);
        if (error) {
                mqueue_freemsg(msg, size);
                return error;
        }
        mq = fp->f_data;
        if ((fp->f_flag & FWRITE) == 0) {
                error = EBADF;
                goto error;
        }
        getnanotime(&mq->mq_mtime);
        mqattr = &mq->mq_attrib;

        /* Check the message size limit */
        if (msg_len <= 0 || msg_len > mqattr->mq_msgsize) {
                error = EMSGSIZE;
                goto error;
        }

        /* Check if queue is full */
        while (mqattr->mq_curmsgs >= mqattr->mq_maxmsg) {
                int t;

                if (mqattr->mq_flags & O_NONBLOCK) {
                        error = EAGAIN;
                        goto error;
                }
                if (ts) {
                        error = abstimeout2timo(ts, &t);
                        if (error)
                                goto error;
                } else
                        t = 0;
                /* Block until queue becomes available */
                error = lksleep(&mq->mq_recv_cv, &mq->mq_mtx, PCATCH, "mqrecv", t);
                if (error || (mqattr->mq_flags & MQ_UNLINK)) {
                        error = (error == EWOULDBLOCK) ? ETIMEDOUT : error;
                        goto error;
                }
        }
        KKASSERT(mq->mq_attrib.mq_curmsgs < mq->mq_attrib.mq_maxmsg);

        /*
         * Insert message into the queue, according to the priority.
         * Note the difference between index and priority.
         */
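        /*
         * Priority p in [0, MQ_PQSIZE) maps to queue index MQ_PQSIZE - p,
         * so higher priorities land in lower-numbered queues (and lower
         * bitmap bits), which ffs() on the receive side picks up first.
         * Priorities at or above MQ_PQSIZE go to the sorted reserved
         * queue instead.
         */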
        if (__predict_true(msg_prio < MQ_PQSIZE)) {
                u_int idx = MQ_PQSIZE - msg_prio;

                KKASSERT(idx != MQ_PQRESQ);
                TAILQ_INSERT_TAIL(&mq->mq_head[idx], msg, msg_queue);
                mq->mq_bitmap |= (1 << --idx);
        } else {
                mqueue_linear_insert(mq, msg);
        }

        /* Check for the notify */
        if (mqattr->mq_curmsgs == 0 && mq->mq_notify_proc &&
            (mqattr->mq_flags & MQ_RECEIVE) == 0 &&
            mq->mq_sig_notify.sigev_notify == SIGEV_SIGNAL) {
                /* Initialize the signal */
                /*KSI_INIT(&ksi);*/
                /*ksi.ksi_signo = mq->mq_sig_notify.sigev_signo;*/
                /*ksi.ksi_code = SI_MESGQ;*/
                /*ksi.ksi_value = mq->mq_sig_notify.sigev_value;*/
                /* Unregister the process */
                notify = mq->mq_notify_proc;
                mq->mq_notify_proc = NULL;
        }

        /* Increment the counter and signal waiter, if any */
        mqattr->mq_curmsgs++;
        wakeup_one(&mq->mq_send_cv);

        /* Ready for receiving now */
        KNOTE(&mq->mq_rkq.ki_note, 0);
error:
        lockmgr(&mq->mq_mtx, LK_RELEASE);
        fdrop(fp);

        if (error) {
                mqueue_freemsg(msg, size);
        } else if (notify) {
                /* Send the notify, if needed */
                lwkt_gettoken(&proc_token);
                /*kpsignal(notify, &ksi, NULL);*/
                ksignal(notify, mq->mq_sig_notify.sigev_signo);
                lwkt_reltoken(&proc_token);
        }

        return error;
}
int
sys_mq_send(struct mq_send_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(const char *) msg_ptr;
                syscallarg(size_t) msg_len;
                syscallarg(unsigned) msg_prio;
        } */

        return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
            SCARG(uap, msg_len), SCARG(uap, msg_prio), 0);
}
int
sys_mq_timedsend(struct mq_timedsend_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(const char *) msg_ptr;
                syscallarg(size_t) msg_len;
                syscallarg(unsigned) msg_prio;
                syscallarg(const struct timespec *) abs_timeout;
        } */
        struct timespec ts, *tsp;
        int error;

        /* Get and convert time value */
        if (SCARG(uap, abs_timeout)) {
                error = copyin(SCARG(uap, abs_timeout), &ts, sizeof(ts));
                if (error)
                        return error;
                tsp = &ts;
        } else {
                tsp = NULL;
        }

        return mq_send1(curthread->td_lwp, SCARG(uap, mqdes), SCARG(uap, msg_ptr),
            SCARG(uap, msg_len), SCARG(uap, msg_prio), tsp);
}
int
sys_mq_notify(struct mq_notify_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(const struct sigevent *) notification;
        } */
        file_t *fp = NULL;
        struct mqueue *mq;
        struct sigevent sig;
        int error;

        if (SCARG(uap, notification)) {
                /* Get the signal from user-space */
                error = copyin(SCARG(uap, notification), &sig,
                    sizeof(struct sigevent));
                if (error)
                        return error;
                if (sig.sigev_notify == SIGEV_SIGNAL &&
                    (sig.sigev_signo <= 0 || sig.sigev_signo >= NSIG))
                        return EINVAL;
        }

        error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
        if (error)
                return error;
        mq = fp->f_data;

        if (SCARG(uap, notification)) {
                /* Register notification: set the signal and target process */
                if (mq->mq_notify_proc == NULL) {
                        memcpy(&mq->mq_sig_notify, &sig,
                            sizeof(struct sigevent));
                        mq->mq_notify_proc = curproc;
                } else {
                        /* Fail if someone else already registered */
                        error = EBUSY;
                }
        } else {
                /* Unregister the notification */
                mq->mq_notify_proc = NULL;
        }
        lockmgr(&mq->mq_mtx, LK_RELEASE);
        fdrop(fp);

        return error;
}
int
sys_mq_getattr(struct mq_getattr_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(struct mq_attr *) mqstat;
        } */
        file_t *fp = NULL;
        struct mqueue *mq;
        struct mq_attr attr;
        int error;

        /* Get the message queue */
        error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
        if (error)
                return error;
        mq = fp->f_data;
        memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
        lockmgr(&mq->mq_mtx, LK_RELEASE);
        fdrop(fp);

        return copyout(&attr, SCARG(uap, mqstat), sizeof(struct mq_attr));
}
int
sys_mq_setattr(struct mq_setattr_args *uap)
{
        /* {
                syscallarg(mqd_t) mqdes;
                syscallarg(const struct mq_attr *) mqstat;
                syscallarg(struct mq_attr *) omqstat;
        } */
        file_t *fp = NULL;
        struct mqueue *mq;
        struct mq_attr attr;
        int error, nonblock;

        error = copyin(SCARG(uap, mqstat), &attr, sizeof(struct mq_attr));
        if (error)
                return error;
        nonblock = (attr.mq_flags & O_NONBLOCK);

        /* Get the message queue */
        error = mqueue_get(curthread->td_lwp, SCARG(uap, mqdes), &fp);
        if (error)
                return error;
        mq = fp->f_data;

        /* Copy the old attributes, if needed */
        if (SCARG(uap, omqstat)) {
                memcpy(&attr, &mq->mq_attrib, sizeof(struct mq_attr));
        }

        /* Ignore everything except O_NONBLOCK */
        if (nonblock)
                mq->mq_attrib.mq_flags |= O_NONBLOCK;
        else
                mq->mq_attrib.mq_flags &= ~O_NONBLOCK;

        lockmgr(&mq->mq_mtx, LK_RELEASE);
        fdrop(fp);

        /*
         * Copy the data to the user-space.
         * Note: POSIX requires that the new attributes not take effect if the
         * call fails; since the flags were already updated above, a failing
         * copyout() here violates that requirement.
         */
        if (SCARG(uap, omqstat))
                error = copyout(&attr, SCARG(uap, omqstat),
                    sizeof(struct mq_attr));

        return error;
}
int
sys_mq_unlink(struct mq_unlink_args *uap)
{
        /* {
                syscallarg(const char *) name;
        } */
        struct thread *td = curthread;
        struct mqueue *mq;
        char *name;
        int error, refcnt = 0;

        /* Get the name from the user-space */
        name = kmalloc(MQ_NAMELEN, M_MQBUF, M_WAITOK | M_ZERO | M_NULLOK);
        if (name == NULL)
                return (ENOMEM);
        error = copyinstr(SCARG(uap, name), name, MQ_NAMELEN - 1, NULL);
        if (error) {
                kfree(name, M_MQBUF);
                return error;
        }

        /* Look up the message queue by name */
        lockmgr(&mqlist_mtx, LK_EXCLUSIVE);
        mq = mqueue_lookup(name);
        if (mq == NULL) {
                error = ENOENT;
                goto error;
        }

        /* Check the permissions */
        if (td->td_ucred->cr_uid != mq->mq_euid &&
            priv_check(td, PRIV_ROOT) != 0) {
                lockmgr(&mq->mq_mtx, LK_RELEASE);
                error = EACCES;
                goto error;
        }

        /* Mark the message queue as unlinking, before leaving the window */
        mq->mq_attrib.mq_flags |= MQ_UNLINK;

        /* Wake up all waiters, if there are any */
        wakeup(&mq->mq_send_cv);
        wakeup(&mq->mq_recv_cv);

        KNOTE(&mq->mq_rkq.ki_note, 0);
        KNOTE(&mq->mq_wkq.ki_note, 0);

        refcnt = mq->mq_refcnt;
        if (refcnt == 0)
                LIST_REMOVE(mq, mq_list);

        lockmgr(&mq->mq_mtx, LK_RELEASE);
error:
        lockmgr(&mqlist_mtx, LK_RELEASE);

        /*
         * If there are no references, destroy the message queue;
         * otherwise, the last mq_close() will do that.
         */
        if (error == 0 && refcnt == 0)
                mqueue_destroy(mq);

        kfree(name, M_MQBUF);
        return error;
}
/*
 * SysCtl.
 */
SYSCTL_NODE(_kern, OID_AUTO, mqueue,
    CTLFLAG_RW, 0, "Message queue options");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_open_max,
    CTLFLAG_RW, &mq_open_max, 0,
    "Maximal number of message queue descriptors per process");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_prio_max,
    CTLFLAG_RW, &mq_prio_max, 0,
    "Maximal priority of the message");
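/*
 * Note: raising kern.mqueue.mq_prio_max above MQ_PQSIZE makes messages with
 * priorities >= MQ_PQSIZE take the slower, sorted reserved-queue path in
 * mqueue_linear_insert().
 */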
SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_max_msgsize,
    CTLFLAG_RW, &mq_max_msgsize, 0,
    "Maximal allowed size of the message");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_def_maxmsg,
    CTLFLAG_RW, &mq_def_maxmsg, 0,
    "Default maximal message count");

SYSCTL_INT(_kern_mqueue, OID_AUTO, mq_max_maxmsg,
    CTLFLAG_RW, &mq_max_maxmsg, 0,
    "Maximal allowed message count");

SYSINIT(sys_mqueue_init, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, mqueue_sysinit, NULL);
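/*
 * Illustrative userland usage (a sketch only, not part of this file; assumes
 * the standard <mqueue.h> wrappers for the system calls above):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 32, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
 *	char buf[128];
 *	unsigned prio;
 *
 *	mq_send(mqd, "hi", 3, 5);
 *	mq_receive(mqd, buf, sizeof(buf), &prio);
 *	mq_close(mqd);
 *	mq_unlink("/example");
 */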