/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <linux/uaccess.h>
#include "util.h"

45 /* one msg_receiver structure for each sleeping receiver */
46 struct msg_receiver {
47 struct list_head r_list;
48 struct task_struct *r_tsk;
50 int r_mode;
51 long r_msgtype;
52 long r_maxsize;
55 * Mark r_msg volatile so that the compiler
56 * does not try to get smart and optimize
57 * it. We rely on this for the lockless
58 * receive algorithm.
60 struct msg_msg *volatile r_msg;
63 /* one msg_sender for each sleeping sender */
64 struct msg_sender {
65 struct list_head list;
66 struct task_struct *tsk;
69 #define SEARCH_ANY 1
70 #define SEARCH_EQUAL 2
71 #define SEARCH_NOTEQUAL 3
72 #define SEARCH_LESSEQUAL 4
73 #define SEARCH_NUMBER 5
75 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
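/*
 * Lookup helpers: resolve a msg queue id to its struct msg_queue. Both return
 * an ERR_PTR() on failure; the _check variant additionally verifies that the
 * id is still current (i.e. the queue was not deleted and its slot reused).
 */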
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}

static void msg_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct msg_queue *msq = ipc_rcu_to_struct(p);

	security_msg_queue_free(msq);
	ipc_rcu_free(head);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq, ipc_rcu_free);
		return retval;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	/* ipc_addid() locks msq upon success. */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		ipc_rcu_putref(msq, msg_rcu_free);
		return id;
	}

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	__set_current_state(TASK_INTERRUPTIBLE);
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct msg_sender *mss, *t;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

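/*
 * Wake up every sleeping receiver on the queue and hand it the error @res:
 * -EIDRM when the queue is being removed, -EAGAIN after an IPC_SET that may
 * have tightened the queue permissions.
 */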
static void expunge_all(struct msg_queue *msq, int res)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		msr->r_msg = NULL; /* initialize expunge ordering */
		wake_up_process(msr->r_tsk);
		/*
		 * Ensure that the wakeup is visible before setting r_msg as
		 * the receiving end depends on it: either spinning on a nil,
		 * or dealing with -EAGAIN cases. See lockless receive part 1
		 * and 2 in do_msgrcv().
		 */
		smp_wmb(); /* barrier (B) */
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(msq, msg_rcu_free);
}

/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops msg_ops = {
		.getnew = newque,
		.associate = msg_security,
	};
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid = tbuf_old.msg_perm.uid;
		out->msg_perm.gid = tbuf_old.msg_perm.gid;
		out->msg_perm.mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes = tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rwsem);
	return err;
}

static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rwsem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rwsem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return -EINVAL;
	}
}

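/*
 * Check whether @msg satisfies the receive criteria described by @type and
 * @mode (one of the SEARCH_* values defined above).
 */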
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}

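/*
 * Try to hand @msg directly to a sleeping receiver instead of queueing it.
 * Returns 1 if a receiver accepted the message, 0 if it must be enqueued.
 */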
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				/* initialize pipelined send ordering */
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				/* barrier (B) see barrier comment below */
				smp_wmb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				/*
				 * Ensure that the wakeup is visible before
				 * setting r_msg, as the receiving end can
				 * otherwise exit - once r_msg is set, the
				 * receiver can continue. See lockless receive
				 * part 1 and 2 in do_msgrcv(). Barrier (B).
				 */
				smp_wmb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}

	return 0;
}

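/*
 * do_msgsnd - common implementation of msgsnd(2). Copies the message from
 * user space, then either hands it directly to a waiting receiver or appends
 * it to the queue, sleeping while the queue is full unless IPC_NOWAIT is set.
 */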
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	ipc_lock_object(&msq->q_perm);

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock0;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, &s);

		if (!ipc_rcu_getref(msq)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(msq, ipc_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with the lowest type <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

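/*
 * Copy a kernel msg_msg back to the userspace msgbuf at @dest, truncating
 * the text to @bufsz bytes. Returns the number of bytes stored or -EFAULT.
 */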
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif

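/*
 * Scan the queue for the first message matching @msgtyp under @mode. For
 * SEARCH_LESSEQUAL the scan keeps the message with the lowest type seen so
 * far; for SEARCH_NUMBER, *msgtyp is the index of the wanted message.
 * Returns ERR_PTR(-EAGAIN) if nothing matches.
 */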
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}

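/*
 * do_msgrcv - common implementation of msgrcv(2). Either takes a matching
 * message off the queue (or copies it when MSG_COPY is set), or registers the
 * caller as a sleeping receiver and waits for pipelined_send()/expunge_all()
 * to hand it a message or an error.
 */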
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	int mode;
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;

	if (msgflg & MSG_COPY) {
		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
			return -EINVAL;
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		rcu_read_unlock();
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock1;

		ipc_lock_object(&msq->q_perm);

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			msg = ERR_PTR(-EIDRM);
			goto out_unlock0;
		}

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock0;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock0;
			}

			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);

			goto out_unlock0;
		}

		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock0;
		}

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		__set_current_state(TASK_INTERRUPTIBLE);

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and acquiring the q_perm.lock in ipc_lock_object().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details. The correct serialization
		 * ensures that a receiver cannot continue without the wakeup
		 * being visible _before_ setting r_msg:
		 *
		 * CPU 0                             CPU 1
		 * <loop receiver>
		 *   smp_rmb(); (A) <-- pair -.      <waker thread>
		 *   <load ->r_msg>           |        msr->r_msg = NULL;
		 *                            |        wake_up_process();
		 * <continue>                 `------> smp_wmb(); (B)
		 *                                     msr->r_msg = msg;
		 *
		 * Where (A) orders the message value read and where (B) orders
		 * the write to the r_msg -- done in both pipelined_send and
		 * expunge_all.
		 */
		for (;;) {
			/*
			 * Pairs with writer barrier in pipelined_send
			 * or expunge_all.
			 */
			smp_rmb(); /* barrier (A) */
			msg = (struct msg_msg *)msr_d.r_msg;
			if (msg)
				break;

			/*
			 * The cpu_relax() call is a compiler barrier
			 * which forces everything in this loop to be
			 * re-loaded.
			 */
			cpu_relax();
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock1;

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_object(&msq->q_perm);

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock0;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}

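/*
 * Set the per-namespace message queue limits to their compile-time defaults
 * and initialise the IPC id table for this namespace.
 */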
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

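/*
 * /proc/sysvipc/msg output (CONFIG_PROC_FS only): one line per message queue;
 * the columns match the header string registered in msg_init() below.
 */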
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	seq_printf(s,
		   "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
		   msq->q_perm.key,
		   msq->q_perm.id,
		   msq->q_perm.mode,
		   msq->q_cbytes,
		   msq->q_qnum,
		   msq->q_lspid,
		   msq->q_lrpid,
		   from_kuid_munged(user_ns, msq->q_perm.uid),
		   from_kgid_munged(user_ns, msq->q_perm.gid),
		   from_kuid_munged(user_ns, msq->q_perm.cuid),
		   from_kgid_munged(user_ns, msq->q_perm.cgid),
		   msq->q_stime,
		   msq->q_rtime,
		   msq->q_ctime);

	return 0;
}
#endif

void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/msg",
				" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}