linux-2.6 / ipc / msg.c

/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */

#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/* sysctl: */
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	volatile struct msg_msg	*r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};

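/*
 * The SEARCH_* modes below implement the msgtyp argument of msgrcv(2):
 * msgtyp == 0 matches any message, msgtyp > 0 matches that exact type
 * (or any other type if MSG_EXCEPT is set), and msgtyp < 0 matches the
 * lowest type that is <= |msgtyp|; see convert_mode() below.
 */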
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4

static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;

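/*
 * msg_lock() returns the queue with its spinlock held, or NULL if the
 * id is invalid; callers pair it with msg_unlock() on every path.
 */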
#define msg_lock(id)	((struct msg_queue *)ipc_lock(&msg_ids, id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue *)ipc_rmid(&msg_ids, id))
#define msg_checkid(msq, msgid)	ipc_checkid(&msg_ids, &msq->q_perm, msgid)
#define msg_buildid(id, seq)	ipc_buildid(&msg_ids, id, seq)

static void freeque(struct msg_queue *msq, int id);
static int newque(key_t key, int msgflg);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

void __init msg_init(void)
{
	ipc_init_ids(&msg_ids, msg_ctlmni);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				&msg_ids,
				sysvipc_msg_proc_show);
}

static int newque(key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, retval;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if (id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_id = msg_buildid(id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_id;
}

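/*
 * ss_add() queues the caller as a sleeping sender and marks it
 * TASK_INTERRUPTIBLE before the queue lock is dropped, so a wakeup
 * cannot be lost. ss_wakeup(..., kill) sets list.next to NULL when the
 * queue is being destroyed, which is what ss_del() checks before
 * unlinking.
 */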
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}

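/*
 * expunge_all() fails every sleeping receiver with the given error.
 * r_msg is first set to NULL so that a concurrent lockless receiver
 * spins in its "wait until the waker is done" loop; only after
 * wake_up_process() and the barrier is the final status stored.
 */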
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and
 * cleans up all the messages associated with this queue.
 *
 * msg_ids.mutex and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.mutex remains locked on exit.
 */
static void freeque(struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msq = msg_rmid(id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}

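/*
 * Illustrative userspace counterpart (not part of this file): a minimal
 * sketch of creating a private queue and sending one message through
 * the syscalls defined below, assuming the glibc wrappers in
 * <sys/msg.h>:
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *	if (id >= 0)
 *		msgsnd(id, &m, sizeof(m.mtext), IPC_NOWAIT);
 */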
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct msg_queue *msq;
	int id, ret = -EPERM;

	mutex_lock(&msg_ids.mutex);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		msq = msg_lock(id);
		BUG_ON(msq == NULL);
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(id, msq->q_perm.seq);

			ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	mutex_unlock(&msg_ids.mutex);

	return ret;
}

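/*
 * Two ABI versions of struct msqid_ds exist: IPC_64 (the native
 * msqid64_ds layout) is copied out verbatim, while IPC_OLD uses the
 * legacy structure whose counters are only 16 bits wide, so values
 * are clamped to USHRT_MAX and the full values go to the msg_l*
 * fields.
 */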
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};

static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

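/*
 * Illustrative userspace counterpart (not part of this file): querying
 * a queue with IPC_STAT, assuming the glibc wrappers in <sys/msg.h>:
 *
 *	struct msqid_ds ds;
 *	if (msgctl(id, IPC_STAT, &ds) == 0)
 *		printf("%lu bytes queued\n", (unsigned long)ds.msg_cbytes);
 */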
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf setbuf;
	struct msg_queue *msq;
	int err, version;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it is not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		mutex_lock(&msg_ids.mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		mutex_unlock(&msg_ids.mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;
		if (cmd == MSG_STAT && msqid >= msg_ids.entries->size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq, msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime = msq->q_stime;
		tbuf.msg_rtime = msq->q_rtime;
		tbuf.msg_ctime = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid = msq->q_lspid;
		tbuf.msg_lrpid = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&msg_ids.mutex);
	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(msq, msqid);
		break;
	}
	err = 0;
out_up:
	mutex_unlock(&msg_ids.mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}

static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch(mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if (msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}

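/*
 * pipelined_send() tries to hand the message straight to a sleeping
 * receiver, bypassing the queue. Returns 1 if a receiver consumed the
 * message (or was failed with -E2BIG because its buffer is too small),
 * 0 if the message still has to be enqueued.
 */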
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}

	return 0;
}

asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_free;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type that is
	 *               <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}

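/*
 * Illustrative userspace counterpart (not part of this file): receiving
 * the lowest-typed message with type <= 5, assuming the glibc wrappers
 * in <sys/msg.h>:
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), -5, IPC_NOWAIT);
 */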
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock(msqid);
	if (msq == NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq, msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1) {
					msg = walk_msg;
					msgtyp = walk_msg->m_type - 1;
				} else {
					msg = walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/*
		 * Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/*
		 * Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/*
		 * Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Lockless receive, part 3 (slow path):
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/*
		 * Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user(msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
		msgsz = -EFAULT;
	}
	free_msg(msg);

	return msgsz;
}

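/*
 * The columns printed below line up with the header string passed to
 * ipc_init_proc_interface() in msg_init() above.
 */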
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif