/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched/wake_q.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <linux/uaccess.h>
#include "util.h"
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
	size_t			msgsz;
};
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
#define SEARCH_NUMBER		5

#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
							int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
static void msg_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu);
	struct msg_queue *msq = container_of(p, struct msg_queue, q_perm);

	security_msg_queue_free(msq);
	kvfree(msq);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = kvmalloc(sizeof(*msq), GFP_KERNEL);
	if (unlikely(!msq))
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		kvfree(msq);
		return retval;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	/* ipc_addid() locks msq upon success. */
	retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (retval < 0) {
		call_rcu(&msq->q_perm.rcu, msg_rcu_free);
		return retval;
	}

	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();

	return msq->q_perm.id;
}
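/*
 * A queue has room for a new message only when both limits hold: the
 * payload bytes fit under q_qbytes, and the message count also stays
 * under q_qbytes (SysV allows at most one message per byte of queue
 * quota, hence the "1 + q_qnum" check below).
 */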
static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
{
	return msgsz + msq->q_cbytes <= msq->q_qbytes &&
		1 + msq->q_qnum <= msq->q_qbytes;
}
static inline void ss_add(struct msg_queue *msq,
			  struct msg_sender *mss, size_t msgsz)
{
	mss->tsk = current;
	mss->msgsz = msgsz;
	__set_current_state(TASK_INTERRUPTIBLE);
	list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next)
		list_del(&mss->list);
}
static void ss_wakeup(struct msg_queue *msq,
		      struct wake_q_head *wake_q, bool kill)
{
	struct msg_sender *mss, *t;
	struct task_struct *stop_tsk = NULL;
	struct list_head *h = &msq->q_senders;

	list_for_each_entry_safe(mss, t, h, list) {
		if (kill)
			mss->list.next = NULL;

		/*
		 * Stop at the first task we don't wakeup,
		 * we've already iterated the original
		 * sender queue.
		 */
		else if (stop_tsk == mss->tsk)
			break;
		/*
		 * We are not in an EIDRM scenario here, therefore
		 * verify that we really need to wakeup the task.
		 * To maintain current semantics and wakeup order,
		 * move the sender to the tail on behalf of the
		 * blocked task.
		 */
		else if (!msg_fits_inqueue(msq, mss->msgsz)) {
			if (!stop_tsk)
				stop_tsk = mss->tsk;

			list_move_tail(&mss->list, &msq->q_senders);
			continue;
		}

		wake_q_add(wake_q, mss->tsk);
	}
}
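/*
 * Note that ss_wakeup() and expunge_all() only collect tasks on a
 * wake_q; the callers issue the actual wake_up_q() after dropping the
 * ipc object lock, keeping wakeups outside the spinlock hold time.
 */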
static void expunge_all(struct msg_queue *msq, int res,
			struct wake_q_head *wake_q)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		wake_q_add(wake_q, msr->r_tsk);
		WRITE_ONCE(msr->r_msg, ERR_PTR(res));
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct msg_msg *msg, *t;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
	DEFINE_WAKE_Q(wake_q);

	expunge_all(msq, -EIDRM, &wake_q);
	ss_wakeup(msq, &wake_q, true);
	msg_rmid(ns, msq);
	ipc_unlock_object(&msq->q_perm);
	wake_up_q(&wake_q);
	rcu_read_unlock();

	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
}
/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops msg_ops = {
		.getnew = newque,
		.associate = msg_security,
	};
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
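/*
 * Userspace view, for illustration only (the snippet itself is not part
 * of this file; the path passed to ftok() is a placeholder):
 *
 *	int id = msgget(ftok("/some/path", 'q'), IPC_CREAT | 0660);
 *
 * ipcget() looks the key up in msg_ids(ns) and calls newque() via
 * msg_ops.getnew when no queue exists yet for that key.
 */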
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime		= in->msg_stime;
		out.msg_rtime		= in->msg_rtime;
		out.msg_ctime		= in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes	= USHRT_MAX;
		else
			out.msg_cbytes	= in->msg_cbytes;
		out.msg_lcbytes		= in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum	= USHRT_MAX;
		else
			out.msg_qnum	= in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes	= USHRT_MAX;
		else
			out.msg_qbytes	= in->msg_qbytes;
		out.msg_lqbytes		= in->msg_qbytes;

		out.msg_lspid		= in->msg_lspid;
		out.msg_lrpid		= in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid	= tbuf_old.msg_perm.uid;
		out->msg_perm.gid	= tbuf_old.msg_perm.gid;
		out->msg_perm.mode	= tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes	= tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes	= tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds uninitialized_var(msqid64);
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	down_write(&msg_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
				      &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&msq->q_perm);
		/* freeque unlocks the ipc object and rcu */
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
	{
		DEFINE_WAKE_Q(wake_q);

		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock1;
		}

		ipc_lock_object(&msq->q_perm);
		err = ipc_update_perm(&msqid64.msg_perm, ipcp);
		if (err)
			goto out_unlock0;

		msq->q_qbytes = msqid64.msg_qbytes;

		msq->q_ctime = get_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN, &wake_q);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(msq, &wake_q, false);
		ipc_unlock_object(&msq->q_perm);
		wake_up_q(&wake_q);

		goto out_unlock1;
	}
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&msg_ids(ns).rwsem);
	return err;
}
static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct msg_queue *msq;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;

		/*
		 * We must not return kernel stack data: due to padding,
		 * it's not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rwsem);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rwsem);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		memset(&tbuf, 0, sizeof(tbuf));

		rcu_read_lock();
		if (cmd == MSG_STAT) {
			msq = msq_obtain_object(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = msq->q_perm.id;
		} else {
			msq = msq_obtain_object_check(ns, msqid);
			if (IS_ERR(msq)) {
				err = PTR_ERR(msq);
				goto out_unlock;
			}
			success_return = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		rcu_read_unlock();

		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}

	default:
		return -EINVAL;
	}

	return err;
out_unlock:
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	int version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
		return msgctl_nolock(ns, msqid, cmd, version, buf);
	case IPC_SET:
	case IPC_RMID:
		return msgctl_down(ns, msqid, cmd, buf, version);
	default:
		return -EINVAL;
	}
}
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
	case SEARCH_NUMBER:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
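/*
 * pipelined_send() tries to hand the message directly to a sleeping
 * receiver instead of linking it into the queue: if a waiting receiver
 * matches, its r_msg is set and the task is queued for wakeup, so the
 * message never touches q_messages.
 */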
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
				 struct wake_q_head *wake_q)
{
	struct msg_receiver *msr, *t;

	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				wake_q_add(wake_q, msr->r_tsk);
				WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
			} else {
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();

				wake_q_add(wake_q, msr->r_tsk);
				WRITE_ONCE(msr->r_msg, msg);
				return 1;
			}
		}
	}

	return 0;
}
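/*
 * A sender that must block takes an extra reference on the queue
 * (ipc_rcu_getref) before sleeping, so the msg_queue memory stays valid
 * across schedule() even if the queue is torn down meanwhile; validity
 * is then re-checked with ipc_valid_object() after waking.
 */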
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;
	DEFINE_WAKE_Q(wake_q);

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_unlock1;
	}

	ipc_lock_object(&msq->q_perm);

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(ns, &msq->q_perm, S_IWUGO))
			goto out_unlock0;

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock0;

		if (msg_fits_inqueue(msq, msgsz))
			break;

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock0;
		}

		/* enqueue the sender and prepare to block */
		ss_add(msq, &s, msgsz);

		if (!ipc_rcu_getref(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		rcu_read_lock();
		ipc_lock_object(&msq->q_perm);

		ipc_rcu_putref(&msq->q_perm, msg_rcu_free);
		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock0;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg, &wake_q)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
	wake_up_q(&wake_q);
out_unlock1:
	rcu_read_unlock();
	if (msg != NULL)
		free_msg(msg);
	return err;
}
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
static inline int convert_mode(long *msgtyp, int msgflg)
{
	if (msgflg & MSG_COPY)
		return SEARCH_NUMBER;
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get the message with the lowest type that is
	 *               <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
			*msgtyp = LONG_MAX;
		else
			*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
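/*
 * For example: msgtyp == -5 becomes SEARCH_LESSEQUAL with *msgtyp == 5,
 * so the receiver takes the lowest-typed message whose type is <= 5;
 * msgtyp == 3 without MSG_EXCEPT means SEARCH_EQUAL, and with
 * MSG_EXCEPT it means SEARCH_NOTEQUAL.
 */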
static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
	struct msgbuf __user *msgp = dest;
	size_t msgsz;

	if (put_user(msg->m_type, &msgp->mtype))
		return -EFAULT;

	msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
	if (store_msg(msgp->mtext, msg, msgsz))
		return -EFAULT;
	return msgsz;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to store
 * bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	struct msg_msg *copy;

	/*
	 * Create dummy message to copy real message to.
	 */
	copy = load_msg(buf, bufsz);
	if (!IS_ERR(copy))
		copy->m_ts = bufsz;
	return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
	if (copy)
		free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
	return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif
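/*
 * These helpers back the MSG_COPY flag, which checkpoint/restore
 * tooling uses to read the nth message out of a queue (msgtyp acts as
 * an index under SEARCH_NUMBER) without dequeueing it.
 */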
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
	struct msg_msg *msg, *found = NULL;
	long count = 0;

	list_for_each_entry(msg, &msq->q_messages, m_list) {
		if (testmsg(msg, *msgtyp, mode) &&
		    !security_msg_queue_msgrcv(msq, msg, current,
					       *msgtyp, mode)) {
			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
				*msgtyp = msg->m_type - 1;
				found = msg;
			} else if (mode == SEARCH_NUMBER) {
				if (*msgtyp == count)
					return msg;
			} else
				return msg;
			count++;
		}
	}

	return found ?: ERR_PTR(-EAGAIN);
}
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
	       long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
	int mode;
	struct msg_queue *msq;
	struct ipc_namespace *ns;
	struct msg_msg *msg, *copy = NULL;
	DEFINE_WAKE_Q(wake_q);

	ns = current->nsproxy->ipc_ns;

	if (msqid < 0 || (long) bufsz < 0)
		return -EINVAL;

	if (msgflg & MSG_COPY) {
		if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
			return -EINVAL;
		copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
		if (IS_ERR(copy))
			return PTR_ERR(copy);
	}
	mode = convert_mode(&msgtyp, msgflg);

	rcu_read_lock();
	msq = msq_obtain_object_check(ns, msqid);
	if (IS_ERR(msq)) {
		rcu_read_unlock();
		free_copy(copy);
		return PTR_ERR(msq);
	}

	for (;;) {
		struct msg_receiver msr_d;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
			goto out_unlock1;

		ipc_lock_object(&msq->q_perm);

		/* raced with RMID? */
		if (!ipc_valid_object(&msq->q_perm)) {
			msg = ERR_PTR(-EIDRM);
			goto out_unlock0;
		}

		msg = find_msg(msq, &msgtyp, mode);
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock0;
			}
			/*
			 * If we are copying, then do not unlink message and do
			 * not update queue parameters.
			 */
			if (msgflg & MSG_COPY) {
				msg = copy_msg(msg, copy);
				goto out_unlock0;
			}

			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(msq, &wake_q, false);

			goto out_unlock0;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock0;
		}

		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = bufsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		__set_current_state(TASK_INTERRUPTIBLE);

		ipc_unlock_object(&msq->q_perm);
		rcu_read_unlock();
		schedule();

		/*
		 * Lockless receive, part 1:
		 * We don't hold a reference to the queue and getting a
		 * reference would defeat the idea of a lockless operation,
		 * thus the code relies on rcu to guarantee the existence of
		 * msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed.
		 */
		rcu_read_lock();

		/*
		 * Lockless receive, part 2:
		 * The work in pipelined_send() and expunge_all():
		 * - Set pointer to message
		 * - Queue the receiver task for later wakeup
		 * - Wake up the process after the lock is dropped.
		 *
		 * Should the process wake up before this wakeup (due to a
		 * signal) it will either see the message and continue ...
		 */
		msg = READ_ONCE(msr_d.r_msg);
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock1;

		/*
		 * ... or see -EAGAIN, acquire the lock to check the message
		 * again.
		 */
		ipc_lock_object(&msq->q_perm);

		msg = msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock0;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
			goto out_unlock0;
		}

		ipc_unlock_object(&msq->q_perm);
	}

out_unlock0:
	ipc_unlock_object(&msq->q_perm);
	wake_up_q(&wake_q);
out_unlock1:
	rcu_read_unlock();
	if (IS_ERR(msg)) {
		free_copy(copy);
		return PTR_ERR(msg);
	}

	bufsz = msg_handler(buf, msg, bufsz);
	free_msg(msg);

	return bufsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
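/*
 * Userspace view, for illustration only (the snippet itself is not part
 * of this file):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *	msgrcv(id, &m, sizeof(m.mtext), 1, 0);
 *
 * msgsz counts only the mtext bytes; the mtype word is transferred
 * separately by get_user()/put_user() above.
 */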
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct msg_queue *msq = it;

	seq_printf(s,
		   "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
		   msq->q_perm.key,
		   msq->q_perm.id,
		   msq->q_perm.mode,
		   msq->q_cbytes,
		   msq->q_qnum,
		   msq->q_lspid,
		   msq->q_lrpid,
		   from_kuid_munged(user_ns, msq->q_perm.uid),
		   from_kgid_munged(user_ns, msq->q_perm.gid),
		   from_kuid_munged(user_ns, msq->q_perm.cuid),
		   from_kgid_munged(user_ns, msq->q_perm.cgid),
		   msq->q_stime,
		   msq->q_rtime,
		   msq->q_ctime);

	return 0;
}
#endif
void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	ipc_init_proc_interface("sysvipc/msg",
		"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
		IPC_MSG_IDS, sysvipc_msg_proc_show);
}