/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
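/*
 * The SEARCH_* values above are the receive-side search modes: convert_mode()
 * maps msgrcv()'s msgtyp argument onto one of them, and testmsg() applies the
 * chosen mode to each queued message.
 */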
#define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
	struct sysinfo i;
	unsigned long allowed;
	int nb_ns;

	si_meminfo(&i);
	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
		/ MSGMNB;
	nb_ns = atomic_read(&nr_ipc_ns);
	allowed /= nb_ns;

	if (allowed < MSGMNI) {
		ns->msg_ctlmni = MSGMNI;
		return;
	}

	if (allowed > IPCMNI / nb_ns) {
		ns->msg_ctlmni = IPCMNI / nb_ns;
		return;
	}

	ns->msg_ctlmni = allowed;
}
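/*
 * Worked example (illustrative figures only, assuming the usual defaults of
 * MSG_MEM_SCALE == 32 and MSGMNB == 16384): with 512 MB of lowmem and a
 * single ipc namespace, allowed = (512 MB / 32) / 16384 = 1024 queues, which
 * falls between MSGMNI and IPCMNI, so msg_ctlmni becomes 1024.
 */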
void msg_init_ns(struct ipc_namespace *ns)
{
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;

	recompute_msgmni(ns);

	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &msg_ids(ns), freeque);
}
#endif
void __init msg_init(void)
{
	msg_init_ns(&init_ipc_ns);

	printk(KERN_INFO "msgmni has been set to %d\n",
		init_ipc_ns.msg_ctlmni);

	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}
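/*
 * The column header registered above lines up, field for field, with the
 * seq_printf() format used by sysvipc_msg_proc_show() at the bottom of
 * this file.
 */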
/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		/*
		 * Make sure that the wakeup doesn't preempt
		 * this CPU prematurely. (on PREEMPT_RT)
		 */
		preempt_disable();

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		wake_up_process(msr->r_tsk); /* serializes */
		msr->r_msg = ERR_PTR(res);

		preempt_enable();
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct list_head *tmp;
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
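/*
 * Illustrative user-space counterpart of the syscall above (a sketch, not
 * part of this file; the key value is made up):
 *
 *	#include <sys/msg.h>
 *
 *	int id = msgget(0x1234, IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("msgget");
 */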
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime		= in->msg_stime;
		out.msg_rtime		= in->msg_rtime;
		out.msg_ctime		= in->msg_ctime;

		if (in->msg_cbytes > USHORT_MAX)
			out.msg_cbytes	= USHORT_MAX;
		else
			out.msg_cbytes	= in->msg_cbytes;
		out.msg_lcbytes		= in->msg_cbytes;

		if (in->msg_qnum > USHORT_MAX)
			out.msg_qnum	= USHORT_MAX;
		else
			out.msg_qnum	= in->msg_qnum;

		if (in->msg_qbytes > USHORT_MAX)
			out.msg_qbytes	= USHORT_MAX;
		else
			out.msg_qbytes	= in->msg_qbytes;
		out.msg_lqbytes		= in->msg_qbytes;

		out.msg_lspid		= in->msg_lspid;
		out.msg_lrpid		= in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
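/*
 * Note that the IPC_OLD layout only has 16-bit counters, so the conversion
 * above clamps the byte and message counts to USHORT_MAX while mirroring
 * the full values into the long msg_lcbytes/msg_lqbytes fields.
 */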
static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->msg_perm.uid	= tbuf_old.msg_perm.uid;
		out->msg_perm.gid	= tbuf_old.msg_perm.gid;
		out->msg_perm.mode	= tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->msg_qbytes	= tbuf_old.msg_lqbytes;
		else
			out->msg_qbytes	= tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
		       struct msqid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct msqid64_ds msqid64;
	struct msg_queue *msq;
	int err;

	if (cmd == IPC_SET) {
		if (copy_msqid_from_user(&msqid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd,
			       &msqid64.msg_perm, msqid64.msg_qbytes);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	msq = container_of(ipcp, struct msg_queue, q_perm);

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock;

	switch (cmd) {
	case IPC_RMID:
		freeque(ns, ipcp);
		goto out_up;
	case IPC_SET:
		if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			goto out_unlock;
		}

		msq->q_qbytes = msqid64.msg_qbytes;

		ipc_update_perm(&msqid64.msg_perm, ipcp);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
}
SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data:
		 * due to padding, it's not enough
		 * to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
	case IPC_RMID:
		err = msgctl_down(ns, msqid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	msg_unlock(msq);
	return err;
}
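/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * query a queue's attributes, then remove the queue.
 *
 *	struct msqid_ds ds;
 *
 *	if (msgctl(id, IPC_STAT, &ds) == 0)
 *		printf("%lu bytes queued\n", (unsigned long)ds.msg_cbytes);
 *	msgctl(id, IPC_RMID, NULL);
 */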
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch(mode)
	{
		case SEARCH_ANY:
			return 1;
		case SEARCH_LESSEQUAL:
			if (msg->m_type <= type)
				return 1;
			break;
		case SEARCH_EQUAL:
			if (msg->m_type == type)
				return 1;
			break;
		case SEARCH_NOTEQUAL:
			if (msg->m_type != type)
				return 1;
			break;
	}
	return 0;
}
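/*
 * pipelined_send() below implements the wake-one handoff mentioned in the
 * header comment: if a sleeping receiver will take this message, hand it
 * over directly and wake only that task, instead of enqueueing the message
 * and waking every waiter.
 */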
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {
			/*
			 * Make sure that the wakeup doesn't preempt
			 * this CPU prematurely. (on PREEMPT_RT)
			 */
			preempt_disable();

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				wake_up_process(msr->r_tsk); /* serializes */
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk); /* serializes */
				msr->r_msg = msg;
				preempt_enable();

				return 1;
			}
			preempt_enable();
		}
	}
	return 0;
}
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
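/*
 * Note on the retry loop above: ipc_rcu_getref() keeps the queue structure
 * alive across schedule() while the queue lock is dropped, and the
 * q_perm.deleted test catches a queue that was removed while we slept.
 */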
SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		int, msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
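/*
 * Illustrative user-space counterpart (a sketch, not part of this file).
 * The mtype field must be >= 1, matching the check in do_msgsnd():
 *
 *	struct my_msgbuf {
 *		long mtype;
 *		char mtext[64];
 *	} m = { .mtype = 1, .mtext = "ping" };
 *
 *	if (msgsnd(id, &m, sizeof(m.mtext), 0) < 0)
 *		perror("msgsnd");
 */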
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 *  find message of correct type.
	 *  msgtyp = 0 => get first.
	 *  msgtyp > 0 => get first message of matching type.
	 *  msgtyp < 0 => get message with the lowest type <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
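/*
 * Example: a caller passing msgtyp == -5 without MSG_EXCEPT gets
 * SEARCH_LESSEQUAL with msgtyp rewritten to 5, i.e. "the queued message
 * with the lowest type <= 5"; the walk in do_msgrcv() then narrows msgtyp
 * step by step until it finds that lowest type.
 */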
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption.  We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
		long, msgtyp, int, msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
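/*
 * Illustrative user-space counterpart (a sketch, not part of this file):
 * receive the next message of type 1, truncating oversized messages instead
 * of failing with E2BIG thanks to MSG_NOERROR:
 *
 *	struct my_msgbuf {
 *		long mtype;
 *		char mtext[64];
 *	} m;
 *
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), 1, MSG_NOERROR);
 *	if (n >= 0)
 *		printf("type %ld, %zd bytes\n", m.mtype, n);
 */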
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif