/*
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 */
#include <linux/capability.h>
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;
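
/*
 * These limits are runtime-tunable: they are exposed as the
 * kernel.msgmax, kernel.msgmnb and kernel.msgmni sysctls
 * (/proc/sys/kernel/msgmax etc.).
 */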
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;
#define msg_lock(id)	((struct msg_queue *)ipc_lock(&msg_ids, id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue *)ipc_rmid(&msg_ids, id))
#define msg_checkid(msq, msgid)	\
	ipc_checkid(&msg_ids, &msq->q_perm, msgid)
#define msg_buildid(id, seq) \
	ipc_buildid(&msg_ids, id, seq)
static void freeque(struct msg_queue *msq, int id);
static int newque(key_t key, int msgflg);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
void __init msg_init(void)
{
	ipc_init_ids(&msg_ids, msg_ctlmni);
	ipc_init_proc_interface("sysvipc/msg",
			"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
			&msg_ids,
			sysvipc_msg_proc_show);
}
static int newque(key_t key, int msgflg)
{
	int id, retval;
	struct msg_queue *msq;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
	if (id == -1) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return -ENOSPC;
	}

	msq->q_id = msg_buildid(id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);
	msg_unlock(msq);

	return msq->q_id;
}
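
/*
 * Note: ipc_addid() returns with the newly added entry's spinlock held,
 * which is why newque() must drop it with msg_unlock() before returning
 * the new queue id.
 */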
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
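
/*
 * With kill set (queue teardown), ss_wakeup() clears each sender's
 * list.next before waking it; ss_del() treats a NULL list.next as
 * "already unlinked", so a woken sender does not unlink itself again.
 */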
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.mutex and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.mutex remains locked on exit.
 */
static void freeque(struct msg_queue *msq, int id)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msq = msg_rmid(id);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	int id, ret = -EPERM;
	struct msg_queue *msq;

	mutex_lock(&msg_ids.mutex);
	if (key == IPC_PRIVATE)
		ret = newque(key, msgflg);
	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
		if (!(msgflg & IPC_CREAT))
			ret = -ENOENT;
		else
			ret = newque(key, msgflg);
	} else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
		ret = -EEXIST;
	} else {
		msq = msg_lock(id);
		BUG_ON(msq == NULL);
		if (ipcperms(&msq->q_perm, msgflg))
			ret = -EACCES;
		else {
			int qid = msg_buildid(id, msq->q_perm.seq);

			ret = security_msg_queue_associate(msq, msgflg);
			if (!ret)
				ret = qid;
		}
		msg_unlock(msq);
	}
	mutex_unlock(&msg_ids.mutex);

	return ret;
}
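
/*
 * Userspace view (illustrative, not part of this file): this syscall
 * backs the msgget(2) wrapper.
 *
 *	int id = msgget(IPC_PRIVATE, 0600);
 *	int id2 = msgget(0x1234, IPC_CREAT | IPC_EXCL | 0600);
 *
 * The second call fails with EEXIST if a queue for key 0x1234 already
 * exists, matching the IPC_CREAT|IPC_EXCL branch above.
 */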
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
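
/*
 * The old (IPC_OLD) msqid_ds layout stores the byte and message counts
 * in unsigned short fields, so values above USHRT_MAX are clamped; the
 * full values are still reported through the long msg_lcbytes and
 * msg_lqbytes members.
 */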
struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};
static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	int err, version;
	struct msg_queue *msq;
	struct msq_setbuf setbuf;
	struct kern_ipc_perm *ipcp;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data: due to padding,
		 * it is not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = msg_ctlmni;
		msginfo.msgmax = msg_ctlmax;
		msginfo.msgmnb = msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		mutex_lock(&msg_ids.mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids.in_use;
			msginfo.msgmap = atomic_read(&msg_hdrs);
			msginfo.msgtql = atomic_read(&msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = msg_ids.max_id;
		mutex_unlock(&msg_ids.mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;
		if (cmd == MSG_STAT && msqid >= msg_ids.entries->size)
			return -EINVAL;

		memset(&tbuf, 0, sizeof(tbuf));

		msq = msg_lock(msqid);
		if (msq == NULL)
			return -EINVAL;

		if (cmd == MSG_STAT) {
			success_return = msg_buildid(msqid, msq->q_perm.seq);
		} else {
			err = -EIDRM;
			if (msg_checkid(msq, msqid))
				goto out_unlock;
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime	= msq->q_stime;
		tbuf.msg_rtime	= msq->q_rtime;
		tbuf.msg_ctime	= msq->q_ctime;
		tbuf.msg_cbytes	= msq->q_cbytes;
		tbuf.msg_qnum	= msq->q_qnum;
		tbuf.msg_qbytes	= msq->q_qbytes;
		tbuf.msg_lspid	= msq->q_lspid;
		tbuf.msg_lrpid	= msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&msg_ids.mutex);
	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_up;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_up;
	ipcp = &msq->q_perm;

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = audit_ipc_perms(setbuf.qbytes, setbuf.uid,
				      setbuf.gid, setbuf.mode, ipcp);
		if (err)
			goto out_unlock_up;

		err = -EPERM;
		if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/*
		 * Sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/*
		 * Sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(msq, msqid);
		break;
	}
	err = 0;
out_up:
	mutex_unlock(&msg_ids.mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
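
/*
 * Userspace view (illustrative, not part of this file):
 *
 *	struct msqid_ds ds;
 *	msgctl(id, IPC_STAT, &ds);
 *	msgctl(id, IPC_RMID, NULL);
 *
 * IPC_STAT snapshots permissions and counters via copy_msqid_to_user();
 * IPC_RMID calls freeque(), which wakes all sleepers with -EIDRM.
 */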
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {
			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = msr->r_tsk->pid;
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
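
/*
 * The r_msg handshake above pairs with the lockless receive loop in
 * sys_msgrcv(): r_msg is first set to NULL, the receiver is woken, and
 * only after the smp_mb() barrier is the final value (the message or an
 * ERR_PTR code) stored, so a receiver spinning on r_msg == NULL never
 * sees a half-completed hand-off.
 */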
asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
			   size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	long mtype;
	int err;

	if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(msgp->mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock(msqid);
	err = -EINVAL;
	if (msq == NULL)
		goto out_free;

	err = -EIDRM;
	if (msg_checkid(msq, msqid))
		goto out_unlock_free;

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
		    1 + msq->q_qnum <= msq->q_qbytes)
			break;

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = current->tgid;
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &msg_bytes);
		atomic_inc(&msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
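
/*
 * Userspace view (illustrative, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "ping" };
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *
 * The call blocks in the for(;;) loop above while the queue is full,
 * unless IPC_NOWAIT is set, in which case it fails with EAGAIN.
 */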
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with lowest type <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
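
/*
 * Example: msgrcv(id, &m, len, -3, 0) negates msgtyp to 3 and selects
 * SEARCH_LESSEQUAL, so the lowest-typed queued message with m_type <= 3
 * is returned; msgrcv(id, &m, len, 3, MSG_EXCEPT) returns the first
 * message whose type is not 3.
 */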
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);

	msq = msg_lock(msqid);
	if (msq == NULL)
		return -EINVAL;

	msg = ERR_PTR(-EIDRM);
	if (msg_checkid(msq, msqid))
		goto out_unlock;

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {
				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
				    walk_msg->m_type != 1) {
					msgtyp = walk_msg->m_type - 1;
				} else {
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = current->tgid;
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &msg_bytes);
			atomic_dec(&msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/*
		 * Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/*
		 * Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/*
		 * Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Lockless receive, part 3 (continued):
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/*
		 * Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	if (put_user(msg->m_type, &msgp->mtype) ||
	    store_msg(msgp->mtext, msg, msgsz)) {
		msgsz = -EFAULT;
	}
	free_msg(msg);

	return msgsz;
}
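
/*
 * Userspace view (illustrative, not part of this file):
 *
 *	struct { long mtype; char mtext[64]; } m;
 *	ssize_t n = msgrcv(id, &m, sizeof(m.mtext), 0, 0);
 *
 * With msgtyp 0 (SEARCH_ANY) the first queued message of any type is
 * returned; the call sleeps on q_receivers until one arrives.
 */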
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	return seq_printf(s,
			"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif