/*
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/uaccess.h>
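
/*
 * Tunable limits for SysV message queues.  They start out at the
 * compile-time defaults (MSGMAX, MSGMNB, MSGMNI) and, per the header
 * comment above, are meant to be adjustable at run time via sysctl:
 *	msg_ctlmax - maximum size of a single message,
 *	msg_ctlmnb - default maximum number of bytes on one queue,
 *	msg_ctlmni - maximum number of message queue identifiers.
 */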
int msg_ctlmax = MSGMAX;
int msg_ctlmnb = MSGMNB;
int msg_ctlmni = MSGMNI;
/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
        struct list_head r_list;
        struct task_struct* r_tsk;

        int r_mode;
        long r_msgtype;
        long r_maxsize;

        struct msg_msg* volatile r_msg;
};
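
/*
 * r_msg is the rendezvous slot between a sender and a sleeping receiver:
 * the receiver parks ERR_PTR(-EAGAIN) in it before sleeping, and whoever
 * wakes it up (pipelined_send(), expunge_all() on queue change or removal)
 * stores either the delivered message or an ERR_PTR() error code there
 * before calling wake_up_process().  It is declared volatile because it is
 * written by the waking task while the receiver sleeps and is re-examined
 * by the receiver once it runs again.
 */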
/* one msg_sender for each sleeping sender */
struct msg_sender {
        struct list_head list;
        struct task_struct* tsk;
};
struct msg_msgseg {
        struct msg_msgseg* next;
        /* the next part of the message follows immediately */
};
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
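
/*
 * Receive-search modes: convert_mode() picks one of these from the
 * msgtyp/msgflg arguments of msgrcv(), and testmsg() evaluates it against
 * each queued message.
 */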
static atomic_t msg_bytes = ATOMIC_INIT(0);
static atomic_t msg_hdrs = ATOMIC_INIT(0);

static struct ipc_ids msg_ids;
#define msg_lock(id)	((struct msg_queue*)ipc_lock(&msg_ids,id))
#define msg_unlock(msq)	ipc_unlock(&(msq)->q_perm)
#define msg_rmid(id)	((struct msg_queue*)ipc_rmid(&msg_ids,id))
#define msg_checkid(msq, msgid) \
	ipc_checkid(&msg_ids,&msq->q_perm,msgid)
#define msg_buildid(id, seq) \
	ipc_buildid(&msg_ids, id, seq)
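
/*
 * A user-visible message queue id is not just the slot index in msg_ids:
 * msg_buildid() folds the slot index together with a per-slot sequence
 * number, and msg_checkid() verifies that sequence number again on every
 * lookup, so a stale id referring to a deleted and since recycled slot is
 * rejected instead of silently matching a new queue.
 */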
static void freeque (struct msg_queue *msq, int id);
static int newque (key_t key, int msgflg);
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
void __init msg_init (void)
{
        ipc_init_ids(&msg_ids, msg_ctlmni);
        create_proc_read_entry("sysvipc/msg", 0, 0, sysvipc_msg_read_proc, NULL);
}
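
/*
 * For orientation, the syscalls below implement the usual SysV message
 * queue API as seen from userspace; a rough, purely illustrative usage
 * sketch (not code from this file):
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);    // sys_msgget()
 *	msgsnd(id, &m, sizeof(m.mtext), 0);                // sys_msgsnd()
 *	msgrcv(id, &m, sizeof(m.mtext), 1, 0);             // sys_msgrcv()
 *	msgctl(id, IPC_RMID, NULL);                        // sys_msgctl()
 */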
static int newque (key_t key, int msgflg)
{
        int id;
        int retval;
        struct msg_queue *msq;

        msq = ipc_rcu_alloc(sizeof(*msq));
        if (!msq)
                return -ENOMEM;

        msq->q_perm.mode = (msgflg & S_IRWXUGO);
        msq->q_perm.key = key;

        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
                ipc_rcu_free(msq, sizeof(*msq));
                return retval;
        }

        id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni);
        if (id == -1) {
                security_msg_queue_free(msq);
                ipc_rcu_free(msq, sizeof(*msq));
                return -ENOSPC;
        }

        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
        msq->q_qbytes = msg_ctlmnb;
        msq->q_lspid = msq->q_lrpid = 0;
        INIT_LIST_HEAD(&msq->q_messages);
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);
        msg_unlock(msq);

        return msg_buildid(id, msq->q_perm.seq);
}
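
/*
 * A message is stored as a struct msg_msg header with up to DATALEN_MSG
 * bytes of payload placed directly behind it; anything longer spills into
 * a chain of struct msg_msgseg segments of at most DATALEN_SEG bytes each,
 * linked off the message header.  load_msg() builds this chain from user
 * space, store_msg() copies it back out, and free_msg() tears it down.
 */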
static void free_msg(struct msg_msg* msg)
        struct msg_msgseg* seg;

        security_msg_msg_free(msg);

                struct msg_msgseg* tmp = seg->next;
static struct msg_msg* load_msg(void* src, int len)
        struct msg_msgseg** pseg;

        if (alen > DATALEN_MSG)
                alen = DATALEN_MSG;

        msg = (struct msg_msg *) kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
        if (msg == NULL)
                return ERR_PTR(-ENOMEM);

        msg->security = NULL;

        if (copy_from_user(msg+1, src, alen)) {

        src = ((char*)src) + alen;

                struct msg_msgseg* seg;

                if (alen > DATALEN_SEG)
                        alen = DATALEN_SEG;
                seg = (struct msg_msgseg *) kmalloc(sizeof(*seg) + alen, GFP_KERNEL);

                if (copy_from_user(seg+1, src, alen)) {

                src = ((char*)src) + alen;

        err = security_msg_msg_alloc(msg);
static int store_msg(void* dest, struct msg_msg* msg, int len)
        struct msg_msgseg *seg;

        if (alen > DATALEN_MSG)
                alen = DATALEN_MSG;
        if (copy_to_user(dest, msg+1, alen))
                return -1;

        dest = ((char*)dest) + alen;

                if (alen > DATALEN_SEG)
                        alen = DATALEN_SEG;
                if (copy_to_user(dest, seg+1, alen))
                        return -1;
                dest = ((char*)dest) + alen;
static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
{
        mss->tsk = current;
        current->state = TASK_INTERRUPTIBLE;
        list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender* mss)
{
        if (mss->list.next != NULL)
                list_del(&mss->list);
}
static void ss_wakeup(struct list_head* h, int kill)
        struct list_head *tmp;

                struct msg_sender* mss;

                mss = list_entry(tmp, struct msg_sender, list);
                wake_up_process(mss->tsk);
static void expunge_all(struct msg_queue* msq, int res)
{
        struct list_head *tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver* msr;

                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                msr->r_msg = ERR_PTR(res);
                wake_up_process(msr->r_tsk);
        }
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from the message queue ID array, and cleans
 * up all the messages associated with this queue.
 *
 * msg_ids.sem and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.sem remains locked on exit.
 */
static void freeque (struct msg_queue *msq, int id)
{
        struct list_head *tmp;

        expunge_all(msq, -EIDRM);
        ss_wakeup(&msq->q_senders, 1);
        msq = msg_rmid(id);
        msg_unlock(msq);

        tmp = msq->q_messages.next;
        while (tmp != &msq->q_messages) {
                struct msg_msg* msg = list_entry(tmp, struct msg_msg, m_list);
                tmp = tmp->next;
                atomic_dec(&msg_hdrs);
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &msg_bytes);
        security_msg_queue_free(msq);
        ipc_rcu_free(msq, sizeof(struct msg_queue));
}
asmlinkage long sys_msgget (key_t key, int msgflg)
        int id, ret = -EPERM;
        struct msg_queue *msq;

        if (key == IPC_PRIVATE)
                ret = newque(key, msgflg);
        else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
                if (!(msgflg & IPC_CREAT))
                        ret = -ENOENT;
                else
                        ret = newque(key, msgflg);
        } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {

                if (ipcperms(&msq->q_perm, msgflg))
                        ret = -EACCES;

                        int qid = msg_buildid(id, msq->q_perm.seq);
                        ret = security_msg_queue_associate(msq, msgflg);
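
/*
 * Two user-space layouts have to be supported: the old struct msqid_ds
 * with 16-bit counters and the newer struct msqid64_ds.  The version is
 * what ipc_parse_version() reports in sys_msgctl() below; for the old
 * format the 64-bit counters are clamped to USHRT_MAX and additionally
 * reported through the msg_lcbytes/msg_lqbytes spill-over fields.
 */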
static inline unsigned long copy_msqid_to_user(void *buf, struct msqid64_ds *in, int version)
                return copy_to_user(buf, in, sizeof(*in));

                struct msqid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

                out.msg_stime = in->msg_stime;
                out.msg_rtime = in->msg_rtime;
                out.msg_ctime = in->msg_ctime;

                if (in->msg_cbytes > USHRT_MAX)
                        out.msg_cbytes = USHRT_MAX;
                else
                        out.msg_cbytes = in->msg_cbytes;
                out.msg_lcbytes = in->msg_cbytes;

                if (in->msg_qnum > USHRT_MAX)
                        out.msg_qnum = USHRT_MAX;
                else
                        out.msg_qnum = in->msg_qnum;

                if (in->msg_qbytes > USHRT_MAX)
                        out.msg_qbytes = USHRT_MAX;
                else
                        out.msg_qbytes = in->msg_qbytes;
                out.msg_lqbytes = in->msg_qbytes;

                out.msg_lspid = in->msg_lspid;
                out.msg_lrpid = in->msg_lrpid;

                return copy_to_user(buf, &out, sizeof(out));
struct msq_setbuf {
        unsigned long qbytes;
        uid_t         uid;
        gid_t         gid;
        mode_t        mode;
};
static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void *buf, int version)
                struct msqid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->qbytes = tbuf.msg_qbytes;
                out->uid = tbuf.msg_perm.uid;
                out->gid = tbuf.msg_perm.gid;
                out->mode = tbuf.msg_perm.mode;

                struct msqid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid = tbuf_old.msg_perm.uid;
                out->gid = tbuf_old.msg_perm.gid;
                out->mode = tbuf_old.msg_perm.mode;

                if (tbuf_old.msg_qbytes == 0)
                        out->qbytes = tbuf_old.msg_lqbytes;
                else
                        out->qbytes = tbuf_old.msg_qbytes;
asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds *buf)
        struct msg_queue *msq;
        struct msq_setbuf setbuf;
        struct kern_ipc_perm *ipcp;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        struct msginfo msginfo;

        /* We must not return kernel stack data:
         * due to padding, it's not enough
         * to set all member fields.
         */
        err = security_msg_queue_msgctl(NULL, cmd);

        memset(&msginfo, 0, sizeof(msginfo));
        msginfo.msgmni = msg_ctlmni;
        msginfo.msgmax = msg_ctlmax;
        msginfo.msgmnb = msg_ctlmnb;
        msginfo.msgssz = MSGSSZ;
        msginfo.msgseg = MSGSEG;

        if (cmd == MSG_INFO) {
                msginfo.msgpool = msg_ids.in_use;
                msginfo.msgmap = atomic_read(&msg_hdrs);
                msginfo.msgtql = atomic_read(&msg_bytes);
        } else {
                msginfo.msgmap = MSGMAP;
                msginfo.msgpool = MSGPOOL;
                msginfo.msgtql = MSGTQL;
        }
        max_id = msg_ids.max_id;

        if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                return -EFAULT;

        return (max_id < 0) ? 0 : max_id;
        struct msqid64_ds tbuf;

        if (cmd == MSG_STAT && msqid >= msg_ids.size)
                return -EINVAL;

        memset(&tbuf, 0, sizeof(tbuf));

        msq = msg_lock(msqid);

        if (cmd == MSG_STAT) {
                success_return = msg_buildid(msqid, msq->q_perm.seq);

        if (msg_checkid(msq, msqid))

        if (ipcperms(&msq->q_perm, S_IRUGO))

        err = security_msg_queue_msgctl(msq, cmd);

        kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
        tbuf.msg_stime = msq->q_stime;
        tbuf.msg_rtime = msq->q_rtime;
        tbuf.msg_ctime = msq->q_ctime;
        tbuf.msg_cbytes = msq->q_cbytes;
        tbuf.msg_qnum = msq->q_qnum;
        tbuf.msg_qbytes = msq->q_qbytes;
        tbuf.msg_lspid = msq->q_lspid;
        tbuf.msg_lrpid = msq->q_lrpid;

        if (copy_msqid_to_user(buf, &tbuf, version))
                return -EFAULT;

        return success_return;
        if (copy_msqid_from_user(&setbuf, buf, version))
                return -EFAULT;

        msq = msg_lock(msqid);

        if (msg_checkid(msq, msqid))

        if (current->euid != ipcp->cuid &&
            current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
                /* We _could_ check for CAP_CHOWN above, but we don't */

        err = security_msg_queue_msgctl(msq, cmd);

        if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE))

        msq->q_qbytes = setbuf.qbytes;

        ipcp->uid = setbuf.uid;
        ipcp->gid = setbuf.gid;
        ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
                (S_IRWXUGO & setbuf.mode);
        msq->q_ctime = get_seconds();
        /* sleeping receivers might be excluded by
         * stricter permissions.
         */
        expunge_all(msq, -EAGAIN);
        /* sleeping senders might be able to send
         * due to a larger queue size.
         */
        ss_wakeup(&msq->q_senders, 0);

        freeque(msq, msqid);
static int testmsg(struct msg_msg* msg, long type, int mode)
        case SEARCH_LESSEQUAL:
                if (msg->m_type <= type)
                        return 1;
                break;
        case SEARCH_EQUAL:
                if (msg->m_type == type)
                        return 1;
                break;
        case SEARCH_NOTEQUAL:
                if (msg->m_type != type)
                        return 1;
                break;
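
/*
 * "Pipelined" send: before a new message is put on the queue, the list of
 * sleeping receivers is scanned for one whose type/mode selection
 * (testmsg) and security check accept the message.  If such a receiver
 * exists, the message is handed to it directly through msr->r_msg and the
 * receiver is woken, so the message never touches msq->q_messages; a
 * receiver whose buffer is too small gets ERR_PTR(-E2BIG) instead.
 */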
static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg)
{
        struct list_head* tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver* msr;
                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
                    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) {
                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
                                msr->r_msg = ERR_PTR(-E2BIG);
                                wake_up_process(msr->r_tsk);
                        } else {
                                msr->r_msg = msg;
                                msq->q_lrpid = msr->r_tsk->pid;
                                msq->q_rtime = get_seconds();
                                wake_up_process(msr->r_tsk);
                                return 1;
                        }
                }
        }
        return 0;
}
asmlinkage long sys_msgsnd (int msqid, struct msgbuf *msgp, size_t msgsz, int msgflg)
        struct msg_queue *msq;

        if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;

        msg = load_msg(msgp->mtext, msgsz);

        msq = msg_lock(msqid);

        if (msg_checkid(msq, msqid))
                goto out_unlock_free;

        if (ipcperms(&msq->q_perm, S_IWUGO))
                goto out_unlock_free;

        err = security_msg_queue_msgsnd(msq, msg, msgflg);
        if (err)
                goto out_unlock_free;

        if (msgsz + msq->q_cbytes > msq->q_qbytes ||
            1 + msq->q_qnum > msq->q_qbytes) {

                if (msgflg & IPC_NOWAIT) {
                        goto out_unlock_free;

                current->state = TASK_RUNNING;

                msq = msg_lock(msqid);

                if (signal_pending(current)) {
                        goto out_unlock_free;

        msq->q_lspid = current->tgid;
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                atomic_add(msgsz, &msg_bytes);
                atomic_inc(&msg_hdrs);
static inline int convert_mode(long* msgtyp, int msgflg)
        /*
         * find message of correct type.
         * msgtyp = 0 => get first.
         * msgtyp > 0 => get first message of matching type.
         * msgtyp < 0 => get message with the lowest type that is <= abs(msgtyp).
         */
                return SEARCH_LESSEQUAL;
        if (msgflg & MSG_EXCEPT)
                return SEARCH_NOTEQUAL;
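
/*
 * Concretely, as an illustrative recap of the rules above (userspace view,
 * not extra logic in this file):
 *
 *	msgrcv(id, &m, sz,  0, 0);           first message on the queue
 *	msgrcv(id, &m, sz,  5, 0);           first message of type 5
 *	msgrcv(id, &m, sz,  5, MSG_EXCEPT);  first message of type != 5
 *	msgrcv(id, &m, sz, -5, 0);           message with lowest type <= 5
 */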
asmlinkage long sys_msgrcv (int msqid, struct msgbuf *msgp, size_t msgsz,
                            long msgtyp, int msgflg)
        struct msg_queue *msq;
        struct msg_receiver msr_d;
        struct list_head* tmp;
        struct msg_msg* msg, *found_msg;

        if (msqid < 0 || (long) msgsz < 0)
                return -EINVAL;

        mode = convert_mode(&msgtyp, msgflg);

        msq = msg_lock(msqid);

        if (msg_checkid(msq, msqid))

        if (ipcperms(&msq->q_perm, S_IRUGO))

        tmp = msq->q_messages.next;

        while (tmp != &msq->q_messages) {
                msg = list_entry(tmp, struct msg_msg, m_list);
                if (testmsg(msg, msgtyp, mode) &&
                    !security_msg_queue_msgrcv(msq, msg, current, msgtyp, mode)) {

                        if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {

                                msgtyp = msg->m_type - 1;
                if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {

                list_del(&msg->m_list);

                msq->q_rtime = get_seconds();
                msq->q_lrpid = current->tgid;
                msq->q_cbytes -= msg->m_ts;
                atomic_sub(msg->m_ts, &msg_bytes);
                atomic_dec(&msg_hdrs);
                ss_wakeup(&msq->q_senders, 0);

                msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
                if (put_user(msg->m_type, &msgp->mtype) ||
                    store_msg(msgp->mtext, msg, msgsz)) {
                /* no message waiting. Prepare for pipelined
                 * receive.
                 */
                if (msgflg & IPC_NOWAIT) {

                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;

                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = msgsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                current->state = TASK_INTERRUPTIBLE;

                /*
                 * The below optimisation is buggy.  A sleeping thread that is
                 * woken up checks if it got a message and if so, copies it to
                 * userspace and just returns without taking any locks.
                 * But this return to user space can be faster than the message
                 * send, and if the receiver immediately exits the
                 * wake_up_process performed by the sender will oops.
                 */
                msg = (struct msg_msg*) msr_d.r_msg;

                msq = msg_lock(msqid);
                msg = (struct msg_msg*) msr_d.r_msg;

                /* our message arrived while we waited for
                 * the spinlock. Process it.
                 */

                list_del(&msr_d.r_list);
                if (signal_pending(current))
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
        len += sprintf(buffer, "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n");

        for (i = 0; i <= msg_ids.max_id; i++) {
                struct msg_queue * msq;

                len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                        msg_buildid(i, msq->q_perm.seq),

                if (pos > offset + length)

        *start = buffer + (offset - begin);
        len -= (offset - begin);