3 * Copyright (C) 1992 Krishna Balasubramanian
5 * Removed all the remaining kerneld mess
6 * Catch the -EFAULT stuff properly
7 * Use GFP_KERNEL for messages as in 1.2
8 * Fixed up the unchecked user space derefs
9 * Copyright (C) 1998 Alan Cox & Andi Kleen
11 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
13 * mostly rewritten, threaded and wake-one semantics added
14 * MSGMAX limit removed, sysctl's added
15 * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
18 #include <linux/config.h>
19 #include <linux/slab.h>
20 #include <linux/msg.h>
21 #include <linux/spinlock.h>
22 #include <linux/init.h>
23 #include <linux/proc_fs.h>
24 #include <linux/list.h>
25 #include <linux/security.h>
26 #include <asm/uaccess.h>
/*
 * Run-time tunables for SysV message queues (exported via sysctl; see the
 * "sysctl's added" note in the file header):
 *   msg_ctlmax - largest message size accepted by sys_msgsnd()
 *   msg_ctlmnb - default byte limit installed in each new queue's q_qbytes
 *   msg_ctlmni - maximum number of message queue ids (passed to ipc_init_ids)
 * NOTE(review): extraction of this file is lossy; statements are split across
 * lines and the stray leading digits are fused original line numbers.
 */
30 int msg_ctlmax
= MSGMAX
;
31 int msg_ctlmnb
= MSGMNB
;
32 int msg_ctlmni
= MSGMNI
;
34 /* one msg_receiver structure for each sleeping receiver */
/*
 * NOTE(review): this is the interior of struct msg_receiver; the opening
 * "struct msg_receiver {" and closing "};" as well as the r_mode, r_msgtype
 * and r_maxsize members (referenced later by pipelined_send/sys_msgrcv) were
 * lost in extraction — restore from the original source.
 */
36 struct list_head r_list
;
/* task to wake once a matching message is delivered */
37 struct task_struct
* r_tsk
;
/* delivered message, or ERR_PTR(errno); volatile: written by the sender
 * while the receiver sleeps */
43 struct msg_msg
* volatile r_msg
;
46 /* one msg_sender for each sleeping sender */
/*
 * NOTE(review): interior of struct msg_sender; the struct's braces were lost
 * in extraction. "list" links the sender on msq->q_senders, "tsk" is the
 * task ss_wakeup() wakes.
 */
48 struct list_head list
;
49 struct task_struct
* tsk
;
/*
 * NOTE(review): interior of struct msg_msgseg — one link of the chained
 * message body used by load_msg()/store_msg(); braces lost in extraction.
 */
53 struct msg_msgseg
* next
;
54 /* the next part of the message follows immediately */
/*
 * Receive-mode constants produced by convert_mode() and consumed by
 * testmsg().
 * NOTE(review): the value 1 (original line ~57, presumably SEARCH_ANY for
 * msgtyp == 0) is missing from this extraction.
 */
58 #define SEARCH_EQUAL 2
59 #define SEARCH_NOTEQUAL 3
60 #define SEARCH_LESSEQUAL 4
/* global accounting: total bytes queued and total message headers,
 * reported by msgctl(MSG_INFO) */
62 static atomic_t msg_bytes
= ATOMIC_INIT(0);
63 static atomic_t msg_hdrs
= ATOMIC_INIT(0);
/* id allocator/lock table shared by all msg queues */
65 static struct ipc_ids msg_ids
;
/* thin wrappers mapping the generic ipc_* helpers onto the msg id table;
 * msg_lock() returns the locked queue (or NULL) for an ipc id */
67 #define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id))
68 #define msg_unlock(id) ipc_unlock(&msg_ids,id)
69 #define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id))
70 #define msg_checkid(msq, msgid) \
71 ipc_checkid(&msg_ids,&msq->q_perm,msgid)
72 #define msg_buildid(id, seq) \
73 ipc_buildid(&msg_ids, id, seq)
/* forward declarations for helpers defined below */
75 static void freeque (int id
);
76 static int newque (key_t key
, int msgflg
);
/* /proc/sysvipc/msg read handler (registered in msg_init) */
78 static int sysvipc_msg_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
);
/*
 * msg_init(): boot-time initialisation — size the id table to msg_ctlmni
 * and register the /proc/sysvipc/msg read entry.
 * NOTE(review): function braces and any #ifdef CONFIG_PROC_FS guard around
 * create_proc_read_entry were lost in extraction.
 */
81 void __init
msg_init (void)
83 ipc_init_ids(&msg_ids
,msg_ctlmni
);
86 create_proc_read_entry("sysvipc/msg", 0, 0, sysvipc_msg_read_proc
, NULL
);
/*
 * newque(): allocate a msg_queue, initialise its permissions and lists,
 * insert it into msg_ids, and return the user-visible ipc id built from
 * slot + sequence number.
 * NOTE(review): extraction is lossy — the kmalloc NULL check, the
 * declarations of id/retval, the security_ops failure return, and the
 * ipc_addid(-1)/kfree error path (original lines ~91-116 gaps) are missing;
 * restore from the original source before compiling.
 */
90 static int newque (key_t key
, int msgflg
)
94 struct msg_queue
*msq
;
96 msq
= (struct msg_queue
*) kmalloc (sizeof (*msq
), GFP_KERNEL
);
/* only the low 9 permission bits of msgflg are kept */
100 msq
->q_perm
.mode
= (msgflg
& S_IRWXUGO
);
101 msq
->q_perm
.key
= key
;
103 msq
->q_perm
.security
= NULL
;
104 retval
= security_ops
->msg_queue_alloc_security(msq
);
110 id
= ipc_addid(&msg_ids
, &msq
->q_perm
, msg_ctlmni
);
/* presumably on the ipc_addid failure path — undo the security alloc */
112 security_ops
->msg_queue_free_security(msq
);
/* fresh queue: no traffic yet, byte limit defaults to msg_ctlmnb */
117 msq
->q_stime
= msq
->q_rtime
= 0;
118 msq
->q_ctime
= CURRENT_TIME
;
119 msq
->q_cbytes
= msq
->q_qnum
= 0;
120 msq
->q_qbytes
= msg_ctlmnb
;
121 msq
->q_lspid
= msq
->q_lrpid
= 0;
122 INIT_LIST_HEAD(&msq
->q_messages
);
123 INIT_LIST_HEAD(&msq
->q_receivers
);
124 INIT_LIST_HEAD(&msq
->q_senders
);
127 return msg_buildid(id
,msq
->q_perm
.seq
);
/*
 * free_msg(): release a message header and walk its msg_msgseg chain,
 * freeing each segment.
 * NOTE(review): the loop body and kfree() calls (original lines ~131-140)
 * are missing from this extraction.
 */
130 static void free_msg(struct msg_msg
* msg
)
132 struct msg_msgseg
* seg
;
136 struct msg_msgseg
* tmp
= seg
->next
;
/*
 * load_msg(): copy a user-space message of "len" bytes into a kernel
 * msg_msg header (first DATALEN_MSG bytes inline) plus a chain of
 * msg_msgseg segments of up to DATALEN_SEG bytes each.
 * Returns the message or ERR_PTR(-ENOMEM/-EFAULT).
 * NOTE(review): extraction is lossy — declarations of msg/alen, the
 * clamping assignments (alen = DATALEN_*), the while loop over the
 * remaining length, the EFAULT cleanup paths and the final return are
 * missing; restore from the original source.
 */
142 static struct msg_msg
* load_msg(void* src
, int len
)
145 struct msg_msgseg
** pseg
;
150 if(alen
> DATALEN_MSG
)
153 msg
= (struct msg_msg
*) kmalloc (sizeof(*msg
) + alen
, GFP_KERNEL
);
155 return ERR_PTR(-ENOMEM
);
/* payload lives immediately after the header, hence msg+1 */
159 if (copy_from_user(msg
+1, src
, alen
)) {
165 src
= ((char*)src
)+alen
;
168 struct msg_msgseg
* seg
;
170 if(alen
> DATALEN_SEG
)
172 seg
= (struct msg_msgseg
*) kmalloc (sizeof(*seg
) + alen
, GFP_KERNEL
);
179 if(copy_from_user (seg
+1, src
, alen
)) {
185 src
= ((char*)src
)+alen
;
/*
 * store_msg(): mirror of load_msg() — copy up to "len" bytes of a kernel
 * message (inline part, then the segment chain) out to user space.
 * Returns 0 on success, nonzero (-EFAULT) on copy failure.
 * NOTE(review): the alen computations, the segment-walk loop, and the
 * return statements are missing from this extraction.
 */
194 static int store_msg(void* dest
, struct msg_msg
* msg
, int len
)
197 struct msg_msgseg
*seg
;
200 if(alen
> DATALEN_MSG
)
202 if(copy_to_user (dest
, msg
+1, alen
))
206 dest
= ((char*)dest
)+alen
;
210 if(alen
> DATALEN_SEG
)
212 if(copy_to_user (dest
, seg
+1, alen
))
215 dest
= ((char*)dest
)+alen
;
/*
 * ss_add(): queue the current task as a sleeping sender on msq.
 * The task marks itself TASK_INTERRUPTIBLE *before* linking onto
 * q_senders so a wakeup cannot be lost; caller holds the queue lock.
 */
221 static inline void ss_add(struct msg_queue
* msq
, struct msg_sender
* mss
)
224 current
->state
=TASK_INTERRUPTIBLE
;
225 list_add_tail(&mss
->list
,&msq
->q_senders
);
/*
 * ss_del(): unlink a sender from q_senders.
 * The NULL check covers the case where ss_wakeup(..., kill=1) already
 * removed the entry and cleared list.next.
 */
228 static inline void ss_del(struct msg_sender
* mss
)
230 if(mss
->list
.next
!= NULL
)
231 list_del(&mss
->list
);
/*
 * ss_wakeup(): wake every sender sleeping on list "h"; with kill != 0
 * (queue being destroyed) the entries are also detached so ss_del()
 * later sees list.next == NULL.
 * NOTE(review): the while-loop header, tmp advancement and the
 * "if (kill) mss->list.next = NULL;" line are missing from this
 * extraction.
 */
234 static void ss_wakeup(struct list_head
* h
, int kill
)
236 struct list_head
*tmp
;
240 struct msg_sender
* mss
;
242 mss
= list_entry(tmp
,struct msg_sender
,list
);
246 wake_up_process(mss
->tsk
);
/*
 * expunge_all(): fail every sleeping receiver on msq with error "res"
 * (e.g. -EIDRM on queue removal, -EAGAIN after a permission change):
 * store ERR_PTR(res) in r_msg and wake the task.
 * NOTE(review): the tmp advancement and list_del of r_list inside the
 * loop are missing from this extraction.
 */
250 static void expunge_all(struct msg_queue
* msq
, int res
)
252 struct list_head
*tmp
;
254 tmp
= msq
->q_receivers
.next
;
255 while (tmp
!= &msq
->q_receivers
) {
256 struct msg_receiver
* msr
;
258 msr
= list_entry(tmp
,struct msg_receiver
,r_list
);
/* hand the error to the sleeper, then wake it */
260 msr
->r_msg
= ERR_PTR(res
);
261 wake_up_process(msr
->r_tsk
);
/*
 * freeque(): destroy queue "id" — fail all sleeping receivers with
 * -EIDRM, wake (and kill-detach) all sleeping senders, free every
 * queued message, subtract the queue's bytes from the global counters,
 * release the security blob and free the queue.
 * NOTE(review): extraction is lossy — the msg_rmid() call, unlock,
 * free_msg()/tmp-advance inside the loop, and the final kfree(msq)
 * are missing; restore from the original source.
 */
265 static void freeque (int id
)
267 struct msg_queue
*msq
;
268 struct list_head
*tmp
;
272 expunge_all(msq
,-EIDRM
);
273 ss_wakeup(&msq
->q_senders
,1);
276 tmp
= msq
->q_messages
.next
;
277 while(tmp
!= &msq
->q_messages
) {
278 struct msg_msg
* msg
= list_entry(tmp
,struct msg_msg
,m_list
);
280 atomic_dec(&msg_hdrs
);
283 atomic_sub(msq
->q_cbytes
, &msg_bytes
);
284 security_ops
->msg_queue_free_security(msq
);
/*
 * sys_msgget(): msgget(2) — look up or create a queue for "key".
 *   IPC_PRIVATE        -> always create a new queue
 *   key unused         -> create only if IPC_CREAT, else -ENOENT
 *   key exists         -> -EEXIST if IPC_CREAT|IPC_EXCL, else permission
 *                         check and return the existing id
 * NOTE(review): extraction is lossy — the down()/up() on msg_ids.sem,
 * the -ENOENT/-EEXIST assignments, msg_lock/msg_unlock around the
 * permission check, and the final return are missing.
 */
288 asmlinkage
long sys_msgget (key_t key
, int msgflg
)
290 int id
, ret
= -EPERM
;
291 struct msg_queue
*msq
;
294 if (key
== IPC_PRIVATE
)
295 ret
= newque(key
, msgflg
);
296 else if ((id
= ipc_findkey(&msg_ids
, key
)) == -1) { /* key not used */
297 if (!(msgflg
& IPC_CREAT
))
300 ret
= newque(key
, msgflg
);
301 } else if (msgflg
& IPC_CREAT
&& msgflg
& IPC_EXCL
) {
307 if (ipcperms(&msq
->q_perm
, msgflg
))
310 ret
= msg_buildid(id
, msq
->q_perm
.seq
);
/*
 * copy_msqid_to_user(): marshal a kernel msqid64_ds to user space.
 * New-format (IPC_64) callers get a straight copy; old-format callers
 * get a msqid_ds with counters clamped to USHRT_MAX in the short
 * fields and the full values in the msg_l* long fields.
 * NOTE(review): the switch(version) skeleton, the "struct msqid_ds out"
 * declaration, the else keywords, and the default/-EINVAL case are
 * missing from this extraction.
 */
317 static inline unsigned long copy_msqid_to_user(void *buf
, struct msqid64_ds
*in
, int version
)
321 return copy_to_user (buf
, in
, sizeof(*in
));
/* old ABI below: build a msqid_ds on the stack, zeroed so no kernel
 * stack data leaks through padding */
326 memset(&out
,0,sizeof(out
));
328 ipc64_perm_to_ipc_perm(&in
->msg_perm
, &out
.msg_perm
);
330 out
.msg_stime
= in
->msg_stime
;
331 out
.msg_rtime
= in
->msg_rtime
;
332 out
.msg_ctime
= in
->msg_ctime
;
334 if(in
->msg_cbytes
> USHRT_MAX
)
335 out
.msg_cbytes
= USHRT_MAX
;
337 out
.msg_cbytes
= in
->msg_cbytes
;
338 out
.msg_lcbytes
= in
->msg_cbytes
;
340 if(in
->msg_qnum
> USHRT_MAX
)
341 out
.msg_qnum
= USHRT_MAX
;
343 out
.msg_qnum
= in
->msg_qnum
;
345 if(in
->msg_qbytes
> USHRT_MAX
)
346 out
.msg_qbytes
= USHRT_MAX
;
348 out
.msg_qbytes
= in
->msg_qbytes
;
349 out
.msg_lqbytes
= in
->msg_qbytes
;
351 out
.msg_lspid
= in
->msg_lspid
;
352 out
.msg_lrpid
= in
->msg_lrpid
;
354 return copy_to_user (buf
, &out
, sizeof(out
));
/* NOTE(review): lone surviving member of struct msq_setbuf (the IPC_SET
 * scratch struct); its braces and the uid/gid/mode fields — assigned by
 * copy_msqid_from_user() below — were lost in extraction. */
362 unsigned long qbytes
;
/*
 * copy_msqid_from_user(): unmarshal an IPC_SET request into a
 * msq_setbuf. IPC_64 callers supply msqid64_ds; old callers supply
 * msqid_ds, where msg_qbytes == 0 means "use the long msg_lqbytes
 * field instead".
 * NOTE(review): the switch(version) skeleton, -EFAULT returns, the
 * "return 0" successes and the default/-EINVAL case are missing from
 * this extraction.
 */
368 static inline unsigned long copy_msqid_from_user(struct msq_setbuf
*out
, void *buf
, int version
)
373 struct msqid64_ds tbuf
;
375 if (copy_from_user (&tbuf
, buf
, sizeof (tbuf
)))
378 out
->qbytes
= tbuf
.msg_qbytes
;
379 out
->uid
= tbuf
.msg_perm
.uid
;
380 out
->gid
= tbuf
.msg_perm
.gid
;
381 out
->mode
= tbuf
.msg_perm
.mode
;
/* old ABI below */
387 struct msqid_ds tbuf_old
;
389 if (copy_from_user (&tbuf_old
, buf
, sizeof (tbuf_old
)))
392 out
->uid
= tbuf_old
.msg_perm
.uid
;
393 out
->gid
= tbuf_old
.msg_perm
.gid
;
394 out
->mode
= tbuf_old
.msg_perm
.mode
;
396 if(tbuf_old
.msg_qbytes
== 0)
397 out
->qbytes
= tbuf_old
.msg_lqbytes
;
399 out
->qbytes
= tbuf_old
.msg_qbytes
;
/*
 * sys_msgctl(): msgctl(2) — control operations on a queue:
 *   IPC_INFO/MSG_INFO - report system-wide limits/usage in a msginfo
 *   MSG_STAT/IPC_STAT - copy queue status out to the caller
 *   IPC_SET           - change qbytes/uid/gid/mode (permission checked)
 *   IPC_RMID          - destroy the queue (via freeque)
 * NOTE(review): extraction is heavily lossy — the switch(cmd) skeleton,
 * local declarations (err, version's decl, max_id, success_return),
 * msg_unlock calls, -EINVAL/-EFAULT/-EPERM returns, the down/up on
 * msg_ids.sem, and the whole IPC_RMID arm are missing; restore from
 * the original source before compiling.
 */
408 asmlinkage
long sys_msgctl (int msqid
, int cmd
, struct msqid_ds
*buf
)
411 struct msg_queue
*msq
;
412 struct msq_setbuf setbuf
;
413 struct kern_ipc_perm
*ipcp
;
415 if (msqid
< 0 || cmd
< 0)
418 version
= ipc_parse_version(&cmd
);
/* IPC_INFO / MSG_INFO arm */
424 struct msginfo msginfo
;
428 /* We must not return kernel stack data.
429 * due to padding, it's not enough
430 * to set all member fields.
432 memset(&msginfo
,0,sizeof(msginfo
));
433 msginfo
.msgmni
= msg_ctlmni
;
434 msginfo
.msgmax
= msg_ctlmax
;
435 msginfo
.msgmnb
= msg_ctlmnb
;
436 msginfo
.msgssz
= MSGSSZ
;
437 msginfo
.msgseg
= MSGSEG
;
439 if (cmd
== MSG_INFO
) {
440 msginfo
.msgpool
= msg_ids
.in_use
;
441 msginfo
.msgmap
= atomic_read(&msg_hdrs
);
442 msginfo
.msgtql
= atomic_read(&msg_bytes
);
/* IPC_INFO reports the compile-time constants instead */
444 msginfo
.msgmap
= MSGMAP
;
445 msginfo
.msgpool
= MSGPOOL
;
446 msginfo
.msgtql
= MSGTQL
;
448 max_id
= msg_ids
.max_id
;
450 if (copy_to_user (buf
, &msginfo
, sizeof(struct msginfo
)))
452 return (max_id
< 0) ? 0: max_id
;
/* MSG_STAT / IPC_STAT arm */
457 struct msqid64_ds tbuf
;
461 if(cmd
== MSG_STAT
&& msqid
>= msg_ids
.size
)
464 memset(&tbuf
,0,sizeof(tbuf
));
466 msq
= msg_lock(msqid
);
/* MSG_STAT takes a raw index and returns the full built id */
470 if(cmd
== MSG_STAT
) {
471 success_return
= msg_buildid(msqid
, msq
->q_perm
.seq
);
474 if (msg_checkid(msq
,msqid
))
479 if (ipcperms (&msq
->q_perm
, S_IRUGO
))
482 kernel_to_ipc64_perm(&msq
->q_perm
, &tbuf
.msg_perm
);
483 tbuf
.msg_stime
= msq
->q_stime
;
484 tbuf
.msg_rtime
= msq
->q_rtime
;
485 tbuf
.msg_ctime
= msq
->q_ctime
;
486 tbuf
.msg_cbytes
= msq
->q_cbytes
;
487 tbuf
.msg_qnum
= msq
->q_qnum
;
488 tbuf
.msg_qbytes
= msq
->q_qbytes
;
489 tbuf
.msg_lspid
= msq
->q_lspid
;
490 tbuf
.msg_lrpid
= msq
->q_lrpid
;
492 if (copy_msqid_to_user(buf
, &tbuf
, version
))
494 return success_return
;
/* IPC_SET (and IPC_RMID) arm */
499 if (copy_msqid_from_user (&setbuf
, buf
, version
))
509 msq
= msg_lock(msqid
);
515 if (msg_checkid(msq
,msqid
))
/* only the creator, owner, or CAP_SYS_ADMIN may change/remove */
519 if (current
->euid
!= ipcp
->cuid
&&
520 current
->euid
!= ipcp
->uid
&& !capable(CAP_SYS_ADMIN
))
521 /* We _could_ check for CAP_CHOWN above, but we don't */
/* raising qbytes above the system limit needs CAP_SYS_RESOURCE */
527 if (setbuf
.qbytes
> msg_ctlmnb
&& !capable(CAP_SYS_RESOURCE
))
529 msq
->q_qbytes
= setbuf
.qbytes
;
531 ipcp
->uid
= setbuf
.uid
;
532 ipcp
->gid
= setbuf
.gid
;
533 ipcp
->mode
= (ipcp
->mode
& ~S_IRWXUGO
) |
534 (S_IRWXUGO
& setbuf
.mode
);
535 msq
->q_ctime
= CURRENT_TIME
;
536 /* sleeping receivers might be excluded by
537 * stricter permissions.
539 expunge_all(msq
,-EAGAIN
);
540 /* sleeping senders might be able to send
541 * due to a larger queue size.
543 ss_wakeup(&msq
->q_senders
,0);
/*
 * testmsg(): does "msg" satisfy a receive of (type, mode)?
 * SEARCH_LESSEQUAL matches m_type <= type, SEARCH_EQUAL matches
 * equality, SEARCH_NOTEQUAL matches inequality.
 * NOTE(review): the switch(mode) skeleton, the SEARCH_ANY/SEARCH_EQUAL
 * case labels, the "return 1" bodies and the final "return 0" are
 * missing from this extraction.
 */
563 static int testmsg(struct msg_msg
* msg
,long type
,int mode
)
569 case SEARCH_LESSEQUAL
:
570 if(msg
->m_type
<=type
)
574 if(msg
->m_type
== type
)
577 case SEARCH_NOTEQUAL
:
578 if(msg
->m_type
!= type
)
/*
 * pipelined_send(): try to hand "msg" directly to a sleeping receiver,
 * bypassing the queue. For the first matching receiver: if the message
 * exceeds the receiver's r_maxsize, fail that receiver with -E2BIG and
 * keep scanning; otherwise record pid/time, store the message in r_msg,
 * wake the task and report success.
 * NOTE(review): the tmp advancement, the "msr->r_msg = msg" store, the
 * "return 1" on delivery and the final "return 0" are missing from this
 * extraction.
 */
585 static int inline pipelined_send(struct msg_queue
* msq
, struct msg_msg
* msg
)
587 struct list_head
* tmp
;
589 tmp
= msq
->q_receivers
.next
;
590 while (tmp
!= &msq
->q_receivers
) {
591 struct msg_receiver
* msr
;
592 msr
= list_entry(tmp
,struct msg_receiver
,r_list
);
594 if(testmsg(msg
,msr
->r_msgtype
,msr
->r_mode
)) {
595 list_del(&msr
->r_list
);
/* message too large for this receiver: fail it, try the next */
596 if(msr
->r_maxsize
< msg
->m_ts
) {
597 msr
->r_msg
= ERR_PTR(-E2BIG
);
598 wake_up_process(msr
->r_tsk
);
601 msq
->q_lrpid
= msr
->r_tsk
->pid
;
602 msq
->q_rtime
= CURRENT_TIME
;
603 wake_up_process(msr
->r_tsk
);
/*
 * sys_msgsnd(): msgsnd(2) — validate size against msg_ctlmax, copy the
 * message in with load_msg(), then under the queue lock: permission
 * check, wait (interruptibly) while the queue is full unless
 * IPC_NOWAIT, and finally either pipeline the message straight to a
 * sleeping receiver or append it to q_messages and bump the counters.
 * NOTE(review): extraction is heavily lossy — local declarations
 * (msg, mtype, err, the msg_sender s), mtype < 1 check, msg->m_type/
 * m_ts assignment, the retry loop with ss_add/msg_unlock/schedule/
 * ss_del, -EAGAIN/-EACCES/-EIDRM assignments, and the out_unlock_free
 * cleanup labels are missing; restore from the original source.
 */
611 asmlinkage
long sys_msgsnd (int msqid
, struct msgbuf
*msgp
, size_t msgsz
, int msgflg
)
613 struct msg_queue
*msq
;
618 if (msgsz
> msg_ctlmax
|| (long) msgsz
< 0 || msqid
< 0)
620 if (get_user(mtype
, &msgp
->mtype
))
625 msg
= load_msg(msgp
->mtext
, msgsz
);
632 msq
= msg_lock(msqid
);
638 if (msg_checkid(msq
,msqid
))
639 goto out_unlock_free
;
642 if (ipcperms(&msq
->q_perm
, S_IWUGO
))
643 goto out_unlock_free
;
/* queue full: either the byte limit or the (qbytes-bounded) message
 * count limit is exceeded */
645 if(msgsz
+ msq
->q_cbytes
> msq
->q_qbytes
||
646 1 + msq
->q_qnum
> msq
->q_qbytes
) {
649 if(msgflg
&IPC_NOWAIT
) {
651 goto out_unlock_free
;
/* back from sleeping on q_senders; re-take the lock and retry */
656 current
->state
= TASK_RUNNING
;
658 msq
= msg_lock(msqid
);
664 if (signal_pending(current
)) {
666 goto out_unlock_free
;
671 msq
->q_lspid
= current
->pid
;
672 msq
->q_stime
= CURRENT_TIME
;
674 if(!pipelined_send(msq
,msg
)) {
675 /* noone is waiting for this message, enqueue it */
676 list_add_tail(&msg
->m_list
,&msq
->q_messages
);
677 msq
->q_cbytes
+= msgsz
;
679 atomic_add(msgsz
,&msg_bytes
);
680 atomic_inc(&msg_hdrs
);
/*
 * convert_mode(): map a msgrcv msgtyp/msgflg pair to a SEARCH_* mode,
 * negating *msgtyp for the less-equal case.
 * NOTE(review): the SEARCH_ANY return for msgtyp == 0, the *msgtyp
 * negation, and the final SEARCH_EQUAL return are missing from this
 * extraction.
 */
694 static int inline convert_mode(long* msgtyp
, int msgflg
)
697 * find message of correct type.
698 * msgtyp = 0 => get first.
699 * msgtyp > 0 => get first message of matching type.
700 * msgtyp < 0 => get message with least type must be < abs(msgtype).
706 return SEARCH_LESSEQUAL
;
708 if(msgflg
& MSG_EXCEPT
)
709 return SEARCH_NOTEQUAL
;
/*
 * sys_msgrcv(): msgrcv(2) — scan q_messages for a message matching
 * (msgtyp, mode). On a hit: E2BIG unless MSG_NOERROR, otherwise unlink
 * the message, update counters/pids/times, wake senders, and copy type
 * and (truncated) text out to the caller. On a miss: fail with -ENOMSG
 * if IPC_NOWAIT, else register as a sleeping msg_receiver and wait for
 * a pipelined_send() or a signal.
 * NOTE(review): extraction is heavily lossy — locals (mode, err),
 * -EINVAL/-EACCES/-E2BIG/-EAGAIN/-EINTR handling, msg_unlock calls,
 * the found_msg bookkeeping in the SEARCH_LESSEQUAL rescan, the
 * schedule() call, the IS_ERR(msg) checks after waking, and free_msg
 * on the out path are missing; restore from the original source.
 */
713 asmlinkage
long sys_msgrcv (int msqid
, struct msgbuf
*msgp
, size_t msgsz
,
714 long msgtyp
, int msgflg
)
716 struct msg_queue
*msq
;
717 struct msg_receiver msr_d
;
718 struct list_head
* tmp
;
719 struct msg_msg
* msg
, *found_msg
;
723 if (msqid
< 0 || (long) msgsz
< 0)
725 mode
= convert_mode(&msgtyp
,msgflg
);
727 msq
= msg_lock(msqid
);
732 if (msg_checkid(msq
,msqid
))
736 if (ipcperms (&msq
->q_perm
, S_IRUGO
))
739 tmp
= msq
->q_messages
.next
;
741 while (tmp
!= &msq
->q_messages
) {
742 msg
= list_entry(tmp
,struct msg_msg
,m_list
);
743 if(testmsg(msg
,msgtyp
,mode
)) {
/* SEARCH_LESSEQUAL keeps scanning for the smallest type:
 * tighten the bound and remember this candidate */
745 if(mode
== SEARCH_LESSEQUAL
&& msg
->m_type
!= 1) {
747 msgtyp
=msg
->m_type
-1;
/* found a message */
757 if ((msgsz
< msg
->m_ts
) && !(msgflg
& MSG_NOERROR
)) {
761 list_del(&msg
->m_list
);
763 msq
->q_rtime
= CURRENT_TIME
;
764 msq
->q_lrpid
= current
->pid
;
765 msq
->q_cbytes
-= msg
->m_ts
;
766 atomic_sub(msg
->m_ts
,&msg_bytes
);
767 atomic_dec(&msg_hdrs
);
/* room just opened up — let blocked senders retry */
768 ss_wakeup(&msq
->q_senders
,0);
771 msgsz
= (msgsz
> msg
->m_ts
) ? msg
->m_ts
: msgsz
;
772 if (put_user (msg
->m_type
, &msgp
->mtype
) ||
773 store_msg(msgp
->mtext
, msg
, msgsz
)) {
781 /* no message waiting. Prepare for pipelined
784 if (msgflg
& IPC_NOWAIT
) {
788 list_add_tail(&msr_d
.r_list
,&msq
->q_receivers
);
789 msr_d
.r_tsk
= current
;
790 msr_d
.r_msgtype
= msgtyp
;
792 if(msgflg
& MSG_NOERROR
)
793 msr_d
.r_maxsize
= INT_MAX
;
795 msr_d
.r_maxsize
= msgsz
;
/* default outcome if we are woken without a delivery */
796 msr_d
.r_msg
= ERR_PTR(-EAGAIN
);
797 current
->state
= TASK_INTERRUPTIBLE
;
801 current
->state
= TASK_RUNNING
;
/* lockless peek at r_msg after waking ... */
803 msg
= (struct msg_msg
*) msr_d
.r_msg
;
/* ... and the authoritative re-read under the queue lock */
810 msg
= (struct msg_msg
*)msr_d
.r_msg
;
812 /* our message arived while we waited for
813 * the spinlock. Process it.
823 list_del(&msr_d
.r_list
);
824 if (signal_pending(current
))
/*
 * sysvipc_msg_read_proc(): /proc/sysvipc/msg read handler — print a
 * header line, then one formatted line per in-use queue id, handling
 * the standard proc read_proc offset/length windowing.
 * NOTE(review): extraction is lossy — locals (len, i, pos, begin), the
 * msg_lock/msg_unlock around each row, the sprintf argument list after
 * msg_buildid (perms, cbytes, qnum, pids, uids, times), the windowing
 * break logic, *eof handling, the final return and the closing #endif
 * are missing; restore from the original source.
 */
836 #ifdef CONFIG_PROC_FS
837 static int sysvipc_msg_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
)
844 len
+= sprintf(buffer
, "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n");
846 for(i
= 0; i
<= msg_ids
.max_id
; i
++) {
847 struct msg_queue
* msq
;
850 len
+= sprintf(buffer
+ len
, "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
852 msg_buildid(i
,msq
->q_perm
.seq
),
/* proc read windowing: stop once the requested window is filled */
872 if(pos
> offset
+ length
)
880 *start
= buffer
+ (offset
- begin
);
881 len
-= (offset
- begin
);