/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (Michal.Wronski@motorola.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * This file is released under the GPL.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

/* used by sysctl */
#define FS_MQUEUE	1
#define CTL_QUEUESMAX	2
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

/* default values */
#define DFLT_QUEUESMAX	256	/* max number of message queues */
#define DFLT_MSGMAX	10	/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void *))
#define DFLT_MSGSIZEMAX	8192	/* max message size */

#define NOTIFY_COOKIE_LEN	32
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};
struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	pid_t notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};
static struct inode_operations mqueue_dir_inode_operations;
static struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);
static spinlock_t mq_lock;
static kmem_cache_t *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;
static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
							struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = 0;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}
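/*
 * Worked example of the accounting above, assuming the defaults on a
 * 64-bit kernel (illustrative numbers, not taken from this file):
 *
 *	mq_maxmsg = 10, mq_msgsize = 8192, sizeof(struct msg_msg *) = 8
 *	mq_msg_tblsz = 10 * 8          =    80 bytes
 *	mq_bytes     = 80 + 10 * 8192  = 82000 bytes
 *
 * i.e. the full worst-case footprint of the queue is charged against
 * the creator's RLIMIT_MSGQUEUE at creation time, before any message
 * has ever been queued.
 */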
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}
static struct super_block *mqueue_get_sb(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super);
}
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
			SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
}
static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}
static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}
static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}
static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	inode->i_nlink--;
	dput(dentry);
	return 0;
}
/*
 * This routine handles a read(2) on a queue file. To avoid duplicating
 * mq_receive here, only the queue size and notification info may be read
 * (the only values that are interesting from the user's point of view
 * and aren't accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	if (!count)
		return 0;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			info->notify_owner);
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_dentry->d_inode->i_atime = filp->f_dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}
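/*
 * A minimal user-space sketch of reading the line built above, assuming
 * the mqueue filesystem has been mounted somewhere visible (for example
 * "mount -t mqueue none /dev/mqueue"; the path below is illustrative):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int dump_queue_status(void)
 *	{
 *		char buf[128];
 *		ssize_t n;
 *		int fd = open("/dev/mqueue/myq", O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			fputs(buf, stdout);	// "QSIZE:... NOTIFY:..."
 *		}
 *		close(fd);
 *		return n > 0 ? 0 : -1;
 *	}
 */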
static int mqueue_flush_file(struct file *filp)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);

	spin_lock(&info->lock);
	if (current->tgid == info->notify_owner)
		remove_notification(info);
	spin_unlock(&info->lock);
	return 0;
}
static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
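/*
 * A minimal user-space sketch: on Linux an mqd_t is a file descriptor
 * backed by this file's f_op, so it can be polled directly; POLLIN
 * means a message is queued, POLLOUT means there is room for another.
 * The helper name is illustrative:
 *
 *	#include <poll.h>
 *
 *	int wait_readable(int mqd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = mqd, .events = POLLIN };
 *		int n = poll(&pfd, 1, timeout_ms);
 *
 *		if (n <= 0)
 *			return -1;		// error or timeout
 *		return (pfd.revents & POLLIN) ? 0 : -1;
 *	}
 */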
/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}
/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}
/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}
static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
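/*
 * The invariant the two helpers above maintain: messages[] is kept
 * sorted by priority (m_type), lowest first, so msg_get() pops the
 * highest priority from the tail in O(1). For example, inserting
 * priorities 1, 5, 3 in that order gives:
 *
 *	after msg_insert(1):	{ 1 }
 *	after msg_insert(5):	{ 1, 5 }
 *	after msg_insert(3):	{ 1, 3, 5 }
 *	msg_get() returns 5:	{ 1, 3 }
 *
 * A new message is placed below existing messages of equal priority
 * (the shift loop uses >=), so equal-priority messages are received
 * in FIFO order, as POSIX requires.
 */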
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}
/*
 * Split out of sys_mq_timedsend, which would otherwise be too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when a process is registered, no process is waiting
	 * synchronously for a message AND the queue changed from empty
	 * to non-empty. At this point we know no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_proc_info(info->notify.sigev_signo,
				       &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after notification unregisters process */
		info->notify_owner = 0;
	}
	wake_up(&info->wait_q);
}
static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
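/*
 * Note for callers coming from user space: the timespec converted above
 * is an absolute wall-clock (CLOCK_REALTIME) deadline, not a duration.
 * A minimal sketch of a 5-second send timeout (illustrative helper,
 * link with -lrt):
 *
 *	#include <mqueue.h>
 *	#include <time.h>
 *
 *	int send_within_5s(mqd_t mqd, const char *msg, size_t len)
 *	{
 *		struct timespec deadline;
 *
 *		clock_gettime(CLOCK_REALTIME, &deadline);
 *		deadline.tv_sec += 5;		// absolute, not relative
 *		return mq_timedsend(mqd, msg, len, 0, &deadline);
 *	}
 */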
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != 0 &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	info->notify_owner = 0;
}
static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof(struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr != NULL) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}
/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}
asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
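/*
 * A minimal user-space sketch of this syscall via the libc wrapper
 * (queue name and limits are illustrative; for unprivileged callers
 * mq_maxmsg/mq_msgsize must pass mq_attr_ok() against msg_max and
 * msgsize_max):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	mqd_t create_queue(void)
 *	{
 *		struct mq_attr attr = {
 *			.mq_maxmsg  = 10,
 *			.mq_msgsize = 128,
 *		};
 *
 *		// O_CREAT | O_EXCL takes the do_create() path above
 *		return mq_open("/myq", O_CREAT | O_EXCL | O_RDWR,
 *			       0600, &attr);
 *	}
 */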
asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}
/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}
/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (there is guaranteed to be one
 * free slot). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}
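/*
 * How the handoff above pairs with wq_sleep(), step by step (sketch;
 * "waker" is pipelined_send()/pipelined_receive(), "sleeper" is the
 * task parked in wq_sleep()):
 *
 *	waker				sleeper
 *	-----				-------
 *	->msg = message;
 *	->state = STATE_PENDING;
 *	wake_up_process(task);		wakes, sees STATE_PENDING,
 *	smp_wmb();			spins in cpu_relax()
 *	->state = STATE_READY;		sees STATE_READY: msg is valid,
 *					returns without the queue lock
 *
 * The brief STATE_PENDING spin is what lets the sleeper exit without
 * retaking info->lock.
 */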
asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}
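/*
 * A minimal user-space sketch of the send path (mq_send() is this
 * syscall with a NULL timeout, so prepare_timeout() yields
 * MAX_SCHEDULE_TIMEOUT; the priority must be below MQ_PRIO_MAX):
 *
 *	#include <mqueue.h>
 *	#include <string.h>
 *
 *	int send_urgent(mqd_t mqd, const char *text)
 *	{
 *		// high priority: jumps ahead of lower-priority messages
 *		return mq_send(mqd, text, strlen(text) + 1, 31);
 *	}
 */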
asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);

		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
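/*
 * A minimal user-space sketch of the receive path; the buffer must be
 * at least mq_msgsize bytes or the EMSGSIZE check above fires:
 *
 *	#include <mqueue.h>
 *	#include <stdlib.h>
 *
 *	ssize_t recv_one(mqd_t mqd, char **out, unsigned int *prio)
 *	{
 *		struct mq_attr attr;
 *		char *buf;
 *		ssize_t n;
 *
 *		if (mq_getattr(mqd, &attr) < 0)
 *			return -1;
 *		buf = malloc(attr.mq_msgsize);	// big enough by construction
 *		if (!buf)
 *			return -1;
 *		n = mq_receive(mqd, buf, attr.mq_msgsize, prio);
 *		if (n < 0) {
 *			free(buf);
 *			return -1;
 *		}
 *		*out = buf;
 *		return n;			// bytes in the message
 *	}
 */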
/*
 * Note: a request to deregister (u_notification == NULL) from a process
 * that is not the current owner of the notification is silently ignored.
 * This case isn't explicitly defined in POSIX.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == current->tgid) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != 0) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}
		info->notify_owner = current->tgid;
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
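/*
 * A minimal user-space sketch of SIGEV_SIGNAL registration (signal
 * choice is illustrative). The notification fires once, when the queue
 * goes from empty to non-empty with no blocked reader; notify_owner is
 * then cleared, so the registration must be renewed each time:
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	int watch_queue(mqd_t mqd)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGUSR1,
 *		};
 *
 *		// fails with EBUSY if another process already registered
 *		return mq_notify(mqd, &sev);
 *	}
 */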
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}
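/*
 * A minimal user-space sketch: O_NONBLOCK is the only flag this syscall
 * lets user space change, exactly as checked above:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	int set_nonblocking(mqd_t mqd, int on)
 *	{
 *		struct mq_attr attr = {
 *			.mq_flags = on ? O_NONBLOCK : 0,
 *		};
 *
 *		return mq_setattr(mqd, &attr, NULL);
 *	}
 */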
static struct inode_operations mqueue_dir_inode_operations = {
	.lookup		= simple_lookup,
	.create		= mqueue_create,
	.unlink		= mqueue_unlink,
};

static struct file_operations mqueue_file_operations = {
	.flush		= mqueue_flush_file,
	.poll		= mqueue_poll_file,
	.read		= mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode	= mqueue_alloc_inode,
	.destroy_inode	= mqueue_destroy_inode,
	.statfs		= simple_statfs,
	.delete_inode	= mqueue_delete_inode,
	.drop_inode	= generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name		= "mqueue",
	.get_sb		= mqueue_get_sb,
	.kill_sb	= kill_litter_super,
};
static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;
static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};
static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
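/*
 * The tables above surface the limits as /proc/sys/fs/mqueue/queues_max,
 * /proc/sys/fs/mqueue/msg_max and /proc/sys/fs/mqueue/msgsize_max; the
 * latter two are clamped by proc_dointvec_minmax to the extra1/extra2
 * bounds defined above.
 */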
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root, 0);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	if (kmem_cache_destroy(mqueue_inode_cachep)) {
		printk(KERN_INFO
			"mqueue_inode_cache: not all structures were freed\n");
	}
	return error;
}

__initcall(init_mqueue_fs);