ipc/mqueue.c

/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (Michal.Wronski@motorola.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

/* used by sysctl */
#define FS_MQUEUE	1
#define CTL_QUEUESMAX	2
#define CTL_MSGMAX	3
#define CTL_MSGSIZEMAX	4

/* default values */
#define DFLT_QUEUESMAX	256		/* max number of message queues */
#define DFLT_MSGMAX	10		/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void*))
#define DFLT_MSGSIZEMAX	8192		/* max message size */
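
/*
 * Worked example of the per-user accounting done in mqueue_get_inode()
 * below: each queue is charged mq_maxmsg * sizeof(struct msg_msg *) for
 * its message pointer table plus mq_maxmsg * mq_msgsize for the payload.
 * With the defaults above, on a 64-bit machine that is roughly
 * 10 * 8 + 10 * 8192 = 82000 bytes counted against the creator's
 * RLIMIT_MSGQUEUE.
 */
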
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	pid_t notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static struct inode_operations mqueue_dir_inode_operations;
static struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static kmem_cache_t *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header * mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
							struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = 0;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}

static struct super_block *mqueue_get_sb(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
		SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	inode->i_nlink--;
	dput(dentry);
	return 0;
}

/*
 * This routine handles reads from the queue file.  To avoid doing some
 * sort of mq_receive here, we only allow reading the queue size and the
 * notification info (the only values that are interesting from the user's
 * point of view and aren't accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t * off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	if (!count)
		return 0;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			info->notify_owner);
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_dentry->d_inode->i_atime = filp->f_dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}
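
/*
 * Reading the queue file (e.g. "cat /dev/mqueue/<name>" with the mqueue
 * filesystem mounted at the conventional /dev/mqueue) therefore returns a
 * single line in the format produced by the snprintf() above, for
 * instance (sample values, fields padded by the %-N conversions):
 *
 *	QSIZE:129        NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */
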
static int mqueue_flush_file(struct file *filp)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);

	spin_lock(&info->lock);
	if (current->tgid == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
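
/*
 * msg_insert()/msg_get() keep info->messages[] sorted in ascending
 * priority order, with msg_get() popping the highest index.  A short
 * illustration: inserting messages with priorities 1, 5, 3, 3 yields the
 * array [1, 3(new), 3(old), 5]; msg_get() then returns them as
 * 5, 3(old), 3(new), 1, i.e. highest priority first and FIFO among equal
 * priorities, as POSIX requires.
 */
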
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function is only to split too long sys_mq_timedsend
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process and there is no process
	 * waiting synchronously for a message AND the state of the queue
	 * changed from empty to not empty. Here we are sure that no one is
	 * waiting synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_proc_info(info->notify.sigev_signo,
				       &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock,
					info->notify_cookie, 0);
			break;
		}
		/* after notification unregisters process */
		info->notify_owner = 0;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
					sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;
		if (ts.tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
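
/*
 * prepare_timeout() converts the absolute deadline supplied by userspace
 * into a relative jiffies count.  As a rough illustration, with HZ=1000 a
 * deadline about 1.5s in the future becomes roughly 1501 jiffies; a
 * deadline that has already passed yields 0, which wq_sleep() turns into
 * -ETIMEDOUT.  A NULL timeout pointer means "block forever"
 * (MAX_SCHEDULE_TIMEOUT).
 */
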
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != 0 &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie, 0);
	}
	info->notify_owner = 0;
}

static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof (struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
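
/*
 * The two checks above guard the later mq_bytes computation against
 * overflow: the division check rejects mq_maxmsg * mq_msgsize products
 * that would wrap, and the addition check rejects cases where adding the
 * pointer-table size wraps the sum.  An illustrative case on a 32-bit
 * machine: a CAP_SYS_RESOURCE caller asking for mq_maxmsg = 2 and
 * mq_msgsize = 0x7fffffff passes the division check but is caught by the
 * addition check.
 */
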
/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}

asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
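
/*
 * From userspace these syscalls are normally reached through the glibc/
 * librt wrappers.  A minimal, illustrative sketch of creating and using a
 * queue (error handling and includes omitted):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	mq_send(q, "hello", 6, 1);		   send at priority 1
 *	char buf[8192];
 *	unsigned int prio;
 *	mq_receive(q, buf, sizeof(buf), &prio);
 *	mq_close(q);
 *	mq_unlink("/myqueue");
 */
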
asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
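
/*
 * The handshake relies on the ->state field of struct ext_wait_queue:
 * a waiter sets STATE_NONE before calling wq_sleep(); its counterpart
 * removes it from the list, sets STATE_PENDING, wakes the task, issues
 * smp_wmb() and finally sets STATE_READY.  The woken task spins with
 * cpu_relax() while the state is still STATE_PENDING and only then
 * proceeds without retaking the queue spinlock.
 */
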
/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (we have one free place for
 * sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
	size_t msg_len, unsigned int msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	long timeout;
	int ret;

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_WRITE)))
		goto out_fput;

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
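
/*
 * Userspace view of the two calls above (a minimal, illustrative sketch):
 * mq_timedsend()/mq_timedreceive() take an absolute CLOCK_REALTIME
 * deadline, which prepare_timeout() converts into a relative jiffies
 * value:
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;			   give up after ~5 seconds
 *	if (mq_timedsend(q, buf, len, prio, &abs) < 0 && errno == ETIMEDOUT)
 *		... the queue stayed full for the whole 5 seconds ...
 */
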
/*
 * Notes: the case where the user asks us to deregister (with a NULL pointer)
 * but isn't currently the owner of the notification is silently discarded.
 * This isn't explicitly defined in POSIX.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == current->tgid) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != 0) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}
		info->notify_owner = current->tgid;
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

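/*
 * Userspace sketch of registering for notification (illustrative only):
 * a single process may own the registration, and it fires once, when the
 * queue goes from empty to non-empty with no reader already blocked:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(q, &sev);			   register
 *	mq_notify(q, NULL);			   deregister
 *
 * SIGEV_THREAD registrations are handled here via the netlink cookie
 * attached above; the notification thread itself is created by the C
 * library, not by the kernel.
 */
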
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.ctl_name	= CTL_QUEUESMAX,
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_MSGMAX,
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.ctl_name	= CTL_MSGSIZEMAX,
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.ctl_name	= FS_MQUEUE,
		.procname	= "mqueue",
		.mode		= 0555,
		.child		= mq_sysctls,
	},
	{ .ctl_name = 0 }
};

static ctl_table mq_sysctl_root[] = {
	{
		.ctl_name	= CTL_FS,
		.procname	= "fs",
		.mode		= 0555,
		.child		= mq_sysctl_dir,
	},
	{ .ctl_name = 0 }
};
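
/*
 * These tables expose the limits as /proc/sys/fs/mqueue/queues_max,
 * /proc/sys/fs/mqueue/msg_max and /proc/sys/fs/mqueue/msgsize_max
 * (also reachable as fs.mqueue.* via sysctl(8)).
 */
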
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once, NULL);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root, 0);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	if (kmem_cache_destroy(mqueue_inode_cachep)) {
		printk(KERN_INFO
			"mqueue_inode_cache: not all structures were freed\n");
	}
	return error;
}

__initcall(init_mqueue_fs);