/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
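
/*
 * Illustrative userspace sketch (not part of this file): the shmget()
 * wrapper above packs key/size/shmflg into ipc_params and lets ipcget()
 * either create a segment through newseg() or look up an existing one.
 * A minimal caller, assuming <sys/ipc.h>/<sys/shm.h> and a 1 MiB segment,
 * might look like:
 *
 *	key_t key = ftok("/some/path", 42);
 *	int shmid = shmget(key, 1 << 20, IPC_CREAT | 0600);
 *	if (shmid < 0)
 *		perror("shmget");
 */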

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = shp->shm_file->f_path.dentry->d_inode;

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
			       &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}
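
/*
 * Illustrative userspace sketch (not part of this file): IPC_STAT copies the
 * shmid_kernel fields gathered above into a shmid_ds, and IPC_RMID goes
 * through shmctl_down()/do_shm_rmid(), so an attached segment is only marked
 * SHM_DEST and freed once shm_nattch drops to zero.  For example:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(shmid, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n",
 *		       ds.shm_segsz, (unsigned long)ds.shm_nattch);
 *	shmctl(shmid, IPC_RMID, NULL);
 */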

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
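
/*
 * Illustrative userspace sketch (not part of this file): do_shmat() returns
 * the mapped address through *raddr, and the wrapper above hands it back as
 * the syscall return value, so libc's shmat() can cast it to a pointer.
 * Typical use, assuming shmid came from shmget():
 *
 *	char *p = shmat(shmid, NULL, 0);
 *	if (p == (char *) -1)
 *		perror("shmat");
 *	else
 *		p[0] = 'x';	(the mapping is shared with other attachers)
 */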

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: it searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
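
/*
 * Illustrative userspace sketch (not part of this file): shmdt() only takes
 * the attach address; the matching vma(s) are found and unmapped above, and
 * shm_close() then drops shm_nattch and destroys SHM_DEST segments.
 *
 *	if (shmdt(p) != 0)
 *		perror("shmdt");
 */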

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif