/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

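/*
 * One shm_file_data is allocated for every shmat() attach; it lives in
 * file->private_data of the per-attach file and wraps the segment's
 * backing (shmem or hugetlbfs) file. It is freed in shm_release().
 */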
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
        idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                  int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else if (shp->mlock_user)
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

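/*
 * Page faults on an attached segment are forwarded to the fault handler
 * of the backing shmem/hugetlbfs file, saved in sfd->vm_ops by shm_mmap().
 */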
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;
        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

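/*
 * mmap of the per-attach file: map through the backing file's ->mmap(),
 * then substitute shm_vm_ops so that attach/detach statistics are kept
 * up to date via shm_open()/shm_close().
 */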
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

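/*
 * Called when the last reference to the per-attach file is dropped;
 * tears down the shm_file_data allocated in do_shmat().
 */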
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

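/* fsync, too, is simply forwarded to the backing file. */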
static int shm_fsync(struct file *file, int datasync)
{
        struct shm_file_data *sfd = shm_file_data(file);

        if (!sfd->file->f_op->fsync)
                return -EINVAL;
        return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);
        return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
                                                  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
#ifndef CONFIG_MMU
        .get_unmapped_area      = shm_get_unmapped_area,
#endif
};

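/*
 * Hugetlb-backed segments always need ->get_unmapped_area so the mapping
 * can be placed at a suitably aligned address; is_file_shm_hugepages()
 * below relies on this separate f_op instance to identify them.
 */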
static const struct file_operations shm_file_operations_huge = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area      = shm_get_unmapped_area,
};

int is_file_shm_hugepages(struct file *file)
{
        return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;
        int acctflag = 0;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup applies strict accounting */
                if (shmflg & SHM_NORESERVE)
                        acctflag = VM_NORESERVE;
                file = hugetlb_file_setup(name, size, acctflag,
                                          &shp->mlock_user, HUGETLB_SHMFS_INODE);
        } else {
                /*
                 * Do not allow no accounting for OVERCOMMIT_NEVER, even
                 * if it's asked for.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        if (is_file_hugepages(file) && shp->mlock_user)
                user_shm_unlock(size, shp->mlock_user);
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                  struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

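/*
 * shmget() itself only packs the arguments; the common ipcget() helper
 * performs the key lookup and calls back into newseg(), shm_security()
 * and shm_more_checks() as needed.
 */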
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

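/*
 * Translate the in-kernel shmid64_ds into whichever user-space layout
 * was requested: IPC_64 is a straight copy, IPC_OLD needs the fields
 * squeezed into the legacy shmid_ds.
 */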
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->shm_perm.uid       = tbuf_old.shm_perm.uid;
                out->shm_perm.gid       = tbuf_old.shm_perm.gid;
                out->shm_perm.mode      = tbuf_old.shm_perm.mode;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                         unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct kern_ipc_perm *ipc;
                struct shmid_kernel *shp;
                struct inode *inode;

                ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (ipc == NULL)
                        continue;
                shp = container_of(ipc, struct shmid_kernel, shm_perm);

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        struct hstate *h = hstate_file(shp->shm_file);
                        *rss += pages_per_huge_page(h) * mapping->nrpages;
                } else {
#ifdef CONFIG_SHMEM
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
#else
                        *rss += inode->i_mapping->nrpages;
#endif
                }

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shmid64_ds shmid64;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&shmid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipc_update_perm(&shmid64.shm_perm, ipcp);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }
out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

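/*
 * shmctl() dispatch: the read-only commands are handled inline below,
 * while IPC_RMID and IPC_SET, which need the rw_mutex taken for
 * writing, are passed on to shmctl_down().
 */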
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                struct file *uninitialized_var(shm_file);

                lru_add_drain_all();  /* drain pagevecs to lru lists */

                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                audit_ipc_obj(&(shp->shm_perm));

                if (!capable(CAP_IPC_LOCK)) {
                        uid_t euid = current_euid();
                        err = -EPERM;
                        if (euid != shp->shm_perm.uid &&
                            euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        fmode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA - 1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA - 1); /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path = shp->shm_file->f_path;
        path_get(&path);
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        file = alloc_file(&path, f_mode,
                          is_file_hugepages(shp->shm_file) ?
                                &shm_file_operations_huge :
                                &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        path_put(&path);
        goto out_nattch;
}

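/*
 * The syscall wrapper around do_shmat(): a successful attach address can
 * look like a negative errno, hence force_successful_syscall_return().
 */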
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)shmaddr;
        int retval = -EINVAL;
#ifdef CONFIG_MMU
        loff_t size = 0;
        struct vm_area_struct *next;
#endif

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: it searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         *
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                    (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

#else /* CONFIG_MMU */
        /* under NOMMU conditions, the exact address to be destroyed must be
         * given */
        retval = -EINVAL;
        if (vma != NULL && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
                do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                retval = 0;
        }

#endif

        up_write(&mm->mmap_sem);
        return retval;
}

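/*
 * Formats one row of /proc/sysvipc/shm; the columns must line up with
 * the header string passed to ipc_init_proc_interface() in shm_init().
 */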
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

        return seq_printf(s,
                          "%10d %10d %4o " SIZE_SPEC " %5u %5u "
                          "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim);
}
#endif