/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>

#include <asm/uaccess.h>

#include "util.h"
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids init_shm_ids;

#define shm_ids(ns)	(*((ns)->ids[IPC_SHM_IDS]))

#define shm_lock(ns, id)		\
	((struct shmid_kernel *)ipc_lock(&shm_ids(ns), id))
#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_get(ns, id)			\
	((struct shmid_kernel *)ipc_get(&shm_ids(ns), id))
#define shm_buildid(ns, id, seq)	\
	ipc_buildid(&shm_ids(ns), id, seq)
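/*
 * shm_lock() looks a segment up by slot index and returns it with the
 * per-object lock held; shm_buildid() folds the slot index and the slot's
 * sequence number into the user-visible shmid, which is what lets
 * shm_checkid() below reject stale ids that refer to deleted segments.
 */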
static int newseg(struct ipc_namespace *ns, key_t key,
		  int shmflg, size_t size);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
static void __ipc_init __shm_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_SHM_IDS] = ids;
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(ids, 1);
}
static void do_shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}
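/*
 * do_shm_rmid() implements the deferred-destroy rule described at the
 * IPC_RMID case in sys_shmctl() below: while attaches remain, the segment
 * is only marked SHM_DEST and its key is replaced by IPC_PRIVATE so new
 * shmget() calls cannot find it; shm_close() performs the real destruction
 * when the last attach disappears.
 */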
int shm_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__shm_init_ns(ns, ids);
	return 0;
}
void shm_exit_ns(struct ipc_namespace *ns)
{
	int i;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids(ns).mutex);
	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		shp = shm_lock(ns, i);
		if (shp == NULL)
			continue;

		do_shm_rmid(ns, shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	ipc_fini_ids(ns->ids[IPC_SHM_IDS]);
	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}
void __init shm_init (void)
{
	__shm_init_ns(&init_ipc_ns, &init_shm_ids);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline int shm_checkid(struct ipc_namespace *ns,
		struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids(ns), &s->shm_perm, id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(struct ipc_namespace *ns, int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids(ns), id);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	mutex_lock(&shm_ids(ns).mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);
}
static struct page *shm_nopage(struct vm_area_struct *vma,
			       unsigned long address, int *type)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->nopage(vma, address, type);
}
#ifdef CONFIG_NUMA
int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	else
		pol = current->mempolicy;

	return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}
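/*
 * The stacking trick above: the backing shmem (or hugetlbfs) file first
 * installs its own vm_ops; shm_mmap() saves that pointer in shm_file_data
 * and substitutes shm_vm_ops, so the shm layer observes every vm-area
 * open/close (to maintain shm_nattch and the time stamps) while
 * shm_nopage() and the NUMA policy hooks simply delegate to the saved
 * vm_ops.
 */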
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}
static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}
int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}
static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shm_nopage,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
static int newseg(struct ipc_namespace *ns, key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not turn off accounting for OVERCOMMIT_NEVER, even
		 * if it is asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf(name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(ns, shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(ns, id, shp->shm_perm.seq);
	shp->shm_file = file;

	ns->shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	mutex_lock(&shm_ids(ns).mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(ns, key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids(ns), key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(ns, key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(ns, id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(ns, id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids(ns).mutex);

	return err;
}
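/*
 * Illustrative only, not part of this file's build: a minimal userspace
 * sketch of the lookup logic above (the key, size and mode are made-up
 * values).
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(0x1234, 4096, IPC_CREAT | 0600);  // create or look up
 *	int ex = shmget(0x1234, 4096,
 *			IPC_CREAT | IPC_EXCL | 0600);     // fails with EEXIST
 *	int pv = shmget(IPC_PRIVATE, 4096, 0600);         // always a new segment
 */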
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
			 unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids(ns).max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(ns, i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE / PAGE_SIZE) * mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids(ns).max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids(ns).mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids(ns).max_id;
		mutex_unlock(&shm_ids(ns).mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids(ns).max_id)
				goto out_unlock;
			result = shm_buildid(ns, shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(ns, shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(ns, shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, shp);
		mutex_unlock(&shm_ids(ns).mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids(ns).mutex);
		shp = shm_lock(ns, shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(ns, shp, shmid);
		if (err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}
	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids(ns).mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
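/*
 * Illustrative only, not part of this file's build: the IPC_STAT and
 * IPC_RMID branches above as seen from userspace ("id" is assumed to come
 * from a successful shmget()).
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		;			// ds holds the tbuf fields filled above
 *	shmctl(id, IPC_RMID, NULL);	// marks SHM_DEST; the segment is freed
 *					// once shm_nattch drops to zero
 */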
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock(ns, shmid);
	if (shp == NULL)
		goto out;

	err = shm_checkid(ns, shp, shmid);
	if (err)
		goto out_unlock;

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt    = mntget(shp->shm_file->f_path.mnt);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	err = -ENOMEM;
	file = get_empty_filp();
	if (!file)
		goto out_free;

	file->f_op = &shm_file_operations;
	file->private_data = sfd;
	file->f_path = path;
	file->f_mapping = shp->shm_file->f_mapping;
	file->f_mode = f_mode;

	sfd->id = shp->id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	mutex_lock(&shm_ids(ns).mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids(ns).mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	mntput(path.mnt);
	goto out_nattch;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
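/*
 * Illustrative only, not part of this file's build: attaching from
 * userspace. With a NULL address the kernel chooses one; SHM_RND rounds an
 * explicit address down to an SHMLBA boundary, exactly as do_shmat() does
 * above ("some_addr" is a made-up variable).
 *
 *	void *p = shmat(id, NULL, 0);			// kernel picks address
 *	void *q = shmat(id, some_addr, SHM_RND);	// rounded down to SHMLBA
 */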
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 *
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
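/*
 * Illustrative only, not part of this file's build: detaching from
 * userspace. shmdt() takes the address returned by shmat(); the two passes
 * above make sure a segment fragmented by mprotect()/munmap() is unmapped
 * completely.
 *
 *	shmdt(p);	// runs shm_close(), and shm_destroy() if SHM_DEST is set
 */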
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif /* CONFIG_PROC_FS */