/*
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>

#include <asm/uaccess.h>
#define shm_flags	shm_perm.mode
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;
#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
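/*
 * Note: ipc_buildid() folds the slot's sequence counter into the
 * user-visible id, which is how shm_checkid() later rejects a stale id
 * whose slot has since been recycled for a new segment.
 */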
static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}
static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}
static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}
static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down (&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	up (&shm_ids.sem);
}
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	vma->vm_ops = &shm_vm_ops;
	shm_inc(file->f_dentry->d_inode->i_ino);
	return 0;
}
static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
};
static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
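/*
 * Since shm_open()/shm_close() run on every vma duplication and teardown,
 * shm_nattch stays accurate across fork() and exit() as well as across
 * explicit shmat()/shmdt() calls.
 */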
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if (shp == NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	up(&shm_ids.sem);

	return err;
}
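/*
 * Userspace view of the three lookup outcomes above (illustrative sketch
 * only, error handling omitted):
 *
 *	shmget(IPC_PRIVATE, size, 0600);		 always a new segment
 *	shmget(key, size, IPC_CREAT | 0600);		 find-or-create
 *	shmget(key, size, IPC_CREAT | IPC_EXCL | 0600);	 EEXIST if the key
 *							 is already in use
 */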
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_flags;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_flags;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
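/*
 * shm_get_stat() feeds the SHM_INFO command below: it walks every allocated
 * segment and totals resident pages (rss) and swapped-out pages (swp),
 * taking each shmem inode's info->lock so the two counters are read
 * consistently.
 */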
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if(err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down(&shm_ids.sem);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		up(&shm_ids.sem);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch	= shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_flags |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_flags &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
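		/*
		 * Illustrative userspace sequence (a sketch, not from this
		 * file): after
		 *
		 *	shmctl(id, IPC_RMID, NULL);
		 *
		 * existing attaches keep working; the segment is only
		 * destroyed once shm_nattch drops to zero in shm_close().
		 * Meanwhile the key is rewritten to IPC_PRIVATE below, so
		 * new shmget() lookups can no longer find it.
		 */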
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		up(&shm_ids.sem);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	up(&shm_ids.sem);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL; /* must be fixed address */

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		goto out;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	down (&shm_ids.sem);
	if(!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	up (&shm_ids.sem);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
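/*
 * Userspace view, for reference (illustrative sketch only):
 *
 *	void *p = shmat(id, NULL, 0);	       kernel picks the address
 *	void *q = shmat(id, hint, SHM_RND);    hint rounded down to SHMLBA
 *
 * On error shmat() returns (void *)-1; on success the wrapper above hands
 * the address that do_shmat() stored through *raddr back to userspace.
 */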
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	down_write(&mm->mmap_sem);
	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}
	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_flags,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif