/*
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 */
18 #include <linux/config.h>
19 #include <linux/malloc.h>
20 #include <linux/shm.h>
21 #include <linux/init.h>
22 #include <linux/file.h>
23 #include <linux/mman.h>
24 #include <linux/proc_fs.h>
25 #include <asm/uaccess.h>
29 struct shmid_kernel
/* private to the kernel */
31 struct kern_ipc_perm shm_perm
;
32 struct file
* shm_file
;
34 unsigned long shm_nattch
;
35 unsigned long shm_segsz
;
43 #define shm_flags shm_perm.mode
45 static struct file_operations shm_file_operations
;
46 static struct vm_operations_struct shm_vm_ops
;
48 static struct ipc_ids shm_ids
;
50 #define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id))
51 #define shm_unlock(id) ipc_unlock(&shm_ids,id)
52 #define shm_lockall() ipc_lockall(&shm_ids)
53 #define shm_unlockall() ipc_unlockall(&shm_ids)
54 #define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id))
55 #define shm_buildid(id, seq) \
56 ipc_buildid(&shm_ids, id, seq)
58 static int newseg (key_t key
, int shmflg
, size_t size
);
59 static void shm_open (struct vm_area_struct
*shmd
);
60 static void shm_close (struct vm_area_struct
*shmd
);
62 static int sysvipc_shm_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
);
65 size_t shm_ctlmax
= SHMMAX
;
66 size_t shm_ctlall
= SHMALL
;
67 int shm_ctlmni
= SHMMNI
;
69 static int shm_tot
; /* total number of shared memory pages */
71 void __init
shm_init (void)
73 ipc_init_ids(&shm_ids
, 1);
74 create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc
, NULL
);
77 static inline int shm_checkid(struct shmid_kernel
*s
, int id
)
79 if (ipc_checkid(&shm_ids
,&s
->shm_perm
,id
))
84 static inline struct shmid_kernel
*shm_rmid(int id
)
86 return (struct shmid_kernel
*)ipc_rmid(&shm_ids
,id
);
89 static inline int shm_addid(struct shmid_kernel
*shp
)
91 return ipc_addid(&shm_ids
, &shp
->shm_perm
, shm_ctlmni
+1);
96 static inline void shm_inc (int id
) {
97 struct shmid_kernel
*shp
;
99 if(!(shp
= shm_lock(id
)))
101 shp
->shm_atim
= CURRENT_TIME
;
102 shp
->shm_lprid
= current
->pid
;
107 /* This is called by fork, once for every shm attach. */
108 static void shm_open (struct vm_area_struct
*shmd
)
110 shm_inc (shmd
->vm_file
->f_dentry
->d_inode
->i_ino
);
114 * shm_destroy - free the struct shmid_kernel
116 * @shp: struct to free
118 * It has to be called with shp and shm_ids.sem locked
120 static void shm_destroy (struct shmid_kernel
*shp
)
122 shm_tot
-= (shp
->shm_segsz
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
124 fput (shp
->shm_file
);
129 * remove the attach descriptor shmd.
130 * free memory for segment if it is marked destroyed.
131 * The descriptor has already been removed from the current->mm->mmap list
132 * and will later be kfree()d.
134 static void shm_close (struct vm_area_struct
*shmd
)
136 struct file
* file
= shmd
->vm_file
;
137 int id
= file
->f_dentry
->d_inode
->i_ino
;
138 struct shmid_kernel
*shp
;
141 /* remove from the list of attaches of the shm segment */
142 if(!(shp
= shm_lock(id
)))
144 shp
->shm_lprid
= current
->pid
;
145 shp
->shm_dtim
= CURRENT_TIME
;
147 if(shp
->shm_nattch
== 0 &&
148 shp
->shm_flags
& SHM_DEST
)
155 static int shm_mmap(struct file
* file
, struct vm_area_struct
* vma
)
157 UPDATE_ATIME(file
->f_dentry
->d_inode
);
158 vma
->vm_ops
= &shm_vm_ops
;
159 shm_inc(file
->f_dentry
->d_inode
->i_ino
);
163 static struct file_operations shm_file_operations
= {
167 static struct vm_operations_struct shm_vm_ops
= {
168 open
: shm_open
, /* callback for a new vm-area open */
169 close
: shm_close
, /* callback for when the vm-area is released */
170 nopage
: shmem_nopage
,
173 static int newseg (key_t key
, int shmflg
, size_t size
)
176 struct shmid_kernel
*shp
;
177 int numpages
= (size
+ PAGE_SIZE
-1) >> PAGE_SHIFT
;
182 if (size
< SHMMIN
|| size
> shm_ctlmax
)
185 if (shm_tot
+ numpages
>= shm_ctlall
)
188 shp
= (struct shmid_kernel
*) kmalloc (sizeof (*shp
), GFP_USER
);
191 sprintf (name
, "SYSV%08x", key
);
192 file
= shmem_file_setup(name
, size
);
193 error
= PTR_ERR(file
);
201 shp
->shm_perm
.key
= key
;
202 shp
->shm_flags
= (shmflg
& S_IRWXUGO
);
203 shp
->shm_cprid
= current
->pid
;
205 shp
->shm_atim
= shp
->shm_dtim
= 0;
206 shp
->shm_ctim
= CURRENT_TIME
;
207 shp
->shm_segsz
= size
;
209 shp
->id
= shm_buildid(id
,shp
->shm_perm
.seq
);
210 shp
->shm_file
= file
;
211 file
->f_dentry
->d_inode
->i_ino
= shp
->id
;
212 file
->f_op
= &shm_file_operations
;
224 asmlinkage
long sys_shmget (key_t key
, size_t size
, int shmflg
)
226 struct shmid_kernel
*shp
;
230 if (key
== IPC_PRIVATE
) {
231 err
= newseg(key
, shmflg
, size
);
232 } else if ((id
= ipc_findkey(&shm_ids
, key
)) == -1) {
233 if (!(shmflg
& IPC_CREAT
))
236 err
= newseg(key
, shmflg
, size
);
237 } else if ((shmflg
& IPC_CREAT
) && (shmflg
& IPC_EXCL
)) {
243 if (shp
->shm_segsz
< size
)
245 else if (ipcperms(&shp
->shm_perm
, shmflg
))
248 err
= shm_buildid(id
, shp
->shm_perm
.seq
);
255 static inline unsigned long copy_shmid_to_user(void *buf
, struct shmid64_ds
*in
, int version
)
259 return copy_to_user(buf
, in
, sizeof(*in
));
264 ipc64_perm_to_ipc_perm(&in
->shm_perm
, &out
.shm_perm
);
265 out
.shm_segsz
= in
->shm_segsz
;
266 out
.shm_atime
= in
->shm_atime
;
267 out
.shm_dtime
= in
->shm_dtime
;
268 out
.shm_ctime
= in
->shm_ctime
;
269 out
.shm_cpid
= in
->shm_cpid
;
270 out
.shm_lpid
= in
->shm_lpid
;
271 out
.shm_nattch
= in
->shm_nattch
;
273 return copy_to_user(buf
, &out
, sizeof(out
));
286 static inline unsigned long copy_shmid_from_user(struct shm_setbuf
*out
, void *buf
, int version
)
291 struct shmid64_ds tbuf
;
293 if (copy_from_user(&tbuf
, buf
, sizeof(tbuf
)))
296 out
->uid
= tbuf
.shm_perm
.uid
;
297 out
->gid
= tbuf
.shm_perm
.gid
;
298 out
->mode
= tbuf
.shm_flags
;
304 struct shmid_ds tbuf_old
;
306 if (copy_from_user(&tbuf_old
, buf
, sizeof(tbuf_old
)))
309 out
->uid
= tbuf_old
.shm_perm
.uid
;
310 out
->gid
= tbuf_old
.shm_perm
.gid
;
311 out
->mode
= tbuf_old
.shm_flags
;
320 static inline unsigned long copy_shminfo_to_user(void *buf
, struct shminfo64
*in
, int version
)
324 return copy_to_user(buf
, in
, sizeof(*in
));
329 if(in
->shmmax
> INT_MAX
)
330 out
.shmmax
= INT_MAX
;
332 out
.shmmax
= (int)in
->shmmax
;
334 out
.shmmin
= in
->shmmin
;
335 out
.shmmni
= in
->shmmni
;
336 out
.shmseg
= in
->shmseg
;
337 out
.shmall
= in
->shmall
;
339 return copy_to_user(buf
, &out
, sizeof(out
));
346 static void shm_get_stat (unsigned long *rss
, unsigned long *swp
)
353 for(i
= 0; i
<= shm_ids
.max_id
; i
++) {
354 struct shmid_kernel
* shp
;
355 struct inode
* inode
;
360 inode
= shp
->shm_file
->f_dentry
->d_inode
;
361 spin_lock (&inode
->u
.shmem_i
.lock
);
362 *rss
+= inode
->i_mapping
->nrpages
;
363 *swp
+= inode
->u
.shmem_i
.swapped
;
364 spin_unlock (&inode
->u
.shmem_i
.lock
);
368 asmlinkage
long sys_shmctl (int shmid
, int cmd
, struct shmid_ds
*buf
)
370 struct shm_setbuf setbuf
;
371 struct shmid_kernel
*shp
;
374 if (cmd
< 0 || shmid
< 0)
377 version
= ipc_parse_version(&cmd
);
379 switch (cmd
) { /* replace with proc interface ? */
382 struct shminfo64 shminfo
;
384 memset(&shminfo
,0,sizeof(shminfo
));
385 shminfo
.shmmni
= shminfo
.shmseg
= shm_ctlmni
;
386 shminfo
.shmmax
= shm_ctlmax
;
387 shminfo
.shmall
= shm_ctlall
;
389 shminfo
.shmmin
= SHMMIN
;
390 if(copy_shminfo_to_user (buf
, &shminfo
, version
))
392 /* reading a integer is always atomic */
400 struct shm_info shm_info
;
402 memset(&shm_info
,0,sizeof(shm_info
));
405 shm_info
.used_ids
= shm_ids
.in_use
;
406 shm_get_stat (&shm_info
.shm_rss
, &shm_info
.shm_swp
);
407 shm_info
.shm_tot
= shm_tot
;
408 shm_info
.swap_attempts
= 0;
409 shm_info
.swap_successes
= 0;
410 err
= shm_ids
.max_id
;
413 if(copy_to_user (buf
, &shm_info
, sizeof(shm_info
)))
416 return err
< 0 ? 0 : err
;
421 struct shmid64_ds tbuf
;
423 memset(&tbuf
, 0, sizeof(tbuf
));
424 shp
= shm_lock(shmid
);
429 if (shmid
> shm_ids
.max_id
)
431 result
= shm_buildid(shmid
, shp
->shm_perm
.seq
);
433 err
= shm_checkid(shp
,shmid
);
439 if (ipcperms (&shp
->shm_perm
, S_IRUGO
))
441 kernel_to_ipc64_perm(&shp
->shm_perm
, &tbuf
.shm_perm
);
442 tbuf
.shm_segsz
= shp
->shm_segsz
;
443 tbuf
.shm_atime
= shp
->shm_atim
;
444 tbuf
.shm_dtime
= shp
->shm_dtim
;
445 tbuf
.shm_ctime
= shp
->shm_ctim
;
446 tbuf
.shm_cpid
= shp
->shm_cprid
;
447 tbuf
.shm_lpid
= shp
->shm_lprid
;
448 tbuf
.shm_nattch
= shp
->shm_nattch
;
450 if(copy_shmid_to_user (buf
, &tbuf
, version
))
457 /* Allow superuser to lock segment in memory */
458 /* Should the pages be faulted in here or leave it to user? */
459 /* need to determine interaction with current->swappable */
460 if (!capable(CAP_IPC_LOCK
))
463 shp
= shm_lock(shmid
);
466 err
= shm_checkid(shp
,shmid
);
470 shp
->shm_file
->f_dentry
->d_inode
->u
.shmem_i
.locked
= 1;
471 shp
->shm_flags
|= SHM_LOCKED
;
473 shp
->shm_file
->f_dentry
->d_inode
->u
.shmem_i
.locked
= 0;
474 shp
->shm_flags
&= ~SHM_LOCKED
;
482 * We cannot simply remove the file. The SVID states
483 * that the block remains until the last person
484 * detaches from it, then is deleted. A shmat() on
485 * an RMID segment is legal in older Linux and if
486 * we change it apps break...
488 * Instead we set a destroyed flag, and then blow
489 * the name away when the usage hits zero.
492 shp
= shm_lock(shmid
);
496 err
= shm_checkid(shp
, shmid
);
498 if (shp
->shm_nattch
){
499 shp
->shm_flags
|= SHM_DEST
;
500 /* Do not find it any more */
501 shp
->shm_perm
.key
= IPC_PRIVATE
;
513 if(copy_shmid_from_user (&setbuf
, buf
, version
))
516 shp
= shm_lock(shmid
);
520 err
= shm_checkid(shp
,shmid
);
524 if (current
->euid
!= shp
->shm_perm
.uid
&&
525 current
->euid
!= shp
->shm_perm
.cuid
&&
526 !capable(CAP_SYS_ADMIN
)) {
530 shp
->shm_perm
.uid
= setbuf
.uid
;
531 shp
->shm_perm
.gid
= setbuf
.gid
;
532 shp
->shm_flags
= (shp
->shm_flags
& ~S_IRWXUGO
)
533 | (setbuf
.mode
& S_IRWXUGO
);
534 shp
->shm_ctim
= CURRENT_TIME
;
554 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
556 asmlinkage
long sys_shmat (int shmid
, char *shmaddr
, int shmflg
, ulong
*raddr
)
558 struct shmid_kernel
*shp
;
564 unsigned long o_flags
;
571 if ((addr
= (ulong
)shmaddr
)) {
572 if (addr
& (SHMLBA
-1)) {
573 if (shmflg
& SHM_RND
)
574 addr
&= ~(SHMLBA
-1); /* round down */
578 flags
= MAP_SHARED
| MAP_FIXED
;
582 if (shmflg
& SHM_RDONLY
) {
587 prot
= PROT_READ
| PROT_WRITE
;
589 acc_mode
= S_IRUGO
| S_IWUGO
;
593 * We cannot rely on the fs check since SYSV IPC does have an
594 * aditional creator id...
596 shp
= shm_lock(shmid
);
599 if (ipcperms(&shp
->shm_perm
, acc_mode
)) {
603 file
= shp
->shm_file
;
607 down(¤t
->mm
->mmap_sem
);
608 user_addr
= (void *) do_mmap (file
, addr
, file
->f_dentry
->d_inode
->i_size
, prot
, flags
, 0);
609 up(¤t
->mm
->mmap_sem
);
612 if(!(shp
= shm_lock(shmid
)))
615 if(shp
->shm_nattch
== 0 &&
616 shp
->shm_flags
& SHM_DEST
)
621 *raddr
= (unsigned long) user_addr
;
623 if (IS_ERR(user_addr
))
624 err
= PTR_ERR(user_addr
);
630 * detach and kill segment if marked destroyed.
631 * The work is done in shm_close.
633 asmlinkage
long sys_shmdt (char *shmaddr
)
635 struct mm_struct
*mm
= current
->mm
;
636 struct vm_area_struct
*shmd
, *shmdnext
;
639 for (shmd
= mm
->mmap
; shmd
; shmd
= shmdnext
) {
640 shmdnext
= shmd
->vm_next
;
641 if (shmd
->vm_ops
== &shm_vm_ops
642 && shmd
->vm_start
- (shmd
->vm_pgoff
<< PAGE_SHIFT
) == (ulong
) shmaddr
)
643 do_munmap(mm
, shmd
->vm_start
, shmd
->vm_end
- shmd
->vm_start
);
/* /proc/sysvipc/shm read handler: prints one header line plus one line per
 * live segment.  NOTE(review): this extraction is badly garbled (original
 * line numbers fused into the text, lines split mid-statement) and the
 * function's tail runs past the visible chunk, so the code is left
 * byte-identical; the comments below annotate the recoverable structure. */
649 #ifdef CONFIG_PROC_FS
650 static int sysvipc_shm_read_proc(char *buffer
, char **start
, off_t offset
, int length
, int *eof
, void *data
)
/* emit the column-header line first */
657 len
+= sprintf(buffer
, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");
/* walk every allocated id slot */
659 for(i
= 0; i
<= shm_ids
.max_id
; i
++) {
660 struct shmid_kernel
* shp
;
/* BIG_STRING widens the size column for 64-bit size_t */
664 #define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
665 #define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
668 if (sizeof(size_t) <= sizeof(int))
669 format
= SMALL_STRING
;
/* format one segment line; shm_buildid yields the user-visible shmid */
672 len
+= sprintf(buffer
+ len
, format
,
674 shm_buildid(i
, shp
->shm_perm
.seq
),
/* stop once we have produced enough for this read window */
694 if(pos
> offset
+ length
)
/* standard read_proc windowing: report where the caller's offset lands */
701 *start
= buffer
+ (offset
- begin
);
702 len
-= (offset
- begin
);