/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;
#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
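
/*
 * For reference (a sketch of the generic ipc helper from ipc/util.h, not
 * defined in this file): shm_buildid() folds the slot index and the slot's
 * sequence counter into the user-visible identifier, roughly
 *
 *	shmid = seq * SEQ_MULTIPLIER + id
 *
 * so a recycled slot yields a different shmid, which is what lets
 * shm_checkid() reject stale identifiers.
 */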
static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */
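
/*
 * Note on tuning (not code in this file): shm_ctlmax, shm_ctlall and
 * shm_ctlmni sit in the kernel sysctl table, so the limits above can be
 * read or changed at run time via /proc/sys/kernel/shmmax,
 * /proc/sys/kernel/shmall and /proc/sys/kernel/shmmni; SHMMAX, SHMALL
 * and SHMMNI only provide the boot-time defaults.
 */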
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids, &s->shm_perm, id))
		return -EIDRM;
	return 0;
}
static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids, id);
}
static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}
static inline void shm_inc(int id)
{
	struct shmid_kernel *shp;

	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *shmd)
{
	shm_inc(shmd->vm_file->f_dentry->d_inode->i_ino);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *shmd)
{
	struct file *file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	if (!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		if (!(vma->vm_flags & VM_WRITE))
			vma->vm_flags &= ~VM_MAYWRITE;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}
static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};
static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	/* round the size up to whole pages for the shm_tot accounting */
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Never disable accounting under OVERCOMMIT_NEVER, even
		 * if SHM_NORESERVE asks for it.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		BUG_ON(shp == NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);

	return err;
}
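
/*
 * Userspace view of the branches above (a minimal sketch, not part of
 * this file):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int id = shmget(key, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *
 * creates a fresh segment via newseg(), or fails with EEXIST when the
 * key already exists; without IPC_EXCL the existing segment is reused,
 * subject to the size and ipcperms() checks in the final else branch.
 */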
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		/* the old ABI carries shmmax in an int; clamp to avoid overflow */
		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if (!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if (copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		} else if (cmd == SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp, shmid);
			if (err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch	= shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if (copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if (shp == NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
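		/*
		 * Userspace illustration of that rule (sketch only):
		 *
		 *	shmctl(id, IPC_RMID, NULL);
		 *
		 * does not invalidate existing attaches; the pages go away
		 * only when shm_nattch reaches zero in shm_close(). Resetting
		 * the key below merely hides the segment from new shmget()
		 * lookups in the meantime.
		 */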
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch) {
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}
	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if (err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if (shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp, shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void *) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	shp = shm_lock(shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);
	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
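	/*
	 * Concrete case this handles (sketch): after
	 *
	 *	p = shmat(id, NULL, 0);
	 *	munmap((char *)p + getpagesize(), getpagesize());
	 *	shmdt(p);
	 *
	 * the attach has been split into two vmas. The first loop below
	 * finds the fragment still starting at p and learns the segment
	 * size from its backing file; the second loop sweeps the remaining
	 * fragments that fall inside that range.
	 */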
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	/* pick the wider format when size_t does not fit in an int */
	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif