/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 */
#include <linux/config.h>
#include <linux/malloc.h>
#include <linux/shm.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
extern int ipcperms (struct ipc_perm *ipcp, short shmflg);
static int findkey (key_t key);
static int newseg (key_t key, int shmflg, size_t size);
static int shm_map (struct vm_area_struct *shmd);
static void killseg (int id);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static struct page * shm_nopage(struct vm_area_struct *, unsigned long, int);
static int shm_swapout(struct page *, struct file *);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif
static int shm_tot = 0; /* total number of shared memory pages */
static int shm_rss = 0; /* number of shared memory pages that are in memory */
static int shm_swp = 0; /* number of shared memory pages that are in swap */
static int max_shmid = 0; /* every used id is <= max_shmid */
static DECLARE_WAIT_QUEUE_HEAD(shm_wait); /* calling findkey() may need to wait */
static struct shmid_kernel *shm_segs[SHMMNI];

static unsigned short shm_seq = 0; /* incremented, for recognizing stale ids */

spinlock_t shm_lock = SPIN_LOCK_UNLOCKED;

static ulong swap_attempts = 0;
static ulong swap_successes = 0;
static ulong used_segs = 0;
void __init shm_init (void)
{
	int id;

	for (id = 0; id < SHMMNI; id++)
		shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
	shm_tot = shm_rss = shm_seq = max_shmid = used_segs = 0;
	init_waitqueue_head(&shm_wait);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
	return;
}
static int findkey (key_t key)
{
	int id;
	struct shmid_kernel *shp;

	for (id = 0; id <= max_shmid; id++) {
		if ((shp = shm_segs[id]) == IPC_NOID) {
			/* slot is being set up by newseg(); sleep until it is ready */
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&shm_wait, &wait);
			for (;;) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				if ((shp = shm_segs[id]) != IPC_NOID)
					break;
				spin_unlock(&shm_lock);
				schedule();
				spin_lock(&shm_lock);
			}
			__set_current_state(TASK_RUNNING);
			remove_wait_queue(&shm_wait, &wait);
		}
		if (shp == IPC_UNUSED)
			continue;
		if (key == shp->u.shm_perm.key)
			return id;
	}
	return -1;
}
/*
 * allocate new shmid_kernel and pgtable. protected by shm_segs[id] = NOID.
 */
static int newseg (key_t key, int shmflg, size_t size)
{
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int id;

	if (size < SHMMIN)
		return -EINVAL;
	if (shm_tot + numpages >= SHMALL)
		return -ENOSPC;
	for (id = 0; id < SHMMNI; id++)
		if (shm_segs[id] == IPC_UNUSED) {
			shm_segs[id] = (struct shmid_kernel *) IPC_NOID;
			goto found;
		}
	return -ENOSPC;

found:
	spin_unlock(&shm_lock);
	shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_KERNEL);
	if (!shp) {
		spin_lock(&shm_lock);
		shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
		wake_up (&shm_wait);
		return -ENOMEM;
	}

	shp->shm_pages = (pte_t *) vmalloc (numpages*sizeof(pte_t));
	if (!shp->shm_pages) {
		kfree(shp);
		spin_lock(&shm_lock);
		shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
		wake_up (&shm_wait);
		return -ENOMEM;
	}
	memset(shp->shm_pages, 0, numpages*sizeof(pte_t));

	shp->u.shm_perm.key = key;
	shp->u.shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->u.shm_perm.cuid = shp->u.shm_perm.uid = current->euid;
	shp->u.shm_perm.cgid = shp->u.shm_perm.gid = current->egid;
	shp->u.shm_segsz = size;
	shp->u.shm_cpid = current->pid;
	shp->attaches = NULL;
	shp->u.shm_lpid = shp->u.shm_nattch = 0;
	shp->u.shm_atime = shp->u.shm_dtime = 0;
	shp->u.shm_ctime = CURRENT_TIME;
	shp->shm_npages = numpages;

	spin_lock(&shm_lock);
	if (id > max_shmid)
		max_shmid = id;
	shp->u.shm_perm.seq = shm_seq;
	shm_segs[id] = shp;
	used_segs++;
	shm_tot += numpages;
	wake_up (&shm_wait);
	return (unsigned int) shp->u.shm_perm.seq * SHMMNI + id;
}
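/*
 * A minimal sketch of the id arithmetic used just above: the returned
 * shmid packs the generation counter and the slot index together.  The
 * example_* helpers are hypothetical and exist only for illustration;
 * the real code open-codes the `%' and `/' below, e.g. in sys_shmctl()
 * and sys_shmat().
 */
#if 0
static inline int example_build_id (unsigned int slot, unsigned int seq)
{
	return (int) (seq * SHMMNI + slot);	/* what newseg() returns */
}

static inline unsigned int example_id_to_slot (int shmid)
{
	return (unsigned int) shmid % SHMMNI;	/* index into shm_segs[] */
}

static inline unsigned int example_id_to_seq (int shmid)
{
	return (unsigned int) shmid / SHMMNI;	/* checked against shm_perm.seq */
}
#endif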
size_t shmmax = SHMMAX;

asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&current->mm->mmap_sem);
	spin_lock(&shm_lock);
	if (size > shmmax) {
		err = -EINVAL;
	} else if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = findkey (key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_segs[id];
		if (shp->u.shm_perm.mode & SHM_DEST)
			err = -EIDRM;
		else if (size > shp->u.shm_segsz)
			err = -EINVAL;
		else if (ipcperms (&shp->u.shm_perm, shmflg))
			err = -EACCES;
		else
			err = (int) shp->u.shm_perm.seq * SHMMNI + id;
	}
	spin_unlock(&shm_lock);
	up(&current->mm->mmap_sem);
	return err;
}
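/*
 * Example of the flag handling above: shmget(key, size, IPC_CREAT | 0600)
 * returns the existing segment for `key' if there is one (subject to the
 * ipcperms() and size checks), while IPC_CREAT | IPC_EXCL fails with
 * -EEXIST instead of reusing it.  IPC_PRIVATE always creates a fresh
 * segment.
 */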
/*
 * Only called after testing nattch and SHM_DEST.
 * Here pages, pgtable and shmid_kernel are freed.
 */
static void killseg (int id)
{
	struct shmid_kernel *shp;
	int i, numpages;
	int rss, swp;

	shp = shm_segs[id];
	if (shp == IPC_NOID || shp == IPC_UNUSED)
		BUG();
	shp->u.shm_perm.seq++;     /* for shmat */
	shm_seq = (shm_seq+1) % ((unsigned)(1<<31)/SHMMNI); /* increment, but avoid overflow */
	shm_segs[id] = (struct shmid_kernel *) IPC_UNUSED;
	used_segs--;
	if (id == max_shmid)
		while (max_shmid && (shm_segs[--max_shmid] == IPC_UNUSED));
	if (!shp->shm_pages)
		BUG();
	spin_unlock(&shm_lock);
	numpages = shp->shm_npages;
	for (i = 0, rss = 0, swp = 0; i < numpages ; i++) {
		pte_t pte;
		pte = shp->shm_pages[i];
		if (pte_none(pte))
			continue;
		if (pte_present(pte)) {
			__free_page (pte_page(pte));
			rss++;
		} else {
			swap_free(pte_to_swp_entry(pte));
			swp++;
		}
	}
	vfree(shp->shm_pages);
	kfree(shp);
	spin_lock(&shm_lock);
	shm_rss -= rss;
	shm_swp -= swp;
	shm_tot -= numpages;
	return;
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shmid_ds tbuf;
	struct shmid_kernel *shp;
	struct ipc_perm *ipcp;
	int id, err = -EINVAL;

	if (cmd < 0 || shmid < 0)
		goto out_unlocked;
	if (cmd == IPC_SET) {
		err = -EFAULT;
		if(copy_from_user (&tbuf, buf, sizeof (*buf)))
			goto out_unlocked;
	}
	spin_lock(&shm_lock);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo shminfo;
		err = -EFAULT;
		if (!buf)
			goto out;
		shminfo.shmmni = SHMMNI;
		shminfo.shmmax = shmmax;
		shminfo.shmmin = SHMMIN;
		shminfo.shmall = SHMALL;
		shminfo.shmseg = SHMSEG;
		spin_unlock(&shm_lock);
		if(copy_to_user (buf, &shminfo, sizeof(struct shminfo)))
			goto out_unlocked;
		spin_lock(&shm_lock);
		err = max_shmid;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;
		err = -EFAULT;
		shm_info.used_ids = used_segs;
		shm_info.shm_rss = shm_rss;
		shm_info.shm_tot = shm_tot;
		shm_info.shm_swp = shm_swp;
		shm_info.swap_attempts = swap_attempts;
		shm_info.swap_successes = swap_successes;
		spin_unlock(&shm_lock);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
			goto out_unlocked;
		spin_lock(&shm_lock);
		err = max_shmid;
		goto out;
	}
	case SHM_STAT:
		err = -EINVAL;
		if (shmid > max_shmid)
			goto out;
		shp = shm_segs[shmid];
		if (shp == IPC_UNUSED || shp == IPC_NOID)
			goto out;
		if (ipcperms (&shp->u.shm_perm, S_IRUGO))
			goto out;
		id = (unsigned int) shp->u.shm_perm.seq * SHMMNI + shmid;
		err = -EFAULT;
		spin_unlock(&shm_lock);
		if(copy_to_user (buf, &shp->u, sizeof(*buf)))
			goto out_unlocked;
		spin_lock(&shm_lock);
		err = id;
		goto out;
	}

	shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
	err = -EINVAL;
	if (shp == IPC_UNUSED || shp == IPC_NOID)
		goto out;
	err = -EIDRM;
	if (shp->u.shm_perm.seq != (unsigned int) shmid / SHMMNI)
		goto out;
	ipcp = &shp->u.shm_perm;

	switch (cmd) {
	case SHM_UNLOCK:
		err = -EPERM;
		if (!capable(CAP_IPC_LOCK))
			goto out;
		err = -EINVAL;
		if (!(ipcp->mode & SHM_LOCKED))
			goto out;
		ipcp->mode &= ~SHM_LOCKED;
		break;
	case SHM_LOCK:
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
		err = -EPERM;
		if (!capable(CAP_IPC_LOCK))
			goto out;
		err = -EINVAL;
		if (ipcp->mode & SHM_LOCKED)
			goto out;
		ipcp->mode |= SHM_LOCKED;
		break;
	case IPC_STAT:
		err = -EACCES;
		if (ipcperms (ipcp, S_IRUGO))
			goto out;
		err = -EFAULT;
		spin_unlock(&shm_lock);
		if(copy_to_user (buf, &shp->u, sizeof(shp->u)))
			goto out_unlocked;
		spin_lock(&shm_lock);
		break;
	case IPC_SET:
		if (current->euid == shp->u.shm_perm.uid ||
		    current->euid == shp->u.shm_perm.cuid ||
		    capable(CAP_SYS_ADMIN)) {
			ipcp->uid = tbuf.shm_perm.uid;
			ipcp->gid = tbuf.shm_perm.gid;
			ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
				| (tbuf.shm_perm.mode & S_IRWXUGO);
			shp->u.shm_ctime = CURRENT_TIME;
			break;
		}
		err = -EPERM;
		goto out;
	case IPC_RMID:
		if (current->euid == shp->u.shm_perm.uid ||
		    current->euid == shp->u.shm_perm.cuid ||
		    capable(CAP_SYS_ADMIN)) {
			shp->u.shm_perm.mode |= SHM_DEST;
			if (shp->u.shm_nattch <= 0)
				killseg (id);
			break;
		}
		err = -EPERM;
		goto out;
	default:
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	spin_unlock(&shm_lock);
out_unlocked:
	return err;
}
/*
 * The per process internal structure for managing segments is
 * `struct vm_area_struct'.
 * A shmat will add to and shmdt will remove from the list.
 * shmd->vm_mm		the attacher
 * shmd->vm_start	virt addr of attach, multiple of SHMLBA
 * shmd->vm_end		multiple of SHMLBA
 * shmd->vm_next	next attach for task
 * shmd->vm_next_share	next attach for segment
 * shmd->vm_pgoff	offset into segment (in pages)
 * shmd->vm_private_data	signature for this attach
 */

static struct vm_operations_struct shm_vm_ops = {
	shm_open,		/* open - callback for a new vm-area open */
	shm_close,		/* close - callback for when the vm-area is released */
	NULL,			/* no need to sync pages at unmap */
	NULL,			/* protect */
	NULL,			/* sync */
	NULL,			/* advise */
	shm_nopage,		/* nopage */
	NULL,			/* wppage */
	shm_swapout		/* swapout */
};
/* Insert shmd into the list shp->attaches */
static inline void insert_attach (struct shmid_kernel * shp, struct vm_area_struct * shmd)
{
	if((shmd->vm_next_share = shp->attaches) != NULL)
		shp->attaches->vm_pprev_share = &shmd->vm_next_share;
	shp->attaches = shmd;
	shmd->vm_pprev_share = &shp->attaches;
}

/* Remove shmd from list shp->attaches */
static inline void remove_attach (struct shmid_kernel * shp, struct vm_area_struct * shmd)
{
	if(shmd->vm_next_share)
		shmd->vm_next_share->vm_pprev_share = shmd->vm_pprev_share;
	*shmd->vm_pprev_share = shmd->vm_next_share;
}
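/*
 * A minimal sketch of walking the attach list maintained above.  The
 * helper is hypothetical and only illustrates the vm_next_share /
 * vm_pprev_share linkage; a real caller would have to hold shm_lock,
 * since insert_attach()/remove_attach() do no locking of their own.
 */
#if 0
static inline int example_count_attaches (struct shmid_kernel * shp)
{
	int n = 0;
	struct vm_area_struct * shmd;

	for (shmd = shp->attaches; shmd; shmd = shmd->vm_next_share)
		n++;
	return n;
}
#endif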
/*
 * ensure page tables exist
 * mark page table entries with shm_sgn.
 */
static int shm_map (struct vm_area_struct *shmd)
{
	unsigned long tmp;

	/* clear old mappings */
	do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

	/* add new mapping */
	tmp = shmd->vm_end - shmd->vm_start;
	if((current->mm->total_vm << PAGE_SHIFT) + tmp
	   > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
	current->mm->total_vm += tmp >> PAGE_SHIFT;
	vmlist_modify_lock(current->mm);
	insert_vm_struct(current->mm, shmd);
	merge_segments(current->mm, shmd->vm_start, shmd->vm_end);
	vmlist_modify_unlock(current->mm);

	return 0;
}
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 */
asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	struct vm_area_struct *shmd;
	int err = -EINVAL;
	unsigned int id;
	unsigned long addr;
	unsigned long len;

	down(&current->mm->mmap_sem);
	spin_lock(&shm_lock);
	if (shmid < 0)
		goto out;

	shp = shm_segs[id = (unsigned int) shmid % SHMMNI];
	if (shp == IPC_UNUSED || shp == IPC_NOID)
		goto out;

	if (!(addr = (ulong) shmaddr)) {
		if (shmflg & SHM_REMAP)
			goto out;
		err = -ENOMEM;
		addr = 0;
	again:
		if (!(addr = get_unmapped_area(addr, (unsigned long)shp->u.shm_segsz)))
			goto out;
		if(addr & (SHMLBA - 1)) {
			addr = (addr + (SHMLBA - 1)) & ~(SHMLBA - 1);
			goto again;
		}
	} else if (addr & (SHMLBA-1)) {
		if (shmflg & SHM_RND)
			addr &= ~(SHMLBA-1);       /* round down */
		else
			goto out;
	}
	/*
	 * Check if addr exceeds TASK_SIZE (from do_mmap)
	 */
	err = -EINVAL;
	len = PAGE_SIZE*shp->shm_npages;
	if (addr >= TASK_SIZE || len > TASK_SIZE || addr > TASK_SIZE - len)
		goto out;
	/*
	 * If shm segment goes below stack, make sure there is some
	 * space left for the stack to grow (presently 4 pages).
	 */
	if (addr < current->mm->start_stack &&
	    addr > current->mm->start_stack - PAGE_SIZE*(shp->shm_npages + 4))
		goto out;
	if (!(shmflg & SHM_REMAP) && find_vma_intersection(current->mm, addr, addr + (unsigned long)shp->u.shm_segsz))
		goto out;

	err = -EACCES;
	if (ipcperms(&shp->u.shm_perm, shmflg & SHM_RDONLY ? S_IRUGO : S_IRUGO|S_IWUGO))
		goto out;
	err = -EIDRM;
	if (shp->u.shm_perm.seq != (unsigned int) shmid / SHMMNI)
		goto out;

	spin_unlock(&shm_lock);
	err = -ENOMEM;
	shmd = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	spin_lock(&shm_lock);
	if (!shmd)
		goto out;
	if ((shp != shm_segs[id]) || (shp->u.shm_perm.seq != (unsigned int) shmid / SHMMNI)) {
		kmem_cache_free(vm_area_cachep, shmd);
		err = -EIDRM;
		goto out;
	}

	shmd->vm_private_data = shm_segs + id;
	shmd->vm_start = addr;
	shmd->vm_end = addr + shp->shm_npages * PAGE_SIZE;
	shmd->vm_mm = current->mm;
	shmd->vm_page_prot = (shmflg & SHM_RDONLY) ? PAGE_READONLY : PAGE_SHARED;
	shmd->vm_flags = VM_SHM | VM_MAYSHARE | VM_SHARED
			 | VM_MAYREAD | VM_MAYEXEC | VM_READ | VM_EXEC
			 | ((shmflg & SHM_RDONLY) ? 0 : VM_MAYWRITE | VM_WRITE);
	shmd->vm_file = NULL;
	shmd->vm_pgoff = 0;
	shmd->vm_ops = &shm_vm_ops;

	shp->u.shm_nattch++;	    /* prevent destruction */
	spin_unlock(&shm_lock);
	err = shm_map (shmd);
	spin_lock(&shm_lock);
	if (err)
		goto failed_shm_map;

	insert_attach(shp,shmd);  /* insert shmd into shp->attaches */

	shp->u.shm_lpid = current->pid;
	shp->u.shm_atime = CURRENT_TIME;

	*raddr = addr;
	err = 0;
out:
	spin_unlock(&shm_lock);
	up(&current->mm->mmap_sem);
	return err;

failed_shm_map:
	if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST)
		killseg(id);
	spin_unlock(&shm_lock);
	up(&current->mm->mmap_sem);
	kmem_cache_free(vm_area_cachep, shmd);
	return err;
}
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	struct shmid_kernel *shp;

	spin_lock(&shm_lock);
	shp = *(struct shmid_kernel **) shmd->vm_private_data;
	insert_attach(shp,shmd);  /* insert shmd into shp->attaches */
	shp->u.shm_nattch++;
	shp->u.shm_atime = CURRENT_TIME;
	shp->u.shm_lpid = current->pid;
	spin_unlock(&shm_lock);
}
/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct shmid_kernel *shp;

	spin_lock(&shm_lock);
	/* remove from the list of attaches of the shm segment */
	shp = *(struct shmid_kernel **) shmd->vm_private_data;
	remove_attach(shp,shmd);  /* remove from shp->attaches */
	shp->u.shm_lpid = current->pid;
	shp->u.shm_dtime = CURRENT_TIME;
	if (--shp->u.shm_nattch <= 0 && shp->u.shm_perm.mode & SHM_DEST) {
		unsigned int id = (struct shmid_kernel **)shmd->vm_private_data - shm_segs;
		killseg (id);
	}
	spin_unlock(&shm_lock);
}
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt (char *shmaddr)
{
	struct vm_area_struct *shmd, *shmdnext;

	down(&current->mm->mmap_sem);
	for (shmd = current->mm->mmap; shmd; shmd = shmdnext) {
		shmdnext = shmd->vm_next;
		if (shmd->vm_ops == &shm_vm_ops
		    && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr)
			do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);
	}
	up(&current->mm->mmap_sem);
	return 0;
}
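/*
 * Illustrative user-space view of the syscalls above (a sketch, not
 * kernel code; libc's shmat() wrapper supplies the `raddr' out-parameter
 * of sys_shmat() and returns the address directly).
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>

int example (void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	char *p;

	if (id < 0)
		return -1;
	p = shmat(id, NULL, 0);	/* NULL: kernel picks an SHMLBA-aligned address */
	if (p == (char *) -1)
		return -1;
	p[0] = 1;			/* first touch faults the page in via shm_nopage() */
	shmdt(p);			/* shm_close() drops nattch */
	shmctl(id, IPC_RMID, NULL);	/* sets SHM_DEST; killseg() runs once nattch == 0 */
	return 0;
}
#endif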
/*
 * Enter the shm page into the SHM data structures.
 *
 * The way "nopage" is done, we don't actually have to
 * do anything here: nopage will have filled in the shm
 * data structures already, and shm_swap_out() will just
 * work off the data structures.
 */
static int shm_swapout(struct page * page, struct file *file)
{
	return 0;
}
/*
 * page not present ... go through shm_pages
 */
static struct page * shm_nopage(struct vm_area_struct * shmd, unsigned long address, int no_share)
{
	pte_t pte;
	struct shmid_kernel *shp;
	unsigned int idx;
	struct page * page;

	shp = *(struct shmid_kernel **) shmd->vm_private_data;
	idx = (address - shmd->vm_start) >> PAGE_SHIFT;
	idx += shmd->vm_pgoff;

	spin_lock(&shm_lock);
again:
	pte = shp->shm_pages[idx];
	if (!pte_present(pte)) {
		if (pte_none(pte)) {
			spin_unlock(&shm_lock);
			page = get_free_highpage(GFP_HIGHUSER);
			if (!page)
				goto oom;
			clear_highpage(page);
			spin_lock(&shm_lock);
			if (pte_val(pte) != pte_val(shp->shm_pages[idx]))
				goto changed;
		} else {
			swp_entry_t entry = pte_to_swp_entry(pte);

			spin_unlock(&shm_lock);
			page = lookup_swap_cache(entry);
			if (!page) {
				lock_kernel();
				swapin_readahead(entry);
				page = read_swap_cache(entry);
				unlock_kernel();
				if (!page)
					goto oom;
			}
			delete_from_swap_cache(page);
			page = replace_with_highmem(page);
			swap_free(entry);
			spin_lock(&shm_lock);
			shm_swp--;
			pte = shp->shm_pages[idx];
			if (pte_present(pte))
				goto present;
		}
		shm_rss++;
		pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
		shp->shm_pages[idx] = pte;
	} else
		--current->maj_flt;	/* was incremented in do_no_page */

done:
	/* pte_val(pte) == shp->shm_pages[idx] */
	get_page(pte_page(pte));
	spin_unlock(&shm_lock);
	current->min_flt++;
	return pte_page(pte);

changed:
	__free_page(page);
	goto again;
present:
	free_page_and_swap_cache(page);
	goto done;
oom:
	return (struct page *)(-1);
}
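/*
 * A hypothetical helper, for illustration only: the three states a
 * shm_pages[] entry can be in, exactly as shm_nopage() above
 * distinguishes them.
 */
#if 0
static inline const char * example_pte_state (pte_t pte)
{
	if (pte_none(pte))
		return "never faulted in";	/* allocate and clear a fresh page */
	if (pte_present(pte))
		return "resident";		/* just get_page() it */
	return "swapped out";			/* bring it back via the swap cache */
}
#endif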
/*
 * Goes through counter = (shm_rss >> prio) present shm pages.
 */
static unsigned long swap_id = 0; /* currently being swapped */
static unsigned long swap_idx = 0; /* next to swap */

int shm_swap (int prio, int gfp_mask)
{
	pte_t page;
	struct shmid_kernel *shp;
	swp_entry_t swap_entry;
	unsigned long id, idx;
	int loop = 0;
	int counter;
	struct page * page_map;

	counter = shm_rss >> prio;
	if (!counter)
		return 0;
	lock_kernel();
	swap_entry = get_swap_page();
	if (!swap_entry.val) {
		unlock_kernel();
		return 0;
	}
	unlock_kernel();

	spin_lock(&shm_lock);
check_id:
	shp = shm_segs[swap_id];
	if (shp == IPC_UNUSED || shp == IPC_NOID || shp->u.shm_perm.mode & SHM_LOCKED) {
next_id:
		swap_idx = 0;
		if (++swap_id > max_shmid) {
			swap_id = 0;
			if (loop)
				goto failed;
			loop = 1;
		}
		goto check_id;
	}
	id = swap_id;

check_table:
	idx = swap_idx++;
	if (idx >= shp->shm_npages)
		goto next_id;

	page = shp->shm_pages[idx];
	if (!pte_present(page))
		goto check_table;
	page_map = pte_page(page);
	if ((gfp_mask & __GFP_DMA) && !PageDMA(page_map))
		goto check_table;
	if (!(gfp_mask & __GFP_HIGHMEM) && PageHighMem(page_map))
		goto check_table;
	swap_attempts++;

	if (--counter < 0) { /* failed */
failed:
		spin_unlock(&shm_lock);
		lock_kernel();
		swap_free(swap_entry);
		unlock_kernel();
		return 0;
	}
	if (page_count(page_map) != 1)
		goto check_table;
	if (!(page_map = prepare_highmem_swapout(page_map)))
		goto check_table;
	shp->shm_pages[idx] = swp_entry_to_pte(swap_entry);
	swap_successes++;
	shm_swp++;
	shm_rss--;
	spin_unlock(&shm_lock);

	lock_kernel();
	swap_duplicate(swap_entry);
	add_to_swap_cache(page_map, swap_entry);
	rw_swap_page(WRITE, page_map, 0);
	unlock_kernel();

	__free_page(page_map);
	return 1;
}
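/*
 * Worked example of the scan budget above: with shm_rss == 1024 resident
 * shm pages, prio == 0 lets up to 1024 candidate pages be examined in one
 * call, while prio == 6 allows only 1024 >> 6 == 16 before giving up.
 */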
/*
 * Free the swap entry and set the new pte for the shm page.
 */
static void shm_unuse_page(struct shmid_kernel *shp, unsigned long idx,
			   swp_entry_t entry, struct page *page)
{
	pte_t pte;

	pte = pte_mkdirty(mk_pte(page, PAGE_SHARED));
	shp->shm_pages[idx] = pte;
	get_page(page);
	shm_rss++;

	shm_swp--;
	spin_unlock(&shm_lock);

	swap_free(entry);

	spin_lock(&shm_lock);
}
/*
 * shm_unuse() - search for a possibly swapped out shm page.
 */
void shm_unuse(swp_entry_t entry, struct page *page)
{
	int i, n;

	spin_lock(&shm_lock);
	for (i = 0; i < SHMMNI; i++) {
		struct shmid_kernel *seg = shm_segs[i];
		if ((seg == IPC_UNUSED) || (seg == IPC_NOID))
			continue;
		for (n = 0; n < seg->shm_npages; n++) {
			if (pte_none(seg->shm_pages[n]))
				continue;
			if (pte_present(seg->shm_pages[n]))
				continue;
			if (pte_to_swp_entry(seg->shm_pages[n]).val == entry.val) {
				shm_unuse_page(seg, n, entry, page);
				goto out;
			}
		}
	}
out:
	spin_unlock(&shm_lock);
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

	spin_lock(&shm_lock);
	for(i = 0; i < SHMMNI; i++)
		if(shm_segs[i] != IPC_UNUSED) {
#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shm_segs[i]->u.shm_perm.key,
				shm_segs[i]->u.shm_perm.seq * SHMMNI + i,
				shm_segs[i]->u.shm_perm.mode,
				shm_segs[i]->u.shm_segsz,
				shm_segs[i]->u.shm_cpid,
				shm_segs[i]->u.shm_lpid,
				shm_segs[i]->u.shm_nattch,
				shm_segs[i]->u.shm_perm.uid,
				shm_segs[i]->u.shm_perm.gid,
				shm_segs[i]->u.shm_perm.cuid,
				shm_segs[i]->u.shm_perm.cgid,
				shm_segs[i]->u.shm_atime,
				shm_segs[i]->u.shm_dtime,
				shm_segs[i]->u.shm_ctime);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	*eof = 1;
done:
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	spin_unlock(&shm_lock);
	return len;
}
#endif