#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}
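
/*
 * A worked note on the unit conversion above (an illustrative aside, not
 * from the original source): counters such as total_vm are kept in pages,
 * so shifting left by (PAGE_SHIFT - 10) converts pages to kB.  With 4K
 * pages (PAGE_SHIFT == 12), one page is 1 << 2 == 4 kB, so e.g.
 * total_vm == 300 pages prints as 300 << 2 == 1200 kB.  The text
 * calculation instead starts from a byte count and shifts right by 10.
 */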

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
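
/*
 * Note (illustrative, not from the original source): unlike the kB values
 * of task_mem() above, the task_statm() values backing /proc/pid/statm
 * are plain page counts; userspace multiplies by the page size (e.g.
 * getpagesize()) to recover bytes.
 */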

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
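
/*
 * Worked example of the fixed-point accounting (an illustrative aside,
 * assuming 4K pages and PSS_SHIFT == 12): for a page shared by 3 tasks,
 * each sharer accumulates (4096 << 12) / 3 == 5592405, i.e. about
 * 1365.33 bytes once shifted back down by PSS_SHIFT.  The truncation
 * loses less than one fixed-point unit (2^-12 byte) per page per task,
 * which is why the accumulated error stays tiny across millions of pages.
 */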

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (!simple_strtol(buffer, &end, 0))
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (!is_vm_hugetlb_page(vma))
				walk_page_range(vma->vm_start, vma->vm_end,
						&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}
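
/*
 * Typical usage, sketched from the behaviour above (not from the original
 * source): writing any string that simple_strtol() parses as non-zero,
 * e.g.
 *
 *	echo 1 > /proc/<pid>/clear_refs
 *
 * clears the pte young bits and PG_referenced for every non-hugetlb VMA,
 * after which the Referenced: lines of /proc/<pid>/smaps count up from
 * zero again, giving a crude working-set estimate.
 */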

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};

struct pagemapread {
	u64 __user *out, *end;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
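
/*
 * Worked encoding example (an illustrative aside): with
 * PM_STATUS_OFFSET == 61 and PM_PSHIFT_OFFSET == 55, a present 4K page at
 * pfn 0x1234 is reported as
 *
 *	PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT
 *	  == 0x1234 | (12ULL << 55) | (1ULL << 63)
 *
 * i.e. bit 63 set (present), page shift 12 in bits 55-60, and the pfn in
 * the low 55 bits.
 */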

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	if (put_user(pfn, pm->out))
		return -EFAULT;
	pm->out++;
	if (pm->out >= pm->end)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;

	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-55  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-55  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	ret = 0;
	if (pagecount == 0)
		goto out_mm;
	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
	ret = -ENOMEM;
	if (!pages)
		goto out_mm;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		pagecount = ret;
		ret = -EFAULT;
		goto out_pages;
	}

	pm.out = (u64 __user *)buf;
	pm.end = (u64 __user *)(buf + count);

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
	if (ret == PM_END_OF_BUFFER)
		ret = 0;
	/* don't need mmap_sem for these, but this looks cleaner */
	*ppos += (char __user *)pm.out - buf;
	if (!ret)
		ret = (char __user *)pm.out - buf;

out_pages:
	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}
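
/*
 * A minimal userspace sketch of the read path above (an illustration only,
 * guarded out of the build; error handling trimmed): look up the entry for
 * one virtual address by seeking to vpn * sizeof(u64) and reading 8 bytes,
 * then pick apart the bits documented in the comment before pagemap_read().
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int dump_entry(uintptr_t vaddr)
{
	long psz = sysconf(_SC_PAGESIZE);
	uint64_t entry;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	/* one u64 per virtual page, indexed by virtual page number */
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(vaddr / psz) * sizeof(entry)) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);
	printf("present=%d swapped=%d pfn=0x%llx\n",
	       (int)(entry >> 63), (int)((entry >> 62) & 1),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}
#endif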

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif