#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}
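/*
 * Unit conversions in task_mem(): "<< (PAGE_SHIFT-10)" turns a page
 * count into a kB count (with 4K pages, PAGE_SHIFT == 12, that is
 * "<< 2", i.e. 4 kB per page), while quantities already in bytes,
 * such as "text" and the VmPTE argument, are reduced with ">> 10".
 */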
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
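/*
 * task_statm() backs /proc/<pid>/statm, which reports page counts
 * rather than kB; hence the ">> PAGE_SHIFT" above, and the return
 * value (total_vm) is likewise in pages.
 */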
int proc_exe_link(struct inode *inode, struct path *path)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The executable is the first VM_EXECUTABLE vma backed by a file. */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*path = vma->vm_file->f_path;
		path_get(&vma->vm_file->f_path);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
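/*
 * The target width, 25 + sizeof(void*) * 6, appears to budget
 * 2 * sizeof(void*) hex digits for each of the three address fields
 * (start, end, offset) printed by show_map(), plus 25 columns for the
 * separators, permission, device and inode fields, so names line up.
 */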
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	/* The gate (tail) vma has no mm to unlock or release. */
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
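/*
 * m_start/m_next/m_stop below implement the seq_file iterator
 * protocol for the per-task map files: m_start() locates the vma at
 * *pos with mm->mmap_sem held (acquired in mm_for_maps()), m_next()
 * advances to vma->vm_next with the gate vma as a sentinel tail, and
 * m_stop() drops the lock, the mm and the task reference.
 */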
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static int show_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
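/*
 * A line produced by show_map() looks like (example values):
 *
 *	08048000-080bc000 r-xp 00000000 03:02 13130      /bin/bash
 */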
static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
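/*
 * Worked example of the fixed-point arithmetic (assuming 4K pages):
 * a page mapped by 3 processes adds (4096 << 12) / 3 = 5592405 to
 * pss; shifting right by PSS_SHIFT recovers 1365 bytes, i.e. 4096/3
 * with under a byte of rounding error per page.
 */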
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats
{
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	u64 pss;
};
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   void *private)
{
	struct mem_size_stats *mss = private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	int ret;

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
				&smaps_walk, &mss);

	ret = show_map(m, v);
	if (ret)
		return ret;

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10);

	return ret;
}
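/*
 * show_smap() thus appends a block like the following to each maps
 * line (illustrative values only):
 *
 *	Size:                464 kB
 *	Rss:                 424 kB
 *	Pss:                 424 kB
 *	Shared_Clean:          0 kB
 *	Shared_Dirty:          0 kB
 *	Private_Clean:       424 kB
 *	Private_Dirty:         0 kB
 *	Referenced:          424 kB
 */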
static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};
static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}
const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, void *private)
{
	struct vm_area_struct *vma = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
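/*
 * Usage: writing any non-zero value, e.g.
 *
 *	echo 1 > /proc/<pid>/clear_refs
 *
 * clears the accessed/referenced bits across the whole address space,
 * so a later read of smaps' "Referenced:" field reports only memory
 * touched since the write.
 */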
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (!simple_strtol(buffer, &end, 0))
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (!is_vm_hugetlb_page(vma))
				walk_page_range(mm, vma->vm_start, vma->vm_end,
						&clear_refs_walk, vma);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}
const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};
struct pagemapread {
	char __user *out, *end;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
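/*
 * Resulting entry layout: the status bits occupy bits 61-63, the page
 * shift occupies bits 55-60, and the PFN (or swap encoding) occupies
 * bits 0-54 (PM_PFRAME_MASK).
 */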
static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	/*
	 * Make sure there's room in the buffer for an
	 * entire entry.  Otherwise, only copy part of
	 * the pfn.
	 */
	if (pm->out + PM_ENTRY_BYTES >= pm->end) {
		if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
			return -EFAULT;
		pm->out = pm->end;
		return PM_END_OF_BUFFER;
	}

	/* write the whole 64-bit entry, not just its first byte */
	if (copy_to_user(pm->out, &pfn, PM_ENTRY_BYTES))
		return -EFAULT;
	pm->out += PM_ENTRY_BYTES;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    void *private)
{
	struct pagemapread *pm = private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
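/*
 * The swap encoding puts the type in the low MAX_SWAPFILES_SHIFT (5)
 * bits and the offset above them, matching the "Bits 0-4 swap type,
 * Bits 5-54 swap offset" layout documented below.
 */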
static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     void *private)
{
	struct pagemapread *pm = private;
	pte_t *pte;
	int err = 0;

	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		pte = pte_offset_map(pmd, addr);
		if (is_swap_pte(*pte))
			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
		else if (pte_present(*pte))
			pfn = PM_PFRAME(pte_pfn(*pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
		/* unmap so we're not in atomic when we copy to userspace */
		pte_unmap(pte);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

static struct mm_walk pagemap_walk = {
	.pmd_entry = pagemap_pte_range,
	.pte_hole = pagemap_pte_hole
};
/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
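/*
 * A minimal user-space lookup sketch (illustrative only, not part of
 * this file; "vaddr" and "fd" stand for a virtual address of interest
 * and an open /proc/<pid>/pagemap descriptor):
 *
 *	uint64_t ent;
 *	off_t off = (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * 8;
 *	if (pread(fd, &ent, 8, off) == 8) {
 *		if (ent >> 63)				// page present
 *			pfn = ent & ((1ULL << 55) - 1);	// bits 0-54
 *	}
 */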
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_attach(task))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if (*ppos % PM_ENTRY_BYTES)
		goto out_task;

	ret = 0;
	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	/* pin the user buffer; the walk writes entries into it */
	ret = -ENOMEM;
	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_mm;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		int i;

		/* release whatever was pinned before bailing out */
		for (i = 0; i < ret; i++)
			page_cache_release(pages[i]);
		ret = -EFAULT;
		goto out_free;
	}

	pm.out = buf;
	pm.end = buf + count;

	if (!ptrace_may_attach(task)) {
		ret = -EIO;
	} else {
		unsigned long src = *ppos;
		unsigned long svpfn = src / PM_ENTRY_BYTES;
		unsigned long start_vaddr = svpfn << PAGE_SHIFT;
		unsigned long end_vaddr = TASK_SIZE_OF(task);

		/* watch out for wraparound */
		if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
			start_vaddr = end_vaddr;

		/*
		 * The odds are that this will stop walking way
		 * before end_vaddr, because the length of the
		 * user buffer is tracked in "pm", and the walk
		 * will stop when we hit the end of the buffer.
		 */
		ret = walk_page_range(mm, start_vaddr, end_vaddr,
				      &pagemap_walk, &pm);
		if (ret == PM_END_OF_BUFFER)
			ret = 0;
		/* don't need mmap_sem for these, but this looks cleaner */
		*ppos += pm.out - buf;
		if (!ret)
			ret = pm.out - buf;
	}

	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}
const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return show_numa_map(m, v);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif