#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

#include "internal.h"

char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}
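
/*
 * Illustrative note (not part of the original file): the buffer filled
 * above ends up in /proc/<pid>/status.  A typical excerpt, with made-up
 * values, looks like:
 *
 *	VmPeak:	    4756 kB
 *	VmSize:	    4744 kB
 *	VmLck:	       0 kB
 *	VmHWM:	     648 kB
 *	VmRSS:	     648 kB
 *	VmData:	     156 kB
 *	VmStk:	      88 kB
 *	VmExe:	      68 kB
 *	VmLib:	    1412 kB
 *	VmPTE:	      20 kB
 */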

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
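
/*
 * Illustrative note (not from this file): these counts are in pages,
 * not kB.  The caller formats them into /proc/<pid>/statm roughly as
 *
 *	size resident shared text lib data dt
 *
 * where the lib and dt columns are reported as 0 in this era.
 */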

int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The executable is the first file-backed vma with VM_EXECUTABLE set. */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};

static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else if (mm) {
		if (vma->vm_start <= mm->start_brk &&
				vma->vm_end >= mm->brk) {
			pad_len_spaces(m, len);
			seq_puts(m, "[heap]");
		} else if (vma->vm_start <= mm->start_stack &&
				vma->vm_end >= mm->start_stack) {
			pad_len_spaces(m, len);
			seq_puts(m, "[stack]");
		}
	} else {
		pad_len_spaces(m, len);
		seq_puts(m, "[vdso]");
	}
	seq_putc(m, '\n');
180 "Shared_Clean: %8lu kB\n"
181 "Shared_Dirty: %8lu kB\n"
182 "Private_Clean: %8lu kB\n"
183 "Private_Dirty: %8lu kB\n",
184 (vma
->vm_end
- vma
->vm_start
) >> 10,
186 mss
->shared_clean
>> 10,
187 mss
->shared_dirty
>> 10,
188 mss
->private_clean
>> 10,
189 mss
->private_dirty
>> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
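
/*
 * Illustrative /proc/<pid>/maps line as formatted above (path and
 * numbers invented):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345    /bin/cat
 */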

static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	unsigned long pfn;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;
		pfn = pte_pfn(ptent);
		if (!pfn_valid(pfn))
			continue;

		/* Heuristic: a page referenced more than once counts as shared. */
		page = pfn_to_page(pfn);
		if (page_count(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}
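
/*
 * The helpers above mirror the kernel's standard four-level page-table
 * walk: smaps_pgd_range() iterates PGD entries and descends through the
 * PUD and PMD levels to smaps_pte_range(), which does the actual
 * accounting.  At each level, *_addr_end() clamps the step to the next
 * level boundary (or to 'end'), and *_none_or_clear_bad() skips empty
 * or corrupt entries.
 */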

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	if (vma->vm_mm)
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
	return show_map_internal(m, v, &mss);
}
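
/*
 * Illustrative /proc/<pid>/smaps entry as produced by show_smap()
 * (numbers invented):
 *
 *	08048000-0804c000 r-xp 00000000 03:01 12345    /bin/cat
 *	Size:                16 kB
 *	Rss:                 12 kB
 *	Shared_Clean:        12 kB
 *	Shared_Dirty:         0 kB
 *	Private_Clean:        0 kB
 *	Private_Dirty:        0 kB
 */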

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL) ? 0 : -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;

	if (vma && vma != get_gate_vma(task)) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = get_gate_vma(task);

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	m_stop(m, v);
	return (vma != tail_vma) ? tail_vma : NULL;
}
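
/*
 * Descriptive note: m_start(), m_next() and m_stop() implement the
 * seq_file iterator protocol for the maps files.  start() returns the
 * element at *pos (a vma, with the gate vma as a sentinel tail),
 * next() advances to the following vma, and stop() drops the mmap_sem
 * and the mm reference.  m->version caches the last vma's start address
 * so a re-entered read can resume via find_vma() instead of rescanning
 * the vma list from the head.
 */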

struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};
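
/*
 * Hedged sketch (the real hookup lives elsewhere in fs/proc): an open()
 * handler typically binds one of these tables to the file via
 * seq_open() and stashes the task for the iterators, roughly:
 *
 *	int ret = seq_open(file, &proc_pid_maps_op);
 *	if (!ret) {
 *		struct seq_file *m = file->private_data;
 *		m->private = task;
 *	}
 */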

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long mapped;
	unsigned long mapcount_max;
	unsigned long node[MAX_NUMNODES];
};

/*
 * Calculate numa node maps for a vma
 */
static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
{
	int i;
	struct page *page;
	unsigned long vaddr;
	struct mm_struct *mm = vma->vm_mm;
	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);

	if (!md)
		return NULL;
	md->pages = 0;
	md->anon = 0;
	md->mapped = 0;
	md->mapcount_max = 0;
	for_each_node(i)
		md->node[i] = 0;

	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		page = follow_page(mm, vaddr, 0);
		if (page) {
			int count = page_mapcount(page);

			if (count)
				md->mapped++;
			if (count > md->mapcount_max)
				md->mapcount_max = count;
			md->pages++;
			if (PageAnon(page))
				md->anon++;
			md->node[page_to_nid(page)]++;
		}
	}
	return md;
}

static int show_numa_map(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mempolicy *pol;
	struct numa_maps *md;
	struct zone **z;
	int n;
	int first;

	if (!vma->vm_mm)
		return 0;

	md = get_numa_maps(vma);
	if (!md)
		return 0;

	seq_printf(m, "%08lx", vma->vm_start);
	pol = get_vma_policy(task, vma, vma->vm_start);
	/* Print policy */
	switch (pol->policy) {
	case MPOL_PREFERRED:
		seq_printf(m, " prefer=%d", pol->v.preferred_node);
		break;
	case MPOL_BIND:
		seq_printf(m, " bind={");
		first = 1;
		for (z = pol->v.zonelist->zones; *z; z++) {
			if (!first)
				seq_putc(m, ',');
			else
				first = 0;
			seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
					(*z)->name);
		}
		seq_putc(m, '}');
		break;
	case MPOL_INTERLEAVE:
		seq_printf(m, " interleave={");
		first = 1;
		for_each_node(n) {
			if (node_isset(n, pol->v.nodes)) {
				if (!first)
					seq_putc(m, ',');
				else
					first = 0;
				seq_printf(m, "%d", n);
			}
		}
		seq_putc(m, '}');
		break;
	default:
		seq_printf(m, " default");
		break;
	}

	seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
			md->mapcount_max, md->pages, md->mapped);
	if (md->anon)
		seq_printf(m, " Anon=%lu", md->anon);

	for_each_online_node(n) {
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
	}
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
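
/*
 * Illustrative /proc/<pid>/numa_maps line as built above (numbers
 * invented):
 *
 *	2000000000000000 interleave={0,1} MaxRef=43 Pages=96 Mapped=96 N0=48 N1=48
 */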

struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};
#endif /* CONFIG_NUMA */