[PATCH] mm: update_hiwaters just in time
fs/proc/task_mmu.c (linux-2.6.22.y-op.git)
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
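
/*
 * task_mem() formats the Vm* fields of /proc/<pid>/status into the
 * caller-supplied buffer and returns the advanced buffer pointer.
 */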
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}
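
/*
 * Total mapped address space in bytes, e.g. for the vsize field of
 * /proc/<pid>/stat.
 */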
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
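
/*
 * Page counts for /proc/<pid>/statm; all values are in pages, and the
 * return value is total_vm.
 */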
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
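
/*
 * Resolve /proc/<pid>/exe: scan the VMA list for the first executable
 * file mapping (VM_EXECUTABLE) and hand back references to its
 * vfsmount and dentry.
 */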
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = proc_task(inode);
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_vfsmnt);
		*dentry = dget(vma->vm_file->f_dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}
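
/*
 * Pad out to the name column: show_map_internal() records the width of
 * the variable-length map header via %n and passes it in as len.
 */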
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
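
/*
 * Per-vma totals gathered by the smaps page-table walk below; all
 * fields are byte counts, printed as kB by show_map_internal().
 */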
struct mem_size_stats
{
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
};
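
/*
 * Emit one line of /proc/<pid>/maps:
 *	start-end perms offset dev:inode	[name]
 * and, when called from show_smap() with a non-NULL mss, append the
 * Size/Rss/Shared_*/Private_* breakdown for /proc/<pid>/smaps.
 */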
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
	} else {
		if (mm) {
			if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
				pad_len_spaces(m, len);
				seq_puts(m, "[heap]");
			} else {
				if (vma->vm_start <= mm->start_stack &&
					vma->vm_end >= mm->start_stack) {
					pad_len_spaces(m, len);
					seq_puts(m, "[stack]");
				}
			}
		} else {
			pad_len_spaces(m, len);
			seq_puts(m, "[vdso]");
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			"Size:          %8lu kB\n"
			"Rss:           %8lu kB\n"
			"Shared_Clean:  %8lu kB\n"
			"Shared_Dirty:  %8lu kB\n"
			"Private_Clean: %8lu kB\n"
			"Private_Dirty: %8lu kB\n",
			(vma->vm_end - vma->vm_start) >> 10,
			mss->resident >> 10,
			mss->shared_clean >> 10,
			mss->shared_dirty >> 10,
			mss->private_clean >> 10,
			mss->private_dirty >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}
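
/*
 * smaps accumulation: walk the vma's page tables and classify each
 * resident page.  A page_count() of 2 or more is taken to mean the
 * page is shared -- a heuristic, since extra references (e.g. from
 * the page cache) can be transient.
 */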
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pte_t *pte, ptent;
	unsigned long pfn;
	struct page *page;

	pte = pte_offset_map(pmd, addr);
	do {
		ptent = *pte;
		if (pte_none(ptent) || !pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;
		pfn = pte_pfn(ptent);
		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (page_count(page) >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);
	cond_resched_lock(&vma->vm_mm->page_table_lock);
}
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		smaps_pte_range(vma, pmd, addr, next, mss);
	} while (pmd++, addr = next, addr != end);
}
static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		smaps_pmd_range(vma, pud, addr, next, mss);
	} while (pud++, addr = next, addr != end);
}
static inline void smaps_pgd_range(struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				struct mem_size_stats *mss)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		smaps_pud_range(vma, pgd, addr, next, mss);
	} while (pgd++, addr = next, addr != end);
}
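
/*
 * The walk runs under mm->page_table_lock, but smaps_pte_range() may
 * drop and retake it via cond_resched_lock(), so the totals are a
 * best-effort snapshot rather than an atomic one.
 */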
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);

	if (mm) {
		spin_lock(&mm->page_table_lock);
		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
		spin_unlock(&mm->page_table_lock);
	}

	return show_map_internal(m, v, &mss);
}
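
/*
 * seq_file iterator for the map files.  m->version caches the start
 * address of the last vma emitted, so a subsequent read can restart
 * near where it left off (via find_vma) instead of rescanning the
 * whole list from the beginning.
 */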
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct task_struct *task = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma;
	loff_t l = *pos;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	mm = get_task_mm(task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(task);
	down_read(&mm->mmap_sem);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL) ? 0 : -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;

	if (vma && vma != get_gate_vma(task)) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = get_gate_vma(task);

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	m_stop(m, v);
	return (vma != tail_vma) ? tail_vma : NULL;
}
struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
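
/*
 * These seq_operations are wired up from fs/proc/base.c.  Roughly --
 * a sketch of the open routine there, not a verbatim copy -- the
 * per-pid file's open does:
 *
 *	static int maps_open(struct inode *inode, struct file *file)
 *	{
 *		struct task_struct *task = proc_task(inode);
 *		int ret = seq_open(file, &proc_pid_maps_op);
 *		if (!ret) {
 *			struct seq_file *m = file->private_data;
 *			m->private = task;	// consumed by m_start()
 *		}
 *		return ret;
 *	}
 */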
struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};
#ifdef CONFIG_NUMA
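
/*
 * Per-vma NUMA statistics, reported through /proc/<pid>/numa_maps:
 * total/anon/mapped page counts, the highest mapcount seen, and a
 * per-node breakdown of where the pages live.
 */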
struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long mapped;
	unsigned long mapcount_max;
	unsigned long node[MAX_NUMNODES];
};
/*
 * Calculate numa node maps for a vma
 */
static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
{
	struct page *page;
	unsigned long vaddr;
	struct mm_struct *mm = vma->vm_mm;
	int i;
	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);

	if (!md)
		return NULL;
	md->pages = 0;
	md->anon = 0;
	md->mapped = 0;
	md->mapcount_max = 0;
	for_each_node(i)
		md->node[i] = 0;

	spin_lock(&mm->page_table_lock);
	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
		page = follow_page(mm, vaddr, 0);
		if (page) {
			int count = page_mapcount(page);

			if (count)
				md->mapped++;
			if (count > md->mapcount_max)
				md->mapcount_max = count;
			md->pages++;
			if (PageAnon(page))
				md->anon++;
			md->node[page_to_nid(page)]++;
		}
	}
	spin_unlock(&mm->page_table_lock);
	return md;
}
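
/*
 * Emit one /proc/<pid>/numa_maps line: the start address, the vma's
 * memory policy, then MaxRef/Pages/Mapped (plus Anon when non-zero)
 * and the per-node page counts.
 */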
static int show_numa_map(struct seq_file *m, void *v)
{
	struct task_struct *task = m->private;
	struct vm_area_struct *vma = v;
	struct mempolicy *pol;
	struct numa_maps *md;
	struct zone **z;
	int n;
	int first;

	if (!vma->vm_mm)
		return 0;

	md = get_numa_maps(vma);
	if (!md)
		return 0;

	seq_printf(m, "%08lx", vma->vm_start);
	pol = get_vma_policy(task, vma, vma->vm_start);
	/* Print policy */
	switch (pol->policy) {
	case MPOL_PREFERRED:
		seq_printf(m, " prefer=%d", pol->v.preferred_node);
		break;
	case MPOL_BIND:
		seq_printf(m, " bind={");
		first = 1;
		for (z = pol->v.zonelist->zones; *z; z++) {
			if (!first)
				seq_putc(m, ',');
			else
				first = 0;
			seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
					(*z)->name);
		}
		seq_putc(m, '}');
		break;
	case MPOL_INTERLEAVE:
		seq_printf(m, " interleave={");
		first = 1;
		for_each_node(n) {
			if (node_isset(n, pol->v.nodes)) {
				if (!first)
					seq_putc(m, ',');
				else
					first = 0;
				seq_printf(m, "%d", n);
			}
		}
		seq_putc(m, '}');
		break;
	default:
		seq_printf(m, " default");
		break;
	}
	seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
			md->mapcount_max, md->pages, md->mapped);
	if (md->anon)
		seq_printf(m, " Anon=%lu", md->anon);

	for_each_online_node(n) {
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
	}
	seq_putc(m, '\n');
	kfree(md);
	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}
struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map
};

#endif