#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        unsigned long data, text, lib, swap;
        unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

        /*
         * Note: to minimize their overhead, mm maintains hiwater_vm and
         * hiwater_rss only when about to *lower* total_vm or rss.  Any
         * collector of these hiwater stats must therefore get total_vm
         * and rss too, which will usually be the higher.  Barriers? not
         * worth the effort, such snapshots can always be inconsistent.
         */
        hiwater_vm = total_vm = mm->total_vm;
        if (hiwater_vm < mm->hiwater_vm)
                hiwater_vm = mm->hiwater_vm;
        hiwater_rss = total_rss = get_mm_rss(mm);
        if (hiwater_rss < mm->hiwater_rss)
                hiwater_rss = mm->hiwater_rss;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        swap = get_mm_counter(mm, MM_SWAPENTS);
        seq_printf(m,
                "VmPeak:\t%8lu kB\n"
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmHWM:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n"
                "VmSwap:\t%8lu kB\n",
                hiwater_vm << (PAGE_SHIFT-10),
                (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                hiwater_rss << (PAGE_SHIFT-10),
                total_rss << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
                swap << (PAGE_SHIFT-10));
}
unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        *shared = get_mm_counter(mm, MM_FILEPAGES);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
        return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}
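
/*
 * The pad width above (25 + sizeof(void *) * 6) is a heuristic based on
 * pointer size: it pushes the path/name field out to a fixed column so
 * the output lines up regardless of address width, and "%*c" emits that
 * many spaces in one go.
 */
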
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
        if (vma && vma != priv->tail_vma) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma = NULL;
        loff_t l = *pos;

        /* Clear the per syscall fields in priv */
        priv->task = NULL;
        priv->tail_vma = NULL;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time. We have zero last_addr at
         * the beginning and also after lseek. We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = mm_for_maps(priv->task);
        if (!mm || IS_ERR(mm))
                return mm;
        down_read(&mm->mmap_sem);

        tail_vma = get_gate_vma(priv->task);
        priv->tail_vma = tail_vma;

        /* Start with last addr hint */
        vma = find_vma(mm, last_addr);
        if (last_addr && vma) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check the vma index is within the range and do
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL)? 0: -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = priv->tail_vma;

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        vma_stop(priv, vma);
        return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct vm_area_struct *vma = v;

        vma_stop(priv, vma);
        if (priv->task)
                put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
                        const struct seq_operations *ops)
{
        struct proc_maps_private *priv;
        int ret = -ENOMEM;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv) {
                priv->pid = proc_pid(inode);
                ret = seq_open(file, ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = priv;
                } else {
                        kfree(priv);
                }
        }
        return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
        }

        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
        if (vma->vm_flags & VM_GROWSDOWN)
                if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
                        start += PAGE_SIZE;

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        pgoff,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, &file->f_path, "\n");
        } else {
                const char *name = arch_vma_name(vma);
                if (!name) {
                        if (mm) {
                                if (vma->vm_start <= mm->brk &&
                                                vma->vm_end >= mm->start_brk) {
                                        name = "[heap]";
                                } else if (vma->vm_start <= mm->start_stack &&
                                           vma->vm_end >= mm->start_stack) {
                                        name = "[stack]";
                                }
                        } else {
                                name = "[vdso]";
                        }
                }
                if (name) {
                        pad_len_spaces(m, len);
                        seq_puts(m, name);
                }
        }
        seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;

        show_map_vma(m, vma);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
        return 0;
}

static const struct seq_operations proc_pid_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
        .open           = maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *      - 1M 3-user-pages add up to 8KB errors;
 *      - supports mapcount up to 2^24, or 16M;
 *      - supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
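
/*
 * Worked example of the fixed-point accounting above (illustrative,
 * assuming 4K pages and PSS_SHIFT == 12): a page mapped by 3 processes
 * adds (4096 << 12) / 3 == 5592405 to the fixed-point pss counter, and
 * shifting the accumulated counter right by PSS_SHIFT at report time
 * recovers the byte count.  The per-page rounding error is therefore
 * bounded by 2^-PSS_SHIFT of a byte rather than a whole byte.
 */
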
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
        struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
        unsigned long anonymous;
        unsigned long anonymous_thp;
        unsigned long swap;
        u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
                unsigned long ptent_size, struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
        struct page *page;
        int mapcount;

        if (is_swap_pte(ptent)) {
                mss->swap += ptent_size;
                return;
        }

        if (!pte_present(ptent))
                return;

        page = vm_normal_page(vma, addr, ptent);
        if (!page)
                return;

        if (PageAnon(page))
                mss->anonymous += ptent_size;

        mss->resident += ptent_size;
        /* Accumulate the size in pages that have been accessed. */
        if (pte_young(ptent) || PageReferenced(page))
                mss->referenced += ptent_size;
        mapcount = page_mapcount(page);
        if (mapcount >= 2) {
                /* shared page: charge a proportional share of its size */
                if (pte_dirty(ptent) || PageDirty(page))
                        mss->shared_dirty += ptent_size;
                else
                        mss->shared_clean += ptent_size;
                mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
        } else {
                /* private page: charge the full size */
                if (pte_dirty(ptent) || PageDirty(page))
                        mss->private_dirty += ptent_size;
                else
                        mss->private_clean += ptent_size;
                mss->pss += (ptent_size << PSS_SHIFT);
        }
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = mss->vma;
        pte_t *pte;
        spinlock_t *ptl;

        spin_lock(&walk->mm->page_table_lock);
        if (pmd_trans_huge(*pmd)) {
                if (pmd_trans_splitting(*pmd)) {
                        spin_unlock(&walk->mm->page_table_lock);
                        wait_split_huge_page(vma->anon_vma, pmd);
                } else {
                        smaps_pte_entry(*(pte_t *)pmd, addr,
                                        HPAGE_PMD_SIZE, walk);
                        spin_unlock(&walk->mm->page_table_lock);
                        mss->anonymous_thp += HPAGE_PMD_SIZE;
                        return 0;
                }
        } else {
                spin_unlock(&walk->mm->page_table_lock);
        }
        /*
         * The mmap_sem held all the way back in m_start() is what
         * keeps khugepaged out of here and from collapsing things
         * in here.
         */
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
        struct proc_maps_private *priv = m->private;
        struct task_struct *task = priv->task;
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
                .mm = vma->vm_mm,
                .private = &mss,
        };

        memset(&mss, 0, sizeof mss);
        mss.vma = vma;
        /* mmap_sem is held in m_start */
        if (vma->vm_mm && !is_vm_hugetlb_page(vma))
                walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

        show_map_vma(m, vma);

        seq_printf(m,
                   "Size:           %8lu kB\n"
                   "Rss:            %8lu kB\n"
                   "Pss:            %8lu kB\n"
                   "Shared_Clean:   %8lu kB\n"
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
                   "Referenced:     %8lu kB\n"
                   "Anonymous:      %8lu kB\n"
                   "AnonHugePages:  %8lu kB\n"
                   "Swap:           %8lu kB\n"
                   "KernelPageSize: %8lu kB\n"
                   "MMUPageSize:    %8lu kB\n"
                   "Locked:         %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
                   mss.shared_clean  >> 10,
                   mss.shared_dirty  >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
                   mss.referenced >> 10,
                   mss.anonymous >> 10,
                   mss.anonymous_thp >> 10,
                   mss.swap >> 10,
                   vma_kernel_pagesize(vma) >> 10,
                   vma_mmu_pagesize(vma) >> 10,
                   (vma->vm_flags & VM_LOCKED) ?
                        (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

        if (m->count < m->size)  /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
        return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
        .open           = smaps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->private;
        pte_t *pte, ptent;
        spinlock_t *ptl;
        struct page *page;

        split_huge_page_pmd(walk->mm, pmd);

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
                if (!pte_present(ptent))
                        continue;

                page = vm_normal_page(vma, addr, ptent);
                if (!page)
                        continue;

                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
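
/*
 * Usage sketch (from userspace, illustrative): writing one of the values
 * above to /proc/<pid>/clear_refs resets the referenced bits for that
 * task's mappings, e.g. "echo 1 > /proc/1234/clear_refs" clears them for
 * all pages; re-reading the "Referenced:" field of /proc/<pid>/smaps then
 * shows which pages have been touched since.
 */
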
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        long type;

        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
        if (strict_strtol(strstrip(buffer), 10, &type))
                return -EINVAL;
        if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
                return -EINVAL;
        task = get_proc_task(file->f_path.dentry->d_inode);
        if (!task)
                return -ESRCH;
        mm = get_task_mm(task);
        if (mm) {
                struct mm_walk clear_refs_walk = {
                        .pmd_entry = clear_refs_pte_range,
                        .mm = mm,
                };
                down_read(&mm->mmap_sem);
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        clear_refs_walk.private = vma;
                        if (is_vm_hugetlb_page(vma))
                                continue;
                        /*
                         * Writing 1 to /proc/pid/clear_refs affects all pages.
                         *
                         * Writing 2 to /proc/pid/clear_refs only affects
                         * anonymous pages.
                         *
                         * Writing 3 to /proc/pid/clear_refs only affects file
                         * mapped pages.
                         */
                        if (type == CLEAR_REFS_ANON && vma->vm_file)
                                continue;
                        if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
                                continue;
                        walk_page_range(vma->vm_start, vma->vm_end,
                                        &clear_refs_walk);
                }
                flush_tlb_mm(mm);
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        put_task_struct(task);

        return count;
}

const struct file_operations proc_clear_refs_operations = {
        .write          = clear_refs_write,
        .llseek         = noop_llseek,
};

struct pagemapread {
        int pos, len;
        u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
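
/*
 * Illustrative layout of one entry built from the macros above (just the
 * arithmetic spelled out, not a new definition): the status field occupies
 * bits 61-63, the page-shift field bits 55-60, and the PFN/swap encoding
 * the low 55 bits.  A present 4K page at pfn 0x1234 is therefore reported
 * as PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT == 0x8600000000001234.
 */
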
static int add_to_pagemap(unsigned long addr, u64 pfn,
                          struct pagemapread *pm)
{
        pm->buffer[pm->pos++] = pfn;
        if (pm->pos >= pm->len)
                return PM_END_OF_BUFFER;
        return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
                if (err)
                        break;
        }
        return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
        swp_entry_t e = pte_to_swp_entry(pte);
        return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
        u64 pme = 0;

        if (is_swap_pte(pte))
                pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
        else if (pte_present(pte))
                pme = PM_PFRAME(pte_pfn(pte))
                        | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
        return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct vm_area_struct *vma;
        struct pagemapread *pm = walk->private;
        pte_t *pte;
        int err = 0;

        split_huge_page_pmd(walk->mm, pmd);

        /* find the first VMA at or above 'addr' */
        vma = find_vma(walk->mm, addr);
        for (; addr != end; addr += PAGE_SIZE) {
                u64 pfn = PM_NOT_PRESENT;

                /* check to see if we've left 'vma' behind
                 * and need a new, higher one */
                if (vma && (addr >= vma->vm_end))
                        vma = find_vma(walk->mm, addr);

                /* check that 'vma' actually covers this address,
                 * and that it isn't a huge page vma */
                if (vma && (vma->vm_start <= addr) &&
                    !is_vm_hugetlb_page(vma)) {
                        pte = pte_offset_map(pmd, addr);
                        pfn = pte_to_pagemap_entry(*pte);
                        /* unmap before userspace copy */
                        pte_unmap(pte);
                }
                err = add_to_pagemap(addr, pfn, pm);
                if (err)
                        return err;
        }

        cond_resched();

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
        u64 pme = 0;

        if (pte_present(pte))
                pme = PM_PFRAME(pte_pfn(pte) + offset)
                        | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
        return pme;
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
                                 unsigned long addr, unsigned long end,
                                 struct mm_walk *walk)
{
        struct pagemapread *pm = walk->private;
        int err = 0;
        u64 pfn;

        for (; addr != end; addr += PAGE_SIZE) {
                int offset = (addr & ~hmask) >> PAGE_SHIFT;
                pfn = huge_pte_to_pagemap_entry(*pte, offset);
                err = add_to_pagemap(addr, pfn, pm);
                if (err)
                        return err;
        }

        cond_resched();

        return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-55  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-55  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
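
/*
 * Userspace usage sketch (illustrative, not part of the implementation):
 * to query the entry for virtual address va of a pid, open
 * /proc/<pid>/pagemap, lseek() to (va / PAGE_SIZE) * 8 and read 8 bytes;
 * bit 63 of the native-endian u64 then reports presence and the low bits
 * carry the PFN or swap encoding described above.
 */
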
#define PAGEMAP_WALK_SIZE       (PMD_SIZE)
#define PAGEMAP_WALK_MASK       (PMD_MASK)
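
/*
 * pagemap_read() below walks the address space in PMD-sized chunks so
 * that mmap_sem is only held across one chunk at a time and the temporary
 * buffer (one u64 per page of the chunk) stays small.
 */
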
static ssize_t pagemap_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        struct mm_struct *mm;
        struct pagemapread pm;
        int ret = -ESRCH;
        struct mm_walk pagemap_walk = {};
        unsigned long src;
        unsigned long svpfn;
        unsigned long start_vaddr;
        unsigned long end_vaddr;
        int copied = 0;

        if (!task)
                goto out;

        ret = -EACCES;
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto out_task;

        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_task;

        ret = 0;
        if (!count)
                goto out_task;

        mm = get_task_mm(task);
        if (!mm)
                goto out_task;

        pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
        pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
                goto out_mm;

        pagemap_walk.pmd_entry = pagemap_pte_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
        pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
        pagemap_walk.mm = mm;
        pagemap_walk.private = &pm;

        src = *ppos;
        svpfn = src / PM_ENTRY_BYTES;
        start_vaddr = svpfn << PAGE_SHIFT;
        end_vaddr = TASK_SIZE_OF(task);

        /* watch out for wraparound */
        if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
                start_vaddr = end_vaddr;

        /*
         * The odds are that this will stop walking way
         * before end_vaddr, because the length of the
         * user buffer is tracked in "pm", and the walk
         * will stop when we hit the end of the buffer.
         */
        ret = 0;
        while (count && (start_vaddr < end_vaddr)) {
                int len;
                unsigned long end;

                pm.pos = 0;
                end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
                /* overflow ? */
                if (end < start_vaddr || end > end_vaddr)
                        end = end_vaddr;
                down_read(&mm->mmap_sem);
                ret = walk_page_range(start_vaddr, end, &pagemap_walk);
                up_read(&mm->mmap_sem);
                start_vaddr = end;

                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {
                        ret = -EFAULT;
                        goto out_free;
                }
                copied += len;
                buf += len;
                count -= len;
        }
        *ppos += copied;
        if (!ret || ret == PM_END_OF_BUFFER)
                ret = copied;

out_free:
        kfree(pm.buffer);
out_mm:
        mmput(mm);
out_task:
        put_task_struct(task);
out:
        return ret;
}

const struct file_operations proc_pagemap_operations = {
        .llseek         = mem_lseek, /* borrow this */
        .read           = pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
        return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
        .open           = numa_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_private,
};