#ifndef _GENERIC_RMAP_H
#define _GENERIC_RMAP_H
/*
 * linux/include/asm-generic/rmap.h
 *
 * Architecture dependent parts of the reverse mapping code,
 * this version should work for most architectures with a
 * 'normal' page table layout.
 *
 * We use the struct page of the page table page to find out
 * the process and full address of a page table entry:
 * - page->mapping points to the process' mm_struct
 * - page->index has the high bits of the address
 * - the lower bits of the address are calculated from the
 *   offset of the page table entry within the page table page
 *
 * For CONFIG_HIGHPTE, we need to represent the address of a pte in a
 * scalar pte_addr_t.  The pfn of the pte's page is shifted left by PAGE_SHIFT
 * bits and is then ORed with the byte offset of the pte within its page.
 *
 * For CONFIG_HIGHMEM4G, the pte_addr_t is 32 bits.  20 for the pfn, 12 for
 * the offset.
 *
 * For CONFIG_HIGHMEM64G, the pte_addr_t is 64 bits.  52 for the pfn, 12 for
 * the offset.
 */
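/*
 * For illustration only (hypothetical values): with the CONFIG_HIGHMEM4G
 * layout above, a pte sitting at byte offset 0x0a8 of the page table page
 * whose pfn is 0x12345 is encoded as
 *
 *      (0x12345 << 12) | 0x0a8 == 0x123450a8
 *
 * i.e. the top 20 bits carry the pfn and the bottom 12 bits the offset.
 */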
#include <linux/mm.h>
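/*
 * pgtable_add_rmap: record which mm a page table page belongs to and which
 * part of the address space it covers.  page->mapping is reused as a pointer
 * to the mm, page->index keeps the high bits of the address, rounded down to
 * the range one pte page maps (for example, with PTRS_PER_PTE == 1024 and
 * PAGE_SIZE == 4096 that is a 4MB boundary -- illustrative values only).
 */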
static inline void pgtable_add_rmap(struct page * page, struct mm_struct * mm, unsigned long address)
{
#ifdef BROKEN_PPC_PTE_ALLOC_ONE
	/* OK, so PPC calls pte_alloc() before mem_map[] is setup ... ;( */
	extern int mem_init_done;

	if (!mem_init_done)
		return;
#endif
	page->mapping = (void *)mm;
	page->index = address & ~((PTRS_PER_PTE * PAGE_SIZE) - 1);
	inc_page_state(nr_page_table_pages);
}
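/*
 * pgtable_remove_rmap: undo pgtable_add_rmap when the page table page goes
 * away -- clear the back-pointers and drop the page table page count again.
 */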
static inline void pgtable_remove_rmap(struct page * page)
{
	page->mapping = NULL;
	page->index = 0;
	dec_page_state(nr_page_table_pages);
}
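/*
 * ptep_to_mm: return the mm_struct that owns the page table page containing
 * this pte, as stashed in page->mapping by pgtable_add_rmap.
 */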
static inline struct mm_struct * ptep_to_mm(pte_t * ptep)
{
	struct page * page = kmap_atomic_to_page(ptep);
	return (struct mm_struct *) page->mapping;
}
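/*
 * ptep_to_address: reconstruct the user virtual address this pte maps.
 * The high bits come from page->index; the low bits come from the pte's
 * byte offset inside its page table page.  Multiplying that byte offset by
 * PTRS_PER_PTE works because PTRS_PER_PTE * sizeof(pte_t) == PAGE_SIZE for
 * the 'normal' layout this header assumes, so the product equals
 * pte_index * PAGE_SIZE.
 */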
static inline unsigned long ptep_to_address(pte_t * ptep)
{
	struct page * page = kmap_atomic_to_page(ptep);
	unsigned long low_bits;
	low_bits = ((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE;
	return page->index + low_bits;
}
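/*
 * ptep_to_paddr: encode the pte's location as a pte_addr_t, as described at
 * the top of this file.  With CONFIG_HIGHPTE the pte page may live in
 * highmem, so we store pfn << PAGE_SHIFT plus the byte offset within the
 * page (the addition is equivalent to ORing, since the low PAGE_SHIFT bits
 * of the shifted pfn are zero).  Without CONFIG_HIGHPTE the pte is always
 * directly addressable and its kernel virtual address is used as is.
 */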
#ifdef CONFIG_HIGHPTE
static inline pte_addr_t ptep_to_paddr(pte_t *ptep)
{
	pte_addr_t paddr;
	paddr = ((pte_addr_t)page_to_pfn(kmap_atomic_to_page(ptep))) << PAGE_SHIFT;
	return paddr + (pte_addr_t)((unsigned long)ptep & ~PAGE_MASK);
}
#else
static inline pte_addr_t ptep_to_paddr(pte_t *ptep)
{
	return (pte_addr_t)ptep;
}
#endif
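/*
 * rmap_ptep_map/rmap_ptep_unmap: turn a pte_addr_t back into a usable pte_t
 * pointer and release it again.  Without CONFIG_HIGHPTE the pte_addr_t
 * already is the pte's kernel virtual address, so mapping is a cast and
 * unmapping is a no-op; the CONFIG_HIGHPTE variants, which have to kmap the
 * pte page, are expected to come from the architecture's own headers.
 */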
#ifndef CONFIG_HIGHPTE
static inline pte_t *rmap_ptep_map(pte_addr_t pte_paddr)
{
	return (pte_t *)pte_paddr;
}

static inline void rmap_ptep_unmap(pte_t *pte)
{
	return;
}
#endif
#endif /* _GENERIC_RMAP_H */