/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd_t are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true).  So we use them for the list structure.
 * The x86 code we are modelled on uses the page->private/index fields
 * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since
 * our pgds are so much smaller than a page, it seems a waste to
 * spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
	((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
	((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
	((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))

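/*
 * A minimal usage sketch (not part of the original file): walking every
 * pgd in the system under pgd_lock with the helpers above.  The
 * function name is hypothetical.
 */
static inline void example_walk_pgds(void (*fn)(pgd_t *))
{
	struct list_head *pos;

	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list)
		fn(list_to_pgd(pos));	/* recover the pgd from its list node */
	spin_unlock(&pgd_lock);
}
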
extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS	0

#define _PAGE_PRESENT           HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE         HV_PTE_PAGE
#define _PAGE_READABLE          HV_PTE_READABLE
#define _PAGE_WRITABLE          HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE        HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED          HV_PTE_ACCESSED
#define _PAGE_DIRTY             HV_PTE_DIRTY
#define _PAGE_GLOBAL            HV_PTE_GLOBAL
#define _PAGE_USER              HV_PTE_USER

/*
 * All the "standard" bits.  Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
  _PAGE_PRESENT | \
  _PAGE_HUGE_PAGE | \
  _PAGE_READABLE | \
  _PAGE_WRITABLE | \
  _PAGE_EXECUTABLE | \
  _PAGE_ACCESSED | \
  _PAGE_DIRTY | \
  _PAGE_GLOBAL | \
  _PAGE_USER \
)

#define PAGE_NONE \
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
	(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY	/* this is write-only, which we won't support */
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

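/*
 * Note (not in the original file): the digit triples above read as
 * (exec, write, read) from left to right, so e.g. a private
 * PROT_READ|PROT_WRITE mapping uses __P011, i.e. PAGE_COPY, and only
 * gains a writable PTE via copy-on-write.
 */
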
/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT
 * and PAGE_HUGE_PAGE, which must be one and zero, respectively.
 * We set the ignored bits to zero.
 */
#define _PAGE_TABLE	_PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pfn((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	ptep->val = 0;
#else
	u32 *tmp = (u32 *)ptep;

	tmp[0] = 0;	/* clear the low word (with the Present bit) first */
	barrier();
	tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present hv_pte_get_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)

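/*
 * A minimal sketch (not in the original file): each accessor above is
 * a thin wrapper around an hv_pte_* helper that takes and returns a
 * pte_t, so they compose as plain expressions.  The helper name is
 * hypothetical.
 */
static inline pte_t example_make_clean_readonly(pte_t pte)
{
	/* Clear the dirty bit, then the writable bit. */
	return pte_wrprotect(pte_mkclean(pte));
}
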
/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))

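/*
 * A minimal sketch (not in the original file) of the PTE bookkeeping
 * these macros imply; real migration also moves the page and flushes
 * TLBs, and the function name is hypothetical.
 */
static inline pte_t example_migration_states(pte_t pte)
{
	pte_t mig = pte_mkmigrate(pte);	/* Present clear, migrating set */

	/* ...page contents move to their new home here... */
	return pte_donemigrate(mig);	/* Present set, migrating clear */
}
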
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

#define pte_page(x)		pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return hv_pte_get_pfn(pte);
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return hv_pte_set_pfn(prot, pfn);
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE        HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte)     (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })

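/*
 * Worked example (not in the original file), assuming the usual
 * swp_entry_t from <linux/swapops.h>: __swp_entry(3, 100) gives
 * val == 3 | (100 << 5) == 0xc83; __swp_entry_to_pte() shifts that
 * into the high 32 bits, and __pte_to_swp_entry() recovers it, so
 * __swp_type() is 3 and __swp_offset() is 100 again.
 */
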
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return pfn_pte(hv_pte_get_pfn(pte), newprot);
}

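/*
 * A minimal sketch (not in the original file) tying pte_modify()
 * together with pgprot_modify() and pte_pgprot() above: keep the old
 * PTE's PFN and cache-control bits, take the new protection bits.
 * The function name is hypothetical.
 */
static inline pte_t example_mprotect_pte(pte_t old, pgprot_t newprot)
{
	/* Old caching bits survive; the _PAGE_ALL bits come from newprot. */
	pgprot_t prot = pgprot_modify(pte_pgprot(old), newprot);

	return pte_modify(old, prot);
}
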
/*
 * The pgd page can be thought of as an array: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *); pgd_index() is used to get the
 * offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#if defined(CONFIG_HIGHPTE)
extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
#define pte_unmap(pte) kunmap_atomic(pte)
#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
#endif

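/*
 * Typical usage (illustrative, not from the original file): map,
 * inspect, unmap, so HIGHPTE and non-HIGHPTE builds behave alike:
 *
 *	pte_t *ptep = pte_offset_map(pmd, addr);
 *	pte_t pte = *ptep;
 *	pte_unmap(ptep);
 */
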
/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

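/*
 * Worked example (not from the original file): with 4 kB pages
 * (PAGE_SHIFT == 12), pages_to_mb(512) == 512 >> 8 == 2 MB.
 */
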
/*
 * The pmd can be thought of as an array: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;

	return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))

/*
 * The pte page can be thought of as an array: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which
 * would control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_huge_page(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_HUGE_PAGE;
}

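/*
 * A minimal sketch (not in the original file): combining the helpers
 * above to resolve an address to its pte_t, starting from the base of
 * a page table tree.  Assumes the folded pud/pmd accessors provided by
 * the architecture headers pulled in above; the function name is
 * hypothetical, and real callers handle huge pages rather than bailing
 * out.
 */
static inline pte_t *example_lookup_pte(pgd_t *pgd_base,
					unsigned long address)
{
	pgd_t *pgd = pgd_base + pgd_index(address);
	pmd_t *pmd = pmd_offset(pud_offset(pgd, address), address);

	if (pmd_none(*pmd) || pmd_bad(*pmd) || pmd_huge_page(*pmd))
		return NULL;	/* unmapped, corrupt, or huge */
	return pte_offset_kernel(pmd, address);
}
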
#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */