#include <linux/config.h>

#ifndef _PPC_PGTABLE_H
#define _PPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <asm/processor.h>		/* For TASK_SIZE */

extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_range		local_flush_tlb_range

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_page_to_ram(unsigned long);

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(struct task_struct *tsk, unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;

#endif /* __ASSEMBLY__ */

/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much like
 * that used by the i386, for the sake of the Linux memory management code.
 * Low-level assembler code in head.S (procedure hash_page) is responsible
 * for extracting ptes from the tree and putting them into the hash table
 * when necessary, and updating the accessed and modified bits in the
 * page table tree.
 *
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed or write
 * protect bits.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */

/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
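
/*
 * Worked example (a sketch, not part of the original header): with 4kB
 * pages (PAGE_SHIFT = 12), the constants above split a 32-bit virtual
 * address into a 10-bit pgd index, a 10-bit pte index and a 12-bit page
 * offset:
 *
 *	pgd index = addr >> PGDIR_SHIFT;			(top 10 bits)
 *	pte index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	(next 10 bits)
 *	offset    = addr & ~PAGE_MASK;				(low 12 bits)
 */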

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 64MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET	(0x4000000) /* 64M */
#define VMALLOC_START	((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	ioremap_bot
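
/*
 * Worked example (a sketch; assumes PAGE_OFFSET is 0xc0000000): on a
 * machine with 256MB of RAM, high_memory is 0xd0000000, so VMALLOC_START
 * is (0xd0000000 + 0x4000000) & ~(0x4000000 - 1) = 0xd4000000, leaving
 * the 64MB hole described above between physical memory and vmalloc space.
 */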

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#ifndef CONFIG_8xx
/* Definitions for 60x, 740/750, etc. */
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_USER	0x002	/* matches one of the PP bits */
#define _PAGE_RW	0x004	/* software: user write access allowed */
#define _PAGE_GUARDED	0x008
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_HWWRITE	0x200	/* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED	0
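
/*
 * Example (a sketch, not from the original source): a kernel read/write
 * page on a UP 60x build carries
 * _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE
 * = 0x001 | 0x100 | 0x004 | 0x080 | 0x200 = 0x385.
 */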

#else
/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These four software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_WRITETHRU	0x0020	/* software: use writethrough cache */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

#define _PAGE_DIRTY	0x0100	/* C: page changed (write protect) */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other must be 0 */

/* This is used to enable or disable the actual hardware write
 * protection.
 */
#define _PAGE_HWWRITE	_PAGE_DIRTY

#endif /* CONFIG_8xx */

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#ifdef __SMP__
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT
#else
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED
#endif
#define _PAGE_WRENABLE	_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
				 _PAGE_SHARED)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
				 _PAGE_NO_CACHE)

/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
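
/*
 * Example (a sketch, not from the original source): the mmap code indexes
 * these tables with the vma's r/w/x bits.  A private PROT_READ|PROT_WRITE
 * mapping gets __P011 = PAGE_COPY, so the first store faults and is
 * satisfied by copy-on-write; the same protections on a MAP_SHARED mapping
 * get __S011 = PAGE_SHARED, whose _PAGE_RW bit lets stores go straight to
 * the shared page.
 */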

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#ifndef __ASSEMBLY__
extern pte_t __bad_page(void);
extern pte_t *__bad_pagetable(void);

extern unsigned long empty_zero_page[1024];

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2	2

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~PAGE_MASK) != 0; }
extern inline int pmd_present(pmd_t pmd)	{ return (pmd_val(pmd) & PAGE_MASK) != 0; }
extern inline void pmd_clear(pmd_t *pmdp)	{ pmd_val(*pmdp) = 0; }
241 * The "pgd_xxx()" functions here are trivial for a folded two-level
242 * setup: the pgd is never bad, and a pmd always exists (as it's folded
243 * into the pgd entry)
245 extern inline int pgd_none(pgd_t pgd
) { return 0; }
246 extern inline int pgd_bad(pgd_t pgd
) { return 0; }
247 extern inline int pgd_present(pgd_t pgd
) { return 1; }
248 extern inline void pgd_clear(pgd_t
* pgdp
) { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
extern inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

extern inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

extern inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_RW)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
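
/*
 * Sketch of the invariant the helpers above maintain (not from the
 * original source): _PAGE_HWWRITE, the bit the hardware actually tests,
 * is set only while both _PAGE_RW and _PAGE_DIRTY are set:
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte));	sets RW, DIRTY and HWWRITE
 *	pte = pte_wrprotect(pte);		clears RW and HWWRITE, but
 *						leaves DIRTY for the VM layer
 */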

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#if 1
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#else
/* Debug version: sanity check the pte value before storing it. */
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long val = pte_val(pteval);
	extern void xmon(void *);

	if ((val & _PAGE_PRESENT) && (val < 0x111000 || (val & 0x800)
	    || ((val & _PAGE_HWWRITE) && (~val & (_PAGE_RW|_PAGE_DIRTY))))) {
		printk("bad pte val %lx ptr=%p\n", val, pteptr);
		xmon(0);
	}
	*pteptr = pteval;
}
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (page) | pgprot_val(pgprot); return pte; }

extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = __pa(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline unsigned long pte_page(pte_t pte)
{ return (unsigned long) __va(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd); }

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
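
/*
 * Example (a sketch, not from the original source): a full walk from an
 * mm and a virtual address down to the pte, as find_pte() below does.
 * With the folded pmd, pmd_offset() is just a cast:
 *
 *	pgd_t *dir = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(dir, addr);
 *	pte_t *pte = pmd_present(*pmd) ? pte_offset(pmd, addr) : NULL;
 */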

/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same but we don't need a
 * per cpu list of zero pages because we do the zero-ing with the cache
 * off and the access routines are lock-free but the pgt cache stuff
 * is per-cpu since it isn't done with any lock-free access routines
 * (although I think we need arch-specific routines so I can do lock-free).
 *
 * I need to generalize this so we can use it for other arch's as well.
 */

#ifdef __SMP__
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)

extern unsigned long *zero_cache;	/* head linked list of pre-zero'd pages */
extern unsigned long zero_sz;		/* # currently pre-zero'd pages */
extern unsigned long zeropage_hits;	/* # zero'd page requests satisfied from the list */
extern unsigned long zeropage_calls;	/* # zero'd page requests that have been made */
extern unsigned long zerototal;		/* # pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
extern unsigned long get_zero_page_fast(void);

extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret, *init;

	if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )
	{
		if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
			memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	}
	if (ret)
	{
		/* copy the kernel mappings from the init pgd */
		init = pgd_offset(&init_mm, 0);
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* overwrite the free-list link word */
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];	/* overwrite the free-list link word */
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use pmd cache, so these are dummy routines */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()

extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		pmd_val(*pmd) = (unsigned long) page;
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc
#define pte_alloc_kernel	pte_alloc

extern int do_check_pgt_cache(int, int);
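
/*
 * Example (a sketch, not from the original source): how a fault handler
 * would materialize a pte with the allocators above; pmd_alloc() is free
 * because the pmd is folded into the pgd:
 *
 *	pgd_t *dir = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(dir, address);
 *	pte_t *pte = pmd ? pte_alloc(pmd, address) : NULL;
 */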

extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef __SMP__
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef __SMP__
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our
	   callee, so we can modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}

extern pgd_t swapper_pg_dir[1024];

extern __inline__ pte_t *find_pte(struct mm_struct *mm, unsigned long va)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte = NULL;	/* return NULL if no mapping exists */

	va &= PAGE_MASK;

	dir = pgd_offset( mm, va );
	if (dir)
	{
		pmd = pmd_offset(dir, va & PAGE_MASK);
		if (pmd && pmd_present(*pmd))
		{
			pte = pte_offset(pmd, va);
			if (pte && pte_present(*pte))
			{
				pte_uncache(*pte);
				flush_tlb_page(find_vma(mm,va),va);
			}
		}
	}
	return pte;
}

/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 */
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the
 * hash table entry.  flush_hash_page is assembler (for speed) in head.S.
 */
extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned context, unsigned long va);

#define SWP_TYPE(entry)		(((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry)	((entry) >> 8)
#define SWP_ENTRY(type,offset)	(((type) << 1) | ((offset) << 8))
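
/*
 * Worked example (a sketch, not from the original source):
 * SWP_ENTRY(5, 100) = (5 << 1) | (100 << 8) = 0x640a;
 * SWP_TYPE(0x640a) = (0x640a >> 1) & 0x7f = 5 and
 * SWP_OFFSET(0x640a) = 0x640a >> 8 = 100.  Bit 0 stays clear, so a
 * swap entry can never be mistaken for a pte with _PAGE_PRESENT set.
 */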

#define module_map	vmalloc
#define module_unmap	vfree

/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3

/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
				  unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range	remap_page_range

#ifdef CONFIG_8xx
#define __tlbia()	asm volatile ("tlbia" : : )

extern inline void local_flush_tlb_all(void)
	{ __tlbia(); }
extern inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
extern inline void local_flush_tlb_page(struct vm_area_struct *vma,
		unsigned long vmaddr)
	{ __tlbia(); }
extern inline void local_flush_tlb_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
	{ __tlbia(); }
/* The 8xx has no hash table, so there is no hash table entry to flush */
extern inline void flush_hash_page(unsigned context, unsigned long va)
	{ }
#endif /* CONFIG_8xx */

#endif /* __ASSEMBLY__ */
#endif /* _PPC_PGTABLE_H */