#include <linux/config.h>

#include <asm/processor.h>		/* For TASK_SIZE */

extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				  unsigned long end);
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_range		local_flush_tlb_range
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm,
					  unsigned long start, unsigned long end)
{
	/* PPC has hw page tables. */
}
/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(mm, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_page_to_ram(unsigned long);
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(struct task_struct *tsk, unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
#endif /* __ASSEMBLY__ */
/*
 * The PowerPC MMU uses a hash table containing PTEs, together with
 * a set of 16 segment registers (on 32-bit implementations), to define
 * the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much like
 * that used by the i386, for the sake of the Linux memory management code.
 * Low-level assembler code in head.S (procedure hash_page) is responsible
 * for extracting ptes from the tree and putting them into the hash table
 * when necessary, and updating the accessed and modified bits in the
 * page table tree.
 *
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits in them
 * needed for the TLB and tablewalk.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB does not have accessed nor write
 * protect.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 */
/* PMD_SHIFT determines the size of the area mapped by the second-level page tables */
#define PMD_SHIFT	22
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	1024
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
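
/*
 * Worked example (for illustration only; PAGE_SHIFT = 12, i.e. 4kB pages,
 * is assumed here and comes from asm/page.h rather than this header):
 * with PGDIR_SHIFT = 22, a 32-bit virtual address splits into a 10-bit
 * pgd index, a 10-bit pte index and a 12-bit page offset.  For
 * va = 0xC0123456:
 *
 *	pgd index = va >> PGDIR_SHIFT                       = 0x300
 *	pte index = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1) = 0x123
 *	offset    = va & ~PAGE_MASK                         = 0x456
 */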
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 64MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.
 */
#define VMALLOC_OFFSET	(0x4000000)	/* 64M */
#define VMALLOC_START	((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	ioremap_bot
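
/*
 * Worked example (for illustration only, assuming KERNELBASE = 0xC0000000
 * and 96MB of RAM, so high_memory = 0xC6000000): VMALLOC_START becomes
 *
 *	(0xC6000000 + 0x4000000) & ~(0x4000000 - 1)  =  0xC8000000
 *
 * i.e. the vmalloc area starts at the next 64MB boundary above
 * high_memory + offset, leaving a 32MB unmapped hole in this case.
 */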
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */
#ifndef CONFIG_8xx
#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
#define _PAGE_USER	0x002	/* matches one of the PP bits */
#define _PAGE_RW	0x004	/* software: user write access allowed */
#define _PAGE_GUARDED	0x008
#define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU	0x040	/* W: cache write-through */
#define _PAGE_DIRTY	0x080	/* C: page changed */
#define _PAGE_ACCESSED	0x100	/* R: page referenced */
#define _PAGE_HWWRITE	0x200	/* software: _PAGE_RW & _PAGE_DIRTY */
#define _PAGE_SHARED	0
#else	/* CONFIG_8xx */
#define _PAGE_PRESENT	0x0001	/* Page is valid */
#define _PAGE_NO_CACHE	0x0002	/* I: cache inhibit */
#define _PAGE_SHARED	0x0004	/* No ASID (context) compare */

/* These four software bits must be masked out when the entry is loaded
 * into the TLB.
 */
#define _PAGE_GUARDED	0x0010	/* software: guarded access */
#define _PAGE_WRITETHRU	0x0020	/* software: use writethrough cache */
#define _PAGE_RW	0x0040	/* software: user write access allowed */
#define _PAGE_ACCESSED	0x0080	/* software: page referenced */

#define _PAGE_DIRTY	0x0100	/* C: page changed (write protect) */
#define _PAGE_USER	0x0800	/* One of the PP bits, the other must be 0 */

/* This is used to enable or disable the actual hardware write
 * protection.
 */
#define _PAGE_HWWRITE	_PAGE_DIRTY

#endif /* CONFIG_8xx */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#ifdef __SMP__
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT
#else
#define _PAGE_BASE	_PAGE_PRESENT | _PAGE_ACCESSED
#endif
#define _PAGE_WRENABLE	_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | \
				 _PAGE_SHARED)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | \
				 _PAGE_NO_CACHE)
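
/*
 * Worked example (for illustration only): with the classic (non-8xx) bit
 * values above and a UP kernel, PAGE_KERNEL expands to
 *
 *	_PAGE_PRESENT | _PAGE_ACCESSED            (_PAGE_BASE)
 *	| _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE  (_PAGE_WRENABLE)
 *	= 0x001 | 0x100 | 0x004 | 0x080 | 0x200   = 0x385
 *
 * (_PAGE_SHARED is 0 in that configuration, so it adds nothing.)
 */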
/*
 * The PowerPC can only do execute protection on a segment (256MB) basis,
 * not on a page basis.  So we consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
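
/*
 * Reading the tables above (illustrative note): the three digits are the
 * execute/write/read bits of a mapping, so __P011 is the protection for a
 * private PROT_READ|PROT_WRITE mapping and __S011 for the shared
 * equivalent.  Private writable mappings get PAGE_COPY (copy-on-write);
 * only the shared writable cases use PAGE_SHARED, the one user protection
 * that actually has _PAGE_RW set.
 */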
/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);

extern unsigned long empty_zero_page[1024];

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	((unsigned long) empty_zero_page)
/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR	(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK	(~(sizeof(void*)-1))

/* sizeof(void*) == 1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2	2
/* to set the page-dir */
/* tsk is a task_struct and pgdir is a pte_t */
#ifndef CONFIG_8xx
#define SET_PAGE_DIR(tsk,pgdir) \
	((tsk)->tss.pg_tables = (unsigned long *)(pgdir))
#else	/* CONFIG_8xx */
#define SET_PAGE_DIR(tsk,pgdir) \
	({ \
		unsigned long __pgdir = (unsigned long)pgdir; \
		((tsk)->tss.pg_tables = (unsigned long *)(__pgdir)); \
		asm("mtspr %0,%1 \n\t" : : "i"(M_TWB), "r"(__pa(__pgdir))); \
	})
#endif /* CONFIG_8xx */
extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~PAGE_MASK) != 0; }
extern inline int pmd_present(pmd_t pmd)	{ return (pmd_val(pmd) & PAGE_MASK) != 0; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
extern inline int pgd_none(pgd_t pgd)		{ return 0; }
extern inline int pgd_bad(pgd_t pgd)		{ return 0; }
extern inline int pgd_present(pgd_t pgd)	{ return 1; }
extern inline void pgd_clear(pgd_t * pgdp)	{ }
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
extern inline int pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
extern inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }

extern inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
extern inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
extern inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

extern inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_RW)
		pte_val(pte) |= _PAGE_HWWRITE;
	return pte;
}
extern inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
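
/*
 * Illustrative sketch (the helper below is hypothetical, not part of this
 * header): since _PAGE_HWWRITE is what actually enables hardware writes,
 * a pte only stops write-faulting once it is both writable and dirty.
 * Combining the helpers above achieves that:
 *
 *	pte_t make_writable_dirty(pte_t pte)
 *	{
 *		return pte_mkwrite(pte_mkdirty(pte));
 *	}
 *
 * pte_mkdirty() sets _PAGE_DIRTY, and pte_mkwrite() then sets _PAGE_RW
 * and, seeing the dirty bit, _PAGE_HWWRITE as well.
 */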
/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#if 1
#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#else
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long val = pte_val(pteval);
	extern void xmon(void *);

	if ((val & _PAGE_PRESENT) && (val < 0x111000 || (val & 0x800)
	    || ((val & _PAGE_HWWRITE) && (~val & (_PAGE_RW | _PAGE_DIRTY))))) {
		printk("bad pte val %lx ptr=%p\n", val, pteptr);
		xmon(0);
	}
	*pteptr = pteval;
}
#endif
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (page) | pgprot_val(pgprot); return pte; }

extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = __pa(page) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline unsigned long pte_page(pte_t pte)
{ return (unsigned long) __va(pte_val(pte) & PAGE_MASK); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return pmd_val(pmd); }
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
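
/*
 * Illustrative sketch (hypothetical usage, not part of this header): a
 * lookup of the pte for a virtual address follows the folded two-level
 * walk, much as find_pte() further down does:
 *
 *	pgd_t *dir = pgd_offset(mm, addr);    (pgd_offset_k for kernel addrs)
 *	pmd_t *pmd = pmd_offset(dir, addr);   (just a cast of dir on PPC)
 *
 *	if (pmd_present(*pmd)) {
 *		pte_t *pte = pte_offset(pmd, addr);
 *		if (pte_present(*pte))
 *			... pte_page(*pte) is the mapped page ...
 *	}
 */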
/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same but we don't need a
 * per cpu list of zero pages because we do the zero-ing with the cache
 * off and the access routines are lock-free but the pgt cache stuff
 * is per-cpu since it isn't done with any lock-free access routines
 * (although I think we need arch-specific routines so I can do lock-free).
 *
 * I need to generalize this so we can use it for other arch's as well.
 */
#ifdef __SMP__
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif
#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
extern unsigned long *zero_cache;	/* head of linked list of pre-zero'd pages */
extern unsigned long zero_sz;		/* # currently pre-zero'd pages */
extern unsigned long zeropage_hits;	/* # zero'd page requests satisfied from the list */
extern unsigned long zeropage_calls;	/* # zero'd page requests that have been made */
extern unsigned long zerototal;		/* # pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
extern unsigned long get_zero_page_fast(void);
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret/* = (pgd_t *)__get_free_page(GFP_KERNEL)*/, *init;

	if ( (ret = (pgd_t *)get_zero_page_fast()) == NULL )
	{
		if ( (ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL )
			memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	}
	if (ret)
	{
		init = pgd_offset(&init_mm, 0);
		/*memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));*/
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
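
/*
 * Illustrative note (hypothetical usage, not part of this header): the
 * quicklists are intrusive singly-linked lists threaded through the free
 * pages themselves.  free_pgd_fast() stores the old list head in the first
 * word of the page being freed and makes that page the new head, so
 * recycling a page table costs only a couple of stores:
 *
 *	pgd_t *pgd = pgd_alloc();	(may pop a page off pgd_quicklist)
 *	...
 *	pgd_free(pgd);			(pushes it back on the list)
 *
 * do_check_pgt_cache(), declared further down, is the hook the VM uses to
 * trim these caches when they grow too large.
 */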
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}
/* We don't use pmd cache, so this is a dummy routine */
extern __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);
#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t * page = (pte_t *) get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		pmd_val(*pmd) = (unsigned long) page;
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}
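
/*
 * Illustrative sketch (hypothetical usage, not part of this header): the
 * fault path allocates page-table pages roughly like this, with the pmd
 * level folding away (pmd_alloc(), defined just below, simply returns the
 * pgd pointer cast to pmd_t *):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_alloc(pgd, address);
 *	pte_t *pte = pte_alloc(pmd, address);
 *
 *	if (pte)
 *		set_pte(pte, mk_pte(page_address, prot));
 *
 * page_address and prot stand for whichever page and protection the
 * caller wants installed; pte_alloc() itself decides between the pte
 * quicklist and a fresh page via get_pte_slow().
 */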
/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
extern inline void pmd_free(pmd_t * pmd)
{
}

extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_free_kernel		pmd_free
#define pmd_alloc_kernel	pmd_alloc
#define pte_alloc_kernel	pte_alloc

extern int do_check_pgt_cache(int, int);
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;
#ifdef __SMP__
	int i;
#endif

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
#ifndef __SMP__
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[address >> PGDIR_SHIFT] = entry;
#else
	/* To pgd_alloc/pgd_free, one holds master kernel lock and so does our callee, so we can
	   modify pgd caches of other CPUs as well. -jj */
	for (i = 0; i < NR_CPUS; i++)
		for (pgd = (pgd_t *)cpu_data[i].pgd_cache; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
			pgd[address >> PGDIR_SHIFT] = entry;
#endif
}
extern pgd_t swapper_pg_dir[1024];
extern __inline__ pte_t *find_pte(struct mm_struct *mm, unsigned long va)
{
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte = NULL;

	dir = pgd_offset( mm, va );
	if (dir)
	{
		pmd = pmd_offset(dir, va & PAGE_MASK);
		if (pmd && pmd_present(*pmd))
		{
			pte = pte_offset(pmd, va);
			if (pte && pte_present(*pte))
			{
				flush_tlb_page(find_vma(mm,va),va);
			}
		}
	}
	return pte;
}
/*
 * Page tables may have changed.  We don't need to do anything here
 * as entries are faulted into the hash table by the low-level
 * data/instruction access exception handlers.
 */
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the
 * hash table entry.  flush_hash_page is assembler (for speed) in head.S.
 */
extern void flush_hash_segments(unsigned low_vsid, unsigned high_vsid);
extern void flush_hash_page(unsigned context, unsigned long va);

#define SWP_TYPE(entry)		(((entry) >> 1) & 0x7f)
#define SWP_OFFSET(entry)	((entry) >> 8)
#define SWP_ENTRY(type,offset)	(((type) << 1) | ((offset) << 8))
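
/*
 * Worked example (for illustration only): SWP_ENTRY(3, 0x1234) packs swap
 * type 3 and offset 0x1234 as
 *
 *	(3 << 1) | (0x1234 << 8)  =  0x6 | 0x123400  =  0x123406
 *
 * and the accessors recover them: (0x123406 >> 1) & 0x7f = 3, and
 * 0x123406 >> 8 = 0x1234.  Bit 0 is left clear, so a swap entry can
 * never look _PAGE_PRESENT.
 */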
#define module_map	vmalloc
#define module_unmap	vfree
/* For virtual address to physical address conversion */
extern void cache_clear(__u32 addr, int length);
extern void cache_push(__u32 addr, int length);
extern int mm_end_of_chunk (unsigned long addr, int len);
extern unsigned long iopa(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr) __attribute__ ((const));

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
   compilation errors. */
#define	KERNELMAP_FULL_CACHING		0
#define	KERNELMAP_NOCACHE_SER		1
#define	KERNELMAP_NOCACHE_NONSER	2
#define	KERNELMAP_NO_COPYBACK		3
/*
 * Map some physical address range into the kernel address space.
 */
extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
				int nocacheflag, unsigned long *memavailp);

/*
 * Set cache mode of (kernel space) address range.
 */
extern void kernel_set_cachemode (unsigned long address, unsigned long size,
				  unsigned int cmode);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)
#endif /* _PPC_PGTABLE_H */