Import 2.3.18pre1
davej-history.git: include/asm-alpha/pgtable.h
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/config.h>
#include <linux/spinlock.h>	/* For the task lock */

#include <asm/system.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/mmu_context.h>
#include <asm/machvec.h>

/* Caches aren't brain-dead on the Alpha. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_range(start, end)		do { } while (0)

/*
 * Use a few helper functions to hide the ugly broken ASN
 * numbers on early Alphas (ev4 and ev45)
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

__EXTERN_INLINE void
ev4_flush_tlb_current(struct mm_struct *mm)
{
	tbiap();
}

__EXTERN_INLINE void
ev4_flush_tlb_other(struct mm_struct *mm)
{
}

extern void ev5_flush_tlb_current(struct mm_struct *mm);

__EXTERN_INLINE void
ev5_flush_tlb_other(struct mm_struct *mm)
{
	mm->context = 0;
}

/*
 * Flush just one page in the current TLB set.
 * We need to be very careful about the icache here, there
 * is no way to invalidate a specific icache page..
 */

__EXTERN_INLINE void
ev4_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	tbi(2 + ((vma->vm_flags & VM_EXEC) != 0), addr);
}

__EXTERN_INLINE void
ev5_flush_tlb_current_page(struct mm_struct * mm,
			   struct vm_area_struct *vma,
			   unsigned long addr)
{
	if (vma->vm_flags & VM_EXEC)
		ev5_flush_tlb_current(mm);
	else
		tbi(2, addr);
}

#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_other		alpha_mv.mv_flush_tlb_other
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
# define flush_tlb_current		ev4_flush_tlb_current
# define flush_tlb_other		ev4_flush_tlb_other
# define flush_tlb_current_page		ev4_flush_tlb_current_page
# else
# define flush_tlb_current		ev5_flush_tlb_current
# define flush_tlb_other		ev5_flush_tlb_other
# define flush_tlb_current_page		ev5_flush_tlb_current_page
# endif
#endif

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

/*
 * Flush current user mapping.
 */
static inline void flush_tlb(void)
{
	flush_tlb_current(current->mm);
}

#ifndef __SMP__
/*
 * Flush everything (kernel mapping may also have
 * changed due to vmalloc/vfree)
 */
static inline void flush_tlb_all(void)
{
	tbia();
}

/*
 * Flush a specified user mapping
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm != current->mm)
		flush_tlb_other(mm);
	else
		flush_tlb_current(mm);
}

/*
 * Page-granular tlb flush.
 *
 * do a tbisd (type = 2) normally, and a tbis (type = 3)
 * if it is an executable mapping.  We want to avoid the
 * itlb flush, because that potentially also does a
 * icache flush.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	struct mm_struct * mm = vma->vm_mm;

	if (mm != current->mm)
		flush_tlb_other(mm);
	else
		flush_tlb_current_page(mm, vma, addr);
}

/*
 * Flush a specified range of user mapping:  on the
 * Alpha we flush the whole user tlb.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}

#else /* __SMP__ */

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);

#endif /* __SMP__ */
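
/*
 * Illustrative usage sketch (an assumption about typical callers, not part
 * of this header): after a user PTE has been changed with set_pte() (defined
 * below), the stale TLB entry for that address must be invalidated.  The
 * function name and parameters here are hypothetical.
 */
#if 0	/* illustrative only */
static inline void example_update_one_pte(struct vm_area_struct *vma,
					  unsigned long addr,
					  pte_t *ptep, pte_t new)
{
	set_pte(ptep, new);		/* update the page table entry */
	flush_tlb_page(vma, addr);	/* then drop the stale TLB entry */
	/* whole-mm or range updates would use flush_tlb_mm()/flush_tlb_range() */
}
#endif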

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 *
 * The PGD is special: the last entry is reserved for self-mapping.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	((1UL << (PAGE_SHIFT-3))-1)
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))
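
/*
 * Worked example (an illustration, assuming the standard 8KB page size,
 * i.e. PAGE_SHIFT == 13):
 *
 *	PTRS_PER_PTE = 1 << 10 = 1024 entries per page-table page
 *	PMD_SHIFT    = 13 + 10 = 23  ->  PMD_SIZE   = 8MB mapped per PTE page
 *	PGDIR_SHIFT  = 13 + 20 = 33  ->  PGDIR_SIZE = 8GB mapped per PMD page
 *	PTRS_PER_PGD = 1024 - 1 = 1023 (the last slot is the self-map)
 *
 * So one page directory can map up to 1023 * 8GB of virtual space.
 */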

#define VMALLOC_START		0xFFFFFE0000000000
#define VMALLOC_VMADDR(x)	((unsigned long)(x))
#define VMALLOC_END		(~0UL)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it.  That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)
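
/*
 * Concretely (an illustration using the bit values defined above):
 *
 *	__DIRTY_BITS  = 0x20000 | 0x1000 | 0x2000 = 0x23000
 *	__ACCESS_BITS = 0x40000 | 0x0100 | 0x0200 = 0x40300
 *
 * pte_mkold() below clears __ACCESS_BITS, so the next read of the page
 * faults (KRE/URE are gone) and the fault path can re-mark it accessed.
 * Likewise pte_mkclean() clears __DIRTY_BITS, so the next write faults and
 * the page can be re-marked dirty in software.
 */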

#define _PFN_MASK	0xFFFFFFFF00000000

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
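
/*
 * Worked example (an illustration of the macros above): a write-only
 * request selects the "xwr" = 010 entries.
 *
 *	__S010 = _PAGE_S(_PAGE_FOE)
 *	       = _PAGE_VALID | __ACCESS_BITS | _PAGE_FOE
 *	  shared mapping: readable and writable, faults only on exec.
 *
 *	__P010 = _PAGE_P(_PAGE_FOE)
 *	       = _PAGE_VALID | __ACCESS_BITS | _PAGE_FOE | _PAGE_FOW
 *	  private mapping: additionally faults on write, so the first store
 *	  can be caught (e.g. for copy-on-write) before the page is made
 *	  writable.
 */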

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(PAGE_OFFSET+0x30A000)

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR		(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK		(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2		3

/* to find an entry in a page-table */
#define PAGE_PTR(address)	\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define PHYS_TWIDDLE(phys) \
  ((((phys) & 0xc0000000000UL) == 0x40000000000UL) \
  ? ((phys) ^= 0xc0000000000UL) : (phys))
#else
#define PHYS_TWIDDLE(phys) (phys)
#endif
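
/*
 * Worked example (an illustration of PHYS_TWIDDLE above): only addresses
 * whose bits <43:42> are 01 are rewritten, by toggling both bits to 10:
 *
 *	phys = 0x40000001000  ->  0x80000001000   (bits <43:42>: 01 -> 10)
 *	phys = 0x00000001000  ->  unchanged       (bits <43:42> are 00)
 */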

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((page-PAGE_OFFSET) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpage) << (32-PAGE_SHIFT)) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline unsigned long pte_page(pte_t pte)
{ return PAGE_OFFSET + ((pte_val(pte) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pmd_page(pmd_t pmd)
{ return PAGE_OFFSET + ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline unsigned long pgd_page(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }
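
/*
 * Worked example (an illustration, assuming PAGE_SHIFT == 13): the PTE keeps
 * the page frame number in its upper 32 bits, so the conversions above shift
 * by 32 - 13 = 19 bits.  For physical address 0x2000 (PFN 1):
 *
 *	mk_pte_phys(0x2000, PAGE_SHARED):
 *		pte_val = (0x2000 << 19) | pgprot_val(PAGE_SHARED)
 *		        = 0x100000000   | pgprot_val(PAGE_SHARED)
 *
 *	pte_page() inverts it: (pte_val & _PFN_MASK) >> 19 = 0x2000, and
 *	adding PAGE_OFFSET yields the kernel virtual address of the page.
 */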

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(pte_t *ptep)	{ pte_val(*ptep) = 0; }

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_read(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOR); }
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOE); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOR; return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOE; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOR; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOE; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory. */
extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
	return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
	return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
}
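
/*
 * Illustrative sketch (an assumption about typical use, not part of this
 * header): resolving a virtual address to its PTE walks the three levels
 * with the helpers above.  The function name is hypothetical.
 */
#if 0	/* illustrative only */
static inline pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* first level */
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);		/* second level */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset(pmd, addr);		/* third level */
}
#endif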

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
#ifndef __SMP__
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#else
#include <asm/smp.h>
#define quicklists cpu_data[smp_processor_id()]
#endif
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)

extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		memset (ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

		/* the last entry self-maps the page directory */
		pgd_val(ret[PTRS_PER_PGD])
			= pte_val(mk_pte((unsigned long)ret, PAGE_KERNEL));
	}
	return ret;
}

extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
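
/*
 * Note on the quicklists above (an explanatory comment, not original to this
 * file): freed page-table pages are kept on singly linked free lists that
 * are threaded through the first word of each free page.  free_*_fast()
 * pushes a page by storing the old list head into that word; get_*_fast()
 * pops the list head, and get_pgd_fast() falls back to get_pgd_slow() when
 * the list is empty.
 */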

extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long address_premasked);

extern __inline__ pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pmd_t *)ret;
}

extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
}

extern __inline__ void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);

extern __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

extern __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

extern __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pmd(pgd_t *pgd);

#define pte_free_kernel(pte)	free_pte_fast(pte)
#define pte_free(pte)		free_pte_fast(pte)
#define pmd_free_kernel(pmd)	free_pmd_fast(pmd)
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc()		get_pgd_fast()

extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = get_pte_fast();

		if (!page)
			return get_pte_slow(pmd, address);
		pmd_set(pmd, page);
		return page + address;
	}
	if (pmd_bad(*pmd)) {
		__bad_pte(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + address;
}

extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
	address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	if (pgd_none(*pgd)) {
		pmd_t *page = get_pmd_fast();

		if (!page)
			return get_pmd_slow(pgd, address);
		pgd_set(pgd, page);
		return page + address;
	}
	if (pgd_bad(*pgd)) {
		__bad_pmd(pgd);
		return NULL;
	}
	return (pmd_t *) pgd_page(*pgd) + address;
}

#define pte_alloc_kernel	pte_alloc
#define pmd_alloc_kernel	pmd_alloc
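
/*
 * Illustrative sketch (an assumption about typical callers, not part of this
 * header): installing a mapping allocates any missing intermediate tables on
 * the way down.  The function name and the "page"/"prot" parameters are
 * hypothetical.
 */
#if 0	/* illustrative only */
static inline int example_map_one_page(struct mm_struct *mm, unsigned long addr,
				       unsigned long page, pgprot_t prot)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd = pmd_alloc(pgd, addr);	/* allocates a PMD page if needed */
	pte_t *pte;

	if (!pmd)
		return -1;
	pte = pte_alloc(pmd, addr);		/* allocates a PTE page if needed */
	if (!pte)
		return -1;
	set_pte(pte, mk_pte(page, prot));	/* install the final mapping */
	return 0;
}
#endif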

extern int do_check_pgt_cache(int, int);

extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
	struct task_struct * p;
	pgd_t *pgd;

	read_lock(&tasklist_lock);
	for_each_task(p) {
		if (!p->mm)
			continue;
		*pgd_offset(p->mm,address) = entry;
	}
	read_unlock(&tasklist_lock);
	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
		pgd[(address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)] = entry;
}
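
/*
 * Note on set_pgdir() above (an explanatory comment, not original to this
 * file): when a new kernel page-directory entry appears (e.g. when vmalloc
 * grows the kernel mappings), it is copied into every existing page
 * directory -- those of live tasks and those waiting for reuse on the pgd
 * quicklist -- so that all address spaces see the new kernel mapping.
 */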

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t pte)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define SWP_TYPE(entry) (((entry) >> 32) & 0xff)
#define SWP_OFFSET(entry) ((entry) >> 40)
#define SWP_ENTRY(type,offset) pte_val(mk_swap_pte((type),(offset)))
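
/*
 * Worked example (an illustration of the encoding above): swap type 2,
 * page offset 0x1234:
 *
 *	SWP_ENTRY(2, 0x1234) = (2UL << 32) | (0x1234UL << 40)
 *	                     = 0x0000000200000000 | 0x0012340000000000
 *	                     = 0x0012340200000000
 *
 *	SWP_TYPE(entry)   = (entry >> 32) & 0xff = 2
 *	SWP_OFFSET(entry) =  entry >> 40         = 0x1234
 *
 * The low 32 bits stay zero, so _PAGE_VALID is clear and pte_present()
 * correctly reports the page as not present.
 */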

#define module_map	vmalloc
#define module_unmap	vfree

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(0)
#define kern_addr_valid(addr)	(1)

#define io_remap_page_range(start, busaddr, size, prot) \
	remap_page_range(start, virt_to_phys(ioremap(busaddr)), size, prot)

#endif /* _ALPHA_PGTABLE_H */