4 /* PAGE_SHIFT determines the page size */
/* NOTE(review): the #define of PAGE_SHIFT itself did not survive this
 * extract (the numbering jumps from 4 to 6) -- it is presumably defined
 * immediately above; confirm against the full header before editing. */
6 #define PAGE_SIZE (1UL << PAGE_SHIFT)
/* Mask that clears the offset-within-page bits of an address
 * (all-ones above the page offset, zeros below). */
7 #define PAGE_MASK (~(PAGE_SIZE-1))
/* When defined, the page-table types below become single-member structs so
 * the compiler rejects accidental mixing of pte/pmd/pgd/pgprot values. */
13 #define STRICT_MM_TYPECHECKS
16 * A _lot_ of the kernel time is spent clearing pages, so
17 * do this as fast as we possibly can. Also, doing this
18 * as a separate inline function (rather than memset())
19 * results in clearer kernel profiles as we see _who_ is
20 * doing page clearing or copying.
/* Zero one page (PAGE_SIZE bytes) starting at 'page'.
 * NOTE(review): the function body -- the actual clearing loop -- is
 * missing from this extract (original lines between 25 and 41 were
 * dropped); only the declarations survive.  Do not edit this function
 * without the complete source. */
22 static inline void clear_page(void * page
)
/* Number of 64-byte chunks per page; together with the missing body this
 * suggests a loop unrolled 8 quadwords (8 * 8 bytes) per iteration. */
24 unsigned long count
= PAGE_SIZE
/64;
/* Quadword cursor over the page being cleared. */
25 unsigned long *ptr
= (unsigned long *)page
;
/* The user-page variant ignores the vaddr argument: no virtual-address
 * cache coloring is applied on this port. */
41 #define clear_user_page(page, vaddr) clear_page(page)
/* Copy one whole page (PAGE_SIZE bytes) from '_from' to '_to'.
 * NOTE(review): the copy loop itself is missing from this extract
 * (original lines between 50 and 73 were dropped); only the declarations
 * survive.  Do not edit without the complete source. */
43 static inline void copy_page(void * _to
, void * _from
)
/* Number of 64-byte chunks per page -- one iteration per 8 quadwords. */
45 unsigned long count
= PAGE_SIZE
/64;
/* Quadword cursors over destination and source pages. */
46 unsigned long *to
= (unsigned long *)_to
;
47 unsigned long *from
= (unsigned long *)_from
;
/* Eight scratch quadwords: enough registers for an 8-way unrolled
 * load/store sequence per 64-byte chunk (body not visible here). */
50 unsigned long a
,b
,c
,d
,e
,f
,g
,h
;
/* As with clear_user_page, the vaddr hint is unused on this port. */
73 #define copy_user_page(to, from, vaddr) copy_page(to, from)
75 #ifdef STRICT_MM_TYPECHECKS
77 * These are used to make use of C type-checking..
/* Each page-table level gets its own single-member struct so that
 * assigning, say, a pte_t where a pmd_t is expected is a compile-time
 * error instead of a silent bug. */
79 typedef struct { unsigned long pte
; } pte_t
;
80 typedef struct { unsigned long pmd
; } pmd_t
;
81 typedef struct { unsigned long pgd
; } pgd_t
;
82 typedef struct { unsigned long pgprot
; } pgprot_t
;
/* Accessors: unwrap the raw unsigned long from the struct wrappers. */
84 #define pte_val(x) ((x).pte)
85 #define pmd_val(x) ((x).pmd)
86 #define pgd_val(x) ((x).pgd)
87 #define pgprot_val(x) ((x).pgprot)
/* Constructors: wrap a raw value back into the checked type.
 * NOTE(review): there is no __pmd() counterpart here even though pmd_t
 * exists (the original numbering 89-91 is contiguous, so none was
 * dropped) -- presumably nothing on this port builds a pmd from a raw
 * value; confirm before adding one. */
89 #define __pte(x) ((pte_t) { (x) } )
90 #define __pgd(x) ((pgd_t) { (x) } )
91 #define __pgprot(x) ((pgprot_t) { (x) } )
/* NOTE(review): the '#else' that must separate the two branches of this
 * conditional (original line ~93), and -- judging by the numbering gap
 * 105->109 -- the plain __pte/__pmd/__pgd constructors of the non-strict
 * branch, did not survive extraction.  This span is incomplete as shown;
 * restore from the full source before editing. */
95 * .. while these make it easier on the compiler
/* Non-checking variants: bare unsigned longs, no wrapper structs. */
97 typedef unsigned long pte_t
;
98 typedef unsigned long pmd_t
;
99 typedef unsigned long pgd_t
;
100 typedef unsigned long pgprot_t
;
/* With bare types the accessors are identity macros. */
102 #define pte_val(x) (x)
103 #define pmd_val(x) (x)
104 #define pgd_val(x) (x)
105 #define pgprot_val(x) (x)
109 #define __pgprot(x) (x)
111 #endif /* STRICT_MM_TYPECHECKS */
/* Halt on a detected kernel bug: trap into PALcode via the Alpha
 * bugcheck call (PAL function 129, "bugchk"). */
113 #define BUG() __asm__ __volatile__("call_pal 129 # bugchk")
/* Page-specific bug report; the page argument is ignored and we simply
 * bugcheck like any other BUG(). */
114 #define PAGE_BUG(page) BUG()
116 /* Pure 2^n version of get_order */
/* Return the allocation order n such that 2^n pages cover 'size' bytes.
 * NOTE(review): only the prologue survives in this extract -- the
 * counting loop and return statement (original lines ~122-129) were
 * dropped.  Do not edit without the complete source. */
117 extern __inline__
int get_order(unsigned long size
)
/* Bias by -1 so an exact multiple of the page size does not round up,
 * then pre-shift; the missing loop presumably counts remaining shifts. */
121 size
= (size
-1) >> (PAGE_SHIFT
-1);
130 #endif /* !ASSEMBLY */
132 /* to align the pointer to the (next) page boundary */
/* Round 'addr' (an integer type, not a pointer) up to the next page
 * boundary; an already-aligned address is returned unchanged. */
133 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
/* Base of the kernel's direct (KSEG) mapping of physical memory.
 * NOTE(review): the '#else' line between the two values (original line
 * ~137) was dropped in extraction -- the second #define is the non-48-bit
 * KSEG alternative, not an unconditional redefinition. */
135 #ifdef USE_48_BIT_KSEG
136 #define PAGE_OFFSET 0xffff800000000000
138 #define PAGE_OFFSET 0xfffffc0000000000
/* Linear virtual <-> physical conversions: the KSEG mapping is a simple
 * constant offset, so these are pure arithmetic. */
141 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
142 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
/* struct page for a kernel virtual address: index mem_map by pfn. */
143 #define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
/* NOTE(review): 'page - mem_map' is a (signed) ptrdiff_t; a page below
 * mem_map would compare "valid" only if max_mapnr's type keeps the
 * comparison signed -- verify against the definition of max_mapnr. */
144 #define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
146 #endif /* __KERNEL__ */
148 #endif /* _ALPHA_PAGE_H */