Replace extern inline with static inline.
[linux-2.6/linux-mips.git] / include/asm-mips64/page.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/config.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
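/*
 * Illustrative note (not part of the original header): with
 * PAGE_SHIFT == 12 the constants above work out as
 *
 *   PAGE_SIZE == 1UL << 12 == 4096 == 0x1000
 *   PAGE_MASK == ~0xfffUL
 *
 * so (addr & PAGE_MASK) keeps the page-frame bits and drops the
 * offset-within-page bits.
 */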
#ifdef __KERNEL__

#ifndef __ASSEMBLY__

extern void (*_clear_page)(void * page);
extern void (*_copy_page)(void * to, void * from);

#define clear_page(page)	_clear_page((void *)(page))
#define copy_page(to, from)	_copy_page((void *)(to), (void *)(from))

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr))
		flush_data_cache_page((unsigned long)addr);
}

static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *to)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	copy_page(vto, vfrom);
	if (pages_do_alias((unsigned long)vto, vaddr))
		flush_data_cache_page((unsigned long)vto);
}
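/*
 * Illustrative note on the aliasing check above (the mask value is an
 * assumption, not taken from this header): pages_do_alias() returns
 * nonzero when two addresses land in different colours of a virtually
 * indexed cache.  If shm_align_mask were 0x3fff (a 16 kB aliasing
 * window), then
 *
 *   pages_do_alias(0x1000, 0x3000) == 0x2000  -> possible alias, flush
 *   pages_do_alias(0x1000, 0x5000) == 0x0000  -> same colour, no flush
 *
 * which is why clear_user_page()/copy_user_page() only call
 * flush_data_cache_page() when the kernel mapping and the user mapping
 * (vaddr) disagree.
 */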
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
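/*
 * Illustrative note: ptep_buddy() relies on PTEs being stored in
 * aligned pairs; XORing a pte_t pointer with sizeof(pte_t) (8 here)
 * flips between the two halves of such a pair.  For example, with a
 * hypothetical pointer value:
 *
 *   ptep_buddy((pte_t *)0x1000) == (pte_t *)0x1008
 *   ptep_buddy((pte_t *)0x1008) == (pte_t *)0x1000
 *
 * so applying it twice returns the original pointer.
 */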
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )
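/*
 * The constructors above are the inverses of the *_val() accessors,
 * e.g. pte_val(__pte(0x123)) == 0x123.  Wrapping the values in
 * one-member structs means mixing the types up (say, passing a pmd_t
 * where a pte_t is expected) is a compile-time error rather than a
 * silent bug.
 */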
/* Pure 2^n version of get_order */
static __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
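/*
 * Worked examples with the 4 kB page size above (illustrative only):
 *
 *   get_order(4096)  == 0   one page
 *   get_order(4097)  == 1   rounds up to two pages
 *   get_order(16384) == 2   four pages
 *
 * i.e. get_order(size) is the smallest n such that 2^n pages cover
 * size bytes.
 */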
#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)
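/*
 * For example, with the 4 kB pages above (illustrative only):
 *
 *   PAGE_ALIGN(0x1234) == 0x2000   rounded up to the next boundary
 *   PAGE_ALIGN(0x2000) == 0x2000   already aligned, unchanged
 */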
/*
 * This handles the memory map.
 */
#ifdef CONFIG_NONCOHERENT_IO
#define PAGE_OFFSET	0x9800000000000000UL
#else
#define PAGE_OFFSET	0xa800000000000000UL
#endif

#define __pa(x)		((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)		((void *)((unsigned long) (x) + PAGE_OFFSET))
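/*
 * Illustrative note: __pa()/__va() are a plain offset conversion
 * between kernel virtual addresses in the direct map and physical
 * addresses.  Taking the !CONFIG_NONCOHERENT_IO PAGE_OFFSET above:
 *
 *   __pa((void *)0xa800000000001000UL) == 0x1000
 *   __va(0x1000) == (void *)0xa800000000001000UL
 */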
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifndef CONFIG_DISCONTIGMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

#define pfn_valid(pfn)		((pfn) < max_mapnr)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#endif
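/*
 * Illustrative note: in the !CONFIG_DISCONTIGMEM case the struct page
 * for a direct-mapped kernel address is found by simple arithmetic,
 * e.g.
 *
 *   kaddr = (void *)(PAGE_OFFSET + 5 * PAGE_SIZE);
 *   virt_to_page(kaddr) == &mem_map[5]
 *   page_to_pfn(virt_to_page(kaddr)) == 5
 *
 * and pfn_valid()/virt_addr_valid() simply bound the pfn by max_mapnr.
 */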
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)
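/*
 * Illustrative note: UNCAC_ADDR()/CAC_ADDR() rebase an address between
 * the cached direct map at PAGE_OFFSET and the uncached window at
 * UNCAC_BASE (which is provided by another header).  Purely by the
 * algebra above:
 *
 *   UNCAC_ADDR(PAGE_OFFSET + x) == UNCAC_BASE + x
 *   CAC_ADDR(UNCAC_ADDR(addr))  == addr
 */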
#endif /* defined (__KERNEL__) */

#endif /* _ASM_PAGE_H */