linux-2.6/kmemtrace.git: include/asm-sh/mmu.h
#ifndef __MMU_H
#define __MMU_H

/* Default "unsigned long" context */
typedef unsigned long mm_context_id_t[NR_CPUS];

typedef struct {
#ifdef CONFIG_MMU
	mm_context_id_t		id;
	void			*vdso;
#else
	struct vm_list_struct	*vmlist;
	unsigned long		end_brk;
#endif
} mm_context_t;
/*
 * Privileged Space Mapping Buffer (PMB) definitions
 */
#define PMB_PASCR		0xff000070
#define PMB_IRMCR		0xff000078

#define PMB_ADDR		0xf6100000
#define PMB_DATA		0xf7100000
#define PMB_ENTRY_MAX		16
#define PMB_E_MASK		0x0000000f
#define PMB_E_SHIFT		8

#define PMB_SZ_16M		0x00000000
#define PMB_SZ_64M		0x00000010
#define PMB_SZ_128M		0x00000080
#define PMB_SZ_512M		0x00000090
#define PMB_SZ_MASK		PMB_SZ_512M
#define PMB_C			0x00000008
#define PMB_WT			0x00000001
#define PMB_UB			0x00000200
#define PMB_V			0x00000100

#define PMB_NO_ENTRY		(-1)
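/*
 * The PMB_SZ_*, PMB_C, PMB_WT, PMB_UB and PMB_V values above follow the
 * bit layout of a PMB data-array entry, so a mapping's flags word is
 * normally built by OR-ing one size value with the desired cache
 * attributes.  Purely illustrative sketch, not part of the original
 * header; whether callers also set PMB_V themselves is left to
 * arch/sh/mm/pmb.c:
 *
 *	unsigned long flags = PMB_SZ_128M | PMB_C;   (cached 128MB entry)
 */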
struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	struct pmb_entry *next;
	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};
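/*
 * Illustrative sketch, not part of the original header: the entry field
 * either names a specific hardware slot up front, or is left as
 * PMB_NO_ENTRY so that the PMB code searches for a free slot itself.
 * The addresses below are placeholders only.
 *
 *	struct pmb_entry fixed = {
 *		.vpn	= 0xa0000000,		   (placeholder virtual)
 *		.ppn	= 0x40000000,		   (placeholder physical)
 *		.flags	= PMB_SZ_16M | PMB_C,
 *		.entry	= 0,			   (claim slot 0 explicitly)
 *	};
 *
 *	struct pmb_entry any = fixed;
 *	any.entry = PMB_NO_ENTRY;		   (pick any free slot)
 */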
/* arch/sh/mm/pmb.c */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry);
int set_pmb_entry(struct pmb_entry *pmbe);
void clear_pmb_entry(struct pmb_entry *pmbe);
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags);
void pmb_free(struct pmb_entry *pmbe);
long pmb_remap(unsigned long virt, unsigned long phys,
	       unsigned long size, unsigned long flags);
void pmb_unmap(unsigned long addr);
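/*
 * Illustrative sketch, not part of the original header: map a 64MB
 * write-through window over a placeholder physical region with
 * pmb_remap() and tear it down again with pmb_unmap().  The cache
 * attributes passed in are assumed to be the PMB_* bits above, and the
 * return value is treated here as the number of bytes mapped; see
 * arch/sh/mm/pmb.c for the exact conventions.
 *
 *	long mapped;
 *
 *	mapped = pmb_remap(0xb0000000, 0x40000000, 64 << 20,
 *			   PMB_WT | PMB_UB);
 *	if (mapped < (64 << 20))
 *		goto fail;		   (mapping failed or was partial)
 *
 *	pmb_unmap(0xb0000000);
 */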

#endif /* __MMU_H */