/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005, 2006, 2007 Paul Mundt
 *
 * P1/P2 Section mapping definitions from map32.h, which was:
 *
 *	Copyright 2003 (c) Lineo Solutions,Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16
static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
static struct pmb_entry pmb_init_map[] = {
	/* vpn         ppn         flags (ub/sz/c/wt) */

	/* P1 Section Mappings */
	{ 0x80000000, 0x00000000, PMB_SZ_64M  | PMB_C, },
	{ 0x84000000, 0x04000000, PMB_SZ_64M  | PMB_C, },
	{ 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, },
	{ 0x90000000, 0x10000000, PMB_SZ_64M  | PMB_C, },
	{ 0x94000000, 0x14000000, PMB_SZ_64M  | PMB_C, },
	{ 0x98000000, 0x18000000, PMB_SZ_64M  | PMB_C, },

	/* P2 Section Mappings */
	{ 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, },
	{ 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
	{ 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M  | PMB_WT, },
};
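/*
 * Worked example (an illustrative note, not from the original source):
 * with the third wired entry above, a P1 access to 0x88001000 hits the
 * 128MB mapping at vpn 0x88000000 and translates to physical 0x08001000,
 * cached; the matching P2 entry at 0xa8000000 reaches the same physical
 * page uncached and write-through.
 */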
static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}
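/*
 * Example (a sketch for clarity, not part of the original file): for
 * entry 2, mk_pmb_addr(2) evaluates to PMB_ADDR | (2 << PMB_E_SHIFT)
 * and mk_pmb_data(2) to PMB_DATA | (2 << PMB_E_SHIFT), i.e. the
 * memory-mapped slots in the PMB address and data arrays for that entry.
 */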
static DEFINE_SPINLOCK(pmb_list_lock);
static struct pmb_entry *pmb_list;
static inline void pmb_list_add(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	/* Walk to the tail of the list and append */
	p = &pmb_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	pmbe->next = tmp;
	*p = pmbe;
}
static inline void pmb_list_del(struct pmb_entry *pmbe)
{
	struct pmb_entry **p, *tmp;

	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
		if (tmp == pmbe) {
			*p = tmp->next;
			return;
		}
}
struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
			    unsigned long flags)
{
	struct pmb_entry *pmbe;

	pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL);
	if (!pmbe)
		return ERR_PTR(-ENOMEM);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;

	spin_lock_irq(&pmb_list_lock);
	pmb_list_add(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	return pmbe;
}
void pmb_free(struct pmb_entry *pmbe)
{
	spin_lock_irq(&pmb_list_lock);
	pmb_list_del(pmbe);
	spin_unlock_irq(&pmb_list_lock);

	kmem_cache_free(pmb_cache, pmbe);
}
/*
 * Must be in P2 for __set_pmb_entry()
 */
int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
		    unsigned long flags, int *entry)
{
	unsigned int pos = *entry;

	if (unlikely(pos == PMB_NO_ENTRY))
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

repeat:
	if (unlikely(pos > NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map)) {
		pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);
		goto repeat;
	}

	ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));

	*entry = pos;

	return 0;
}
int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}
void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	/*
	 * Don't allow clearing of wired init entries, P1 or P2 access
	 * without a corresponding mapping in the PMB will lead to reset
	 * by the TLB.
	 */
	if (unlikely(entry < ARRAY_SIZE(pmb_init_map) ||
		     entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);

	back_to_cached();

	clear_bit(entry, &pmb_map);
}
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
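/*
 * Worked example (illustrative, not from the original source): a 160MB
 * pmb_remap() request is satisfied greedily from pmb_sizes[]; the first
 * pass maps one 128MB entry and one 16MB entry, and since the remaining
 * 16MB still meets the 0x1000000 threshold, a second pass maps the final
 * 16MB. The three entries are chained via ->link so that pmb_unmap() on
 * the base address can tear them all down.
 */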
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);
		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}
static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
{
	struct pmb_entry *pmbe = pmb;

	memset(pmb, 0, sizeof(struct pmb_entry));

	pmbe->entry = PMB_NO_ENTRY;
}
static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important, P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i =  ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
arch_initcall(pmb_init);
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = ctrl_inl(mk_pmb_addr(i));
		data = ctrl_inl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB" :
			 (size == PMB_SZ_64M)  ? " 64MB" :
			 (size == PMB_SZ_128M) ? "128MB" : "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);