Initial maemo platform support
[maemo-rb.git] / firmware / target / mips / mmu-mips.c
blob 31b07409d539f73d6e7ab43bf9ee72339ce91f9c
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2009 by Maurus Cuelenaere
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
22 #include "config.h"
23 #include "mips.h"
24 #include "mipsregs.h"
25 #include "system.h"
26 #include "mmu-mips.h"
/* Pipeline hazard barrier: six NOPs with assembler reordering disabled,
 * giving CP0 register writes time to take effect before a dependent
 * TLB or cache instruction is issued. */
#define BARRIER                         \
    __asm__ __volatile__(               \
    "    .set    noreorder    \n"       \
    "    nop                  \n"       \
    "    nop                  \n"       \
    "    nop                  \n"       \
    "    nop                  \n"       \
    "    nop                  \n"       \
    "    nop                  \n"       \
    "    .set    reorder      \n");

/* Default page size used for TLB entries: 4 kB (values from mips.h). */
#define DEFAULT_PAGE_SHIFT      PL_4K
#define DEFAULT_PAGE_MASK       PM_4K

/* Distinct, harmless KSEG0 EntryHi value for TLB slot 'idx' — used to
 * give every flushed entry a unique VPN2 so no two entries match. */
#define UNIQUE_ENTRYHI(idx, ps) (A_K0BASE + ((idx) << (ps + 1)))

#define ASID_MASK               M_EntryHiASID
#define VPN2_SHIFT              S_EntryHiVPN2
#define PFN_SHIFT               S_EntryLoPFN
#define PFN_MASK                0xffffff
46 static void local_flush_tlb_all(void)
48 unsigned long old_ctx;
49 int entry;
50 unsigned int old_irq = disable_irq_save();
52 /* Save old context and create impossible VPN2 value */
53 old_ctx = read_c0_entryhi();
54 write_c0_entrylo0(0);
55 write_c0_entrylo1(0);
56 BARRIER;
58 /* Blast 'em all away. */
59 for(entry = 0; entry < 32; entry++)
61 /* Make sure all entries differ. */
62 write_c0_entryhi(UNIQUE_ENTRYHI(entry, DEFAULT_PAGE_SHIFT));
63 write_c0_index(entry);
64 BARRIER;
65 tlb_write_indexed();
67 BARRIER;
68 write_c0_entryhi(old_ctx);
70 restore_irq(old_irq);
73 static void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
74 unsigned long entryhi, unsigned long pagemask)
76 unsigned long wired;
77 unsigned long old_pagemask;
78 unsigned long old_ctx;
79 unsigned int old_irq = disable_irq_save();
81 old_ctx = read_c0_entryhi() & ASID_MASK;
82 old_pagemask = read_c0_pagemask();
83 wired = read_c0_wired();
84 write_c0_wired(wired + 1);
85 write_c0_index(wired);
86 BARRIER;
87 write_c0_pagemask(pagemask);
88 write_c0_entryhi(entryhi);
89 write_c0_entrylo0(entrylo0);
90 write_c0_entrylo1(entrylo1);
91 BARRIER;
92 tlb_write_indexed();
93 BARRIER;
95 write_c0_entryhi(old_ctx);
96 BARRIER;
97 write_c0_pagemask(old_pagemask);
98 local_flush_tlb_all();
99 restore_irq(old_irq);
102 void map_address(unsigned long virtual, unsigned long physical,
103 unsigned long length, unsigned int cache_flags)
105 unsigned long entry0 = (physical & PFN_MASK) << PFN_SHIFT;
106 unsigned long entry1 = ((physical+length) & PFN_MASK) << PFN_SHIFT;
107 unsigned long entryhi = virtual & ~VPN2_SHIFT;
109 entry0 |= (M_EntryLoG | M_EntryLoV | (cache_flags << S_EntryLoC) );
110 entry1 |= (M_EntryLoG | M_EntryLoV | (cache_flags << S_EntryLoC) );
112 add_wired_entry(entry0, entry1, entryhi, DEFAULT_PAGE_MASK);
115 void mmu_init(void)
117 write_c0_pagemask(DEFAULT_PAGE_MASK);
118 write_c0_wired(0);
119 write_c0_framemask(0);
121 local_flush_tlb_all();
123 map_address(0x80000000, 0x80000000, 0x4000, K_CacheAttrC);
124 map_address(0x80004000, 0x80004000, MEMORYSIZE * 0x100000, K_CacheAttrC);
/* Drain the write buffer so preceding cache writebacks reach memory. */
#define SYNC_WB() __asm__ __volatile__ ("sync")

/* Issue one MIPS32 'cache' instruction: 'op' selects the operation
 * (e.g. DCHitWBInv), 'addr' the line — passed as a memory operand so
 * the compiler materialises a valid base+offset address. */
#define __CACHE_OP(op, addr)                \
    __asm__ __volatile__(                   \
    "    .set    noreorder    \n"           \
    "    .set    mips32\n\t   \n"           \
    "    cache    %0, %1      \n"           \
    "    .set    mips0        \n"           \
    "    .set    reorder      \n"           \
    :                                       \
    : "i" (op), "m" (*(unsigned char *)(addr)))
140 void __flush_dcache_line(unsigned long addr)
142 __CACHE_OP(DCHitWBInv, addr);
143 SYNC_WB();
146 void __icache_invalidate_all(void)
148 unsigned int i;
150 asm volatile (".set noreorder \n"
151 ".set mips32 \n"
152 "mtc0 $0, $28 \n" /* TagLo */
153 "mtc0 $0, $29 \n" /* TagHi */
154 ".set mips0 \n"
155 ".set reorder \n"
157 for(i=A_K0BASE; i<A_K0BASE+CACHE_SIZE; i+=CACHE_LINE_SIZE)
158 __CACHE_OP(ICIndexStTag, i);
160 /* invalidate btb */
161 asm volatile (
162 ".set mips32 \n"
163 "mfc0 %0, $16, 7 \n"
164 "nop \n"
165 "ori %0, 2 \n"
166 "mtc0 %0, $16, 7 \n"
167 ".set mips0 \n"
169 : "r" (i));
/* Discard cached instructions so newly written code is fetched fresh. */
void cpucache_commit_discard(void)
{
    __icache_invalidate_all();
}

/* cpucache_invalidate() is the same operation under its generic name. */
void cpucache_invalidate(void) __attribute__((alias("cpucache_commit_discard")));
178 void __dcache_invalidate_all(void)
180 unsigned int i;
182 asm volatile (".set noreorder \n"
183 ".set mips32 \n"
184 "mtc0 $0, $28 \n"
185 "mtc0 $0, $29 \n"
186 ".set mips0 \n"
187 ".set reorder \n"
189 for (i=A_K0BASE; i<A_K0BASE+CACHE_SIZE; i+=CACHE_LINE_SIZE)
190 __CACHE_OP(DCIndexStTag, i);
193 void __dcache_writeback_all(void) __attribute__ ((section(".icode")));
194 void __dcache_writeback_all(void)
196 unsigned int i;
197 for(i=A_K0BASE; i<A_K0BASE+CACHE_SIZE; i+=CACHE_LINE_SIZE)
198 __CACHE_OP(DCIndexWBInv, i);
200 SYNC_WB();
203 void dma_cache_wback_inv(unsigned long addr, unsigned long size)
205 unsigned long end, a;
207 if (size >= CACHE_SIZE)
208 __dcache_writeback_all();
209 else
211 unsigned long dc_lsize = CACHE_LINE_SIZE;
213 a = addr & ~(dc_lsize - 1);
214 end = (addr + size - 1) & ~(dc_lsize - 1);
215 for(; a < end; a += dc_lsize)
216 __flush_dcache_line(a);