1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2009 by Maurus Cuelenaere
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
/* CP0 hazard barrier: pad the pipeline with NOPs so a coprocessor-0
 * register write has settled before a dependent TLB instruction runs
 * (classic pre-EHB MIPS workaround).
 * NOTE(review): the #define line and most NOPs were lost in this chunk;
 * reconstructed — confirm NOP count against the original source. */
#define BARRIER                              \
    __asm__ __volatile__(                    \
    "    .set    noreorder          \n"      \
    "    nop                        \n"      \
    "    nop                        \n"      \
    "    nop                        \n"      \
    "    nop                        \n"      \
    "    nop                        \n"      \
    "    nop                        \n"      \
    "    .set    reorder            \n");
/* Page size used for all wired mappings below: 4 KiB. */
#define DEFAULT_PAGE_SHIFT  PL_4K
#define DEFAULT_PAGE_MASK   PM_4K
/* Build a distinct EntryHi VPN2 inside KSEG0 for TLB slot 'idx'.
 * KSEG0 is unmapped, so these values can never match a real lookup —
 * used to park flushed entries on impossible addresses. */
#define UNIQUE_ENTRYHI(idx, ps) (A_K0BASE + ((idx) << (ps + 1)))
/* Field accessors for the EntryHi / EntryLo CP0 registers. */
#define ASID_MASK           M_EntryHiASID
#define VPN2_SHIFT          S_EntryHiVPN2
#define PFN_SHIFT           S_EntryLoPFN
#define PFN_MASK            0xffffff
46 static void local_flush_tlb_all(void)
48 unsigned long old_ctx
;
50 unsigned int old_irq
= disable_irq_save();
52 /* Save old context and create impossible VPN2 value */
53 old_ctx
= read_c0_entryhi();
58 /* Blast 'em all away. */
59 for(entry
= 0; entry
< 32; entry
++)
61 /* Make sure all entries differ. */
62 write_c0_entryhi(UNIQUE_ENTRYHI(entry
, DEFAULT_PAGE_SHIFT
));
63 write_c0_index(entry
);
68 write_c0_entryhi(old_ctx
);
73 static void add_wired_entry(unsigned long entrylo0
, unsigned long entrylo1
,
74 unsigned long entryhi
, unsigned long pagemask
)
77 unsigned long old_pagemask
;
78 unsigned long old_ctx
;
79 unsigned int old_irq
= disable_irq_save();
81 old_ctx
= read_c0_entryhi() & ASID_MASK
;
82 old_pagemask
= read_c0_pagemask();
83 wired
= read_c0_wired();
84 write_c0_wired(wired
+ 1);
85 write_c0_index(wired
);
87 write_c0_pagemask(pagemask
);
88 write_c0_entryhi(entryhi
);
89 write_c0_entrylo0(entrylo0
);
90 write_c0_entrylo1(entrylo1
);
95 write_c0_entryhi(old_ctx
);
97 write_c0_pagemask(old_pagemask
);
98 local_flush_tlb_all();
102 void map_address(unsigned long virtual, unsigned long physical
,
103 unsigned long length
, unsigned int cache_flags
)
105 unsigned long entry0
= (physical
& PFN_MASK
) << PFN_SHIFT
;
106 unsigned long entry1
= ((physical
+length
) & PFN_MASK
) << PFN_SHIFT
;
107 unsigned long entryhi
= virtual & ~VPN2_SHIFT
;
109 entry0
|= (M_EntryLoG
| M_EntryLoV
| (cache_flags
<< S_EntryLoC
) );
110 entry1
|= (M_EntryLoG
| M_EntryLoV
| (cache_flags
<< S_EntryLoC
) );
112 add_wired_entry(entry0
, entry1
, entryhi
, DEFAULT_PAGE_MASK
);
117 write_c0_pagemask(DEFAULT_PAGE_MASK
);
119 write_c0_framemask(0);
121 local_flush_tlb_all();
123 map_address(0x80000000, 0x80000000, 0x4000, K_CacheAttrC);
124 map_address(0x80004000, 0x80004000, MEMORYSIZE * 0x100000, K_CacheAttrC);
/* Drain the write buffer: force all pending stores out to memory. */
#define SYNC_WB() __asm__ __volatile__ ("sync")

/* Issue a single MIPS32 'cache' instruction with operation code 'op' on
 * the cache line containing 'addr'.  The "m" operand makes the line's
 * memory visible to the compiler so the access cannot be reordered or
 * eliminated.
 * NOTE(review): the 'cache %0, %1' line was lost in this chunk and has
 * been reconstructed — confirm against the original source. */
#define __CACHE_OP(op, addr)                 \
    __asm__ __volatile__(                    \
    "    .set    noreorder          \n"      \
    "    .set    mips32\n\t         \n"      \
    "    cache   %0, %1             \n"      \
    "    .set    mips0              \n"      \
    "    .set    reorder            \n"      \
    :                                        \
    : "i" (op), "m" (*(unsigned char *)(addr)))
140 void __flush_dcache_line(unsigned long addr
)
142 __CACHE_OP(DCHitWBInv
, addr
);
146 void __icache_invalidate_all(void)
150 asm volatile (".set noreorder \n"
152 "mtc0 $0, $28 \n" /* TagLo */
153 "mtc0 $0, $29 \n" /* TagHi */
157 for(i
=A_K0BASE
; i
<A_K0BASE
+CACHE_SIZE
; i
+=CACHE_LINE_SIZE
)
158 __CACHE_OP(ICIndexStTag
, i
);
/* Discard stale instructions: invalidate the whole I-cache.
 * NOTE(review): only the I-cache call is visible in this chunk; dropped
 * lines may have contained a D-cache writeback before it ("commit") —
 * confirm against the original source. */
void cpucache_commit_discard(void)
{
    __icache_invalidate_all();
}

/* On this core a plain invalidate is the same operation. */
void cpucache_invalidate(void) __attribute__((alias("cpucache_commit_discard")));
178 void __dcache_invalidate_all(void)
182 asm volatile (".set noreorder \n"
189 for (i
=A_K0BASE
; i
<A_K0BASE
+CACHE_SIZE
; i
+=CACHE_LINE_SIZE
)
190 __CACHE_OP(DCIndexStTag
, i
);
193 void __dcache_writeback_all(void) __attribute__ ((section(".icode")));
194 void __dcache_writeback_all(void)
197 for(i
=A_K0BASE
; i
<A_K0BASE
+CACHE_SIZE
; i
+=CACHE_LINE_SIZE
)
198 __CACHE_OP(DCIndexWBInv
, i
);
203 void dma_cache_wback_inv(unsigned long addr
, unsigned long size
)
205 unsigned long end
, a
;
207 if (size
>= CACHE_SIZE
)
208 __dcache_writeback_all();
211 unsigned long dc_lsize
= CACHE_LINE_SIZE
;
213 a
= addr
& ~(dc_lsize
- 1);
214 end
= (addr
+ size
- 1) & ~(dc_lsize
- 1);
215 for(; a
< end
; a
+= dc_lsize
)
216 __flush_dcache_line(a
);