/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2006,2007 by Greg White
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
/* Used by ARMv4 & ARMv5 CPUs with cp15 register and MMU */

/* Per-CPU cache configuration.  CPUs whose cache supports the cp15
 * "test and clean" operation define HAVE_TEST_AND_CLEAN_CACHE so the
 * whole-cache routines below can loop on that op instead of iterating
 * over every set/way index.
 *
 * NOTE(review): upstream also defines CACHE_SIZE (in kB) per CPU in the
 * branches below; those lines are not visible in this view.  Restore
 * them from the original source, or the index-loop variants of
 * clean_dcache/invalidate_dcache will not build. */
#if CONFIG_CPU == TCC7801 || CONFIG_CPU == AT91SAM9260
/* MMU present but unused */
#define HAVE_TEST_AND_CLEAN_CACHE
#elif CONFIG_CPU == DM320 || CONFIG_CPU == AS3525v2
#define HAVE_TEST_AND_CLEAN_CACHE
#elif CONFIG_CPU == AS3525
/* NOTE(review): CACHE_SIZE define missing from this view -- confirm */
#elif CONFIG_CPU == S3C2440
/* NOTE(review): CACHE_SIZE define missing from this view -- confirm */
#elif CONFIG_CPU == S5L8701
/* NOTE(review): CACHE_SIZE define missing from this view -- confirm */
#else
/* NOTE(review): #else restored -- without it the #error would fire even
 * for the recognized S5L8701 branch above */
#error Cache settings unknown for this CPU !
#endif /* CPU specific configuration */
#ifdef CACHE_SIZE
/* NOTE(review): "#ifdef CACHE_SIZE" opener restored -- the matching
 * "#endif CACHE_SIZE" below had no visible opener in this view. */

@ Set/way index cleaning, used when no test-and-clean op is available.
@ Index format: 31:26 = index, N:5 = segment, remainder = SBZ
@ assume 64-way set associative separate I/D caches
@ CACHE_SIZE = N (kB) = N*2^10 B
@ number of lines = N*2^(10-CACHEALIGN_BITS)
@ Segment loops = N*2^(10-CACHEALIGN_BITS-6) = N*2^(4-CACHEALIGN_BITS)
@ Segment loops = N/2^(CACHEALIGN_BITS - 4)
@ Segment loops = N/(1<<(CACHEALIGN_BITS - 4))

@ INDEX_STEPS = number of segment iterations covering one index value
#if CACHEALIGN_BITS == 4
#define INDEX_STEPS CACHE_SIZE
#elif CACHEALIGN_BITS == 5
#define INDEX_STEPS (CACHE_SIZE/2)
#endif /* CACHEALIGN_BITS */

@ assume 64-way set associative separate I/D caches (log2(64) == 6)
@ Index format: 31:26 = index, M:N = segment, remainder = SBZ
@ Segment bits = log2(cache size in bytes / cache line size in byte) - Index bits (== 6)

#endif /* CACHE_SIZE */
/** MMU routines **/

/*
 * void ttb_init(void);
 *
 * Loads the translation table base register (cp15 c2) with TTB_BASE_ADDR
 * and grants manager access to all 16 MMU domains (cp15 c3), disabling
 * permission checks.
 *
 * NOTE(review): the .global directive, entry label, r1 setup and return
 * were missing from this view and were restored -- verify against the
 * original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     ttb_init
    .type       ttb_init, %function
ttb_init:
    ldr     r0, =TTB_BASE_ADDR      @ r0 = first-level translation table base
    mvn     r1, #0                  @ r1 = 0xFFFFFFFF: every domain -> manager
    mcr     p15, 0, r0, c2, c0, 0   @ Set the TTB base address
    mcr     p15, 0, r1, c3, c0, 0   @ Set all domains to manager status
    bx      lr                      @
    .size   ttb_init, .-ttb_init
/*
 * void map_section(unsigned int pa, unsigned int va, int mb, int flags);
 *
 * Writes 'mb' consecutive 1MB first-level section entries into the
 * translation table, mapping [va, va + mb MB) onto [pa, pa + mb MB)
 * with the given cache/buffer flags.
 * In: r0 = pa, r1 = va, r2 = mb, r3 = flags.
 *
 * NOTE(review): the entry label, section-alignment/flag instructions and
 * the store loop were missing from this view and were restored from the
 * upstream structure -- verify against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     map_section
    .type       map_section, %function
map_section:
    @ pa &= (-1 << 20);  align section base to 1MB
    mov     r0, r0, lsr #20
    mov     r0, r0, lsl #20

    @ pa |= (flags | 0x412);
    @ 10: superuser - r/w, user - no access
    @ 4: should be "1"
    @ 3,2: Cache flags (flags (r3))
    @ 1: Section signature
    orr     r0, r0, r3
    orr     r0, r0, #0x410
    orr     r0, r0, #0x2

    @ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
    @ sections are 1MB size
    mov     r1, r1, lsr #20
    ldr     r3, =TTB_BASE_ADDR
    add     r1, r3, r1, lsl #0x2

    @ Add MB to pa, flags are already present in pa, but addition
    @ should not affect them
    @
    @ for( ; mb>0; mb--, pa += (1 << 20))
    @     *(ttbPtr++) = pa;
    cmp     r2, #0                  @ nothing to map?
    bxle    lr                      @
1:  @ map_loop @
    str     r0, [r1], #4            @ store section entry, bump table pointer
    add     r0, r0, #0x100000       @ next 1MB physical section
    subs    r2, r2, #1              @ mb--
    bgt     1b                      @ map_loop @
    bx      lr                      @
    .size   map_section, .-map_section
/*
 * void enable_mmu(void);
 *
 * Invalidates the TLB and both caches, then turns on the MMU, the data
 * cache and the instruction cache via the cp15 control register.
 *
 * NOTE(review): the entry label, the zeroing of r0 and the return
 * sequence were missing from this view and were restored -- verify
 * against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     enable_mmu
    .type       enable_mmu, %function
enable_mmu:
    mov     r0, #0                  @ cp15 invalidate ops take an SBZ operand
    mcr     p15, 0, r0, c8, c7, 0   @ invalidate TLB
    mcr     p15, 0, r0, c7, c7,0    @ invalidate both i and dcache
    mrc     p15, 0, r0, c1, c0, 0   @ read control register
    orr     r0, r0, #1              @ enable mmu bit
    orr     r0, r0, #1<<2           @ enable dcache
    orr     r0, r0, #1<<12          @ enable icache
    mcr     p15, 0, r0, c1, c0, 0   @ write control register back
    nop                             @ let the pipeline settle: instructions
    nop                             @ already fetched were fetched with the
    nop                             @ MMU still off
    bx      lr                      @
    .size   enable_mmu, .-enable_mmu
/** Cache coherency **/

/*
 * Invalidate DCache for this range
 * will do write back
 * void invalidate_dcache_range(const void *base, unsigned int size);
 *
 * In: r0 = base, r1 = size in bytes.  Cleans and invalidates every
 * 32-byte line touching [base, base+size).  The loop is unrolled 8x;
 * each conditional (hi) step runs only while end > current address.
 *
 * NOTE(review): the early-out, loop label, add/cmp interleaves, loop
 * branch and return were missing from this view and were restored --
 * verify against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     invalidate_dcache_range
    .type       invalidate_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
invalidate_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range: nothing to do
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ inv_start @
    mcr     p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    add     r0, r0, #32             @
    cmp     r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b                      @ inv_start @
    mov     r0, #0                  @ SBZ operand for the drain op
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   invalidate_dcache_range, .-invalidate_dcache_range
/*
 * clean DCache for this range
 * forces DCache writeback for the specified range
 * void clean_dcache_range(const void *base, unsigned int size);
 *
 * In: r0 = base, r1 = size in bytes.  Cleans (writes back, does not
 * invalidate) every 32-byte line touching [base, base+size).  The loop
 * is unrolled 8x; each conditional (hi) step runs only while end >
 * current address.
 *
 * NOTE(review): the early-out, loop label, add/cmp interleaves and
 * return were missing from this view and were restored -- verify
 * against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     clean_dcache_range
    .type       clean_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
clean_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range: nothing to do
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ clean_start @
    mcr     p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    add     r0, r0, #32             @
    cmp     r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b                      @ clean_start @
    mov     r0, #0                  @ SBZ operand for the drain op
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   clean_dcache_range, .-clean_dcache_range
#if 0
/* NOTE(review): "#if 0" opener restored -- the trailing
 * "#endif unused function" had no visible opener in this view and this
 * function is referenced nowhere here; confirm against the original. */
/*
 * Dump DCache for this range
 * will *NOT* do write back except for buffer edges not on a line boundary
 * void dump_dcache_range(const void *base, unsigned int size);
 *
 * In: r0 = base, r1 = size in bytes.  Partial lines at either edge are
 * cleaned+invalidated (to preserve neighbouring data); fully covered
 * lines are invalidated without writeback.
 *
 * NOTE(review): the early-out, loop label, addhi/cmphi interleaves and
 * return were missing from this view and were restored -- verify
 * against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     dump_dcache_range
    .type       dump_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
dump_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range: nothing to do
    tst     r0, #31                 @ Check first line for bits set
    bicne   r0, r0, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    addne   r0, r0, #32             @ Move to the next cache line
                                    @
    tst     r1, #31                 @ Check last line for bits set
    bicne   r1, r1, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r1, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    cmp     r1, r0                  @ end <= start now?
1:  @ dump_start @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @
    cmphi   r1, r0                  @
    bhi     1b                      @ dump_start @
    mov     r0, #0                  @ SBZ operand for the drain op
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   dump_dcache_range, .-dump_dcache_range
#endif /* unused function */
/*
 * Cleans entire DCache
 * void clean_dcache(void);
 *
 * Writes back every dirty line in the data cache, then drains the write
 * buffer.  Exported also as cpucache_flush.  Clobbers r0, r1.
 *
 * NOTE(review): the entry labels, the retry branch in the
 * test-and-clean path, #else, .endr and return were missing from this
 * view and were restored -- verify against the original source.
 */
    .section    .text, "ax", %progbits
    .align      2
    .global     clean_dcache
    .type       clean_dcache, %function
    .global     cpucache_flush      @ Alias
clean_dcache:
cpucache_flush:
#ifdef HAVE_TEST_AND_CLEAN_CACHE
    mrc     p15, 0, r15, c7, c10, 3 @ test and clean dcache
    bne     clean_dcache            @ retry until the whole cache is clean
    mov     r1, #0                  @ r1 = SBZ operand for the drain below
#else
    @ Clean entire DCache by set/way index
    mov     r1, #0x00000000         @ r1 = index part (31:26), starts at 0
1:  @ clean_start @
    mcr     p15, 0, r1, c7, c10, 2  @ Clean entry by index
    add     r0, r1, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    .rept INDEX_STEPS - 2 /* 2 steps already executed */
    add     r0, r0, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
    .endr
    adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
    bne     1b                      @ clean_start @
#endif /* HAVE_TEST_AND_CLEAN_CACHE */
    mcr     p15, 0, r1, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   clean_dcache, .-clean_dcache
/*
 * Invalidate entire DCache
 * will do writeback
 * void invalidate_dcache(void);
 *
 * Cleans and invalidates every data-cache line, then drains the write
 * buffer.  Clobbers r0, r1; returns with r1 = 0 (invalidate_idcache
 * relies on this).
 *
 * NOTE(review): the entry label, r1 setup in the test-and-clean path,
 * #else, .endr, loop branch and return were missing from this view and
 * were restored -- verify against the original source.
 */
    .section    .icode, "ax", %progbits
    .align      2
    .global     invalidate_dcache
    .type       invalidate_dcache, %function
invalidate_dcache:
#ifdef HAVE_TEST_AND_CLEAN_CACHE
    mrc     p15, 0, r15, c7, c14, 3 @ test, clean and invalidate dcache
    bne     invalidate_dcache       @ retry until the whole cache is done
    mov     r1, #0                  @ r1 = SBZ operand for the drain below
#else
    @ Clean and invalidate entire DCache by set/way index
    mov     r1, #0x00000000         @ r1 = index part (31:26), starts at 0
1:  @ inv_start @
    mcr     p15, 0, r1, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r1, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    .rept INDEX_STEPS - 2 /* 2 steps already executed */
    add     r0, r0, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
    .endr
    adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
    bne     1b                      @ inv_start @
#endif /* HAVE_TEST_AND_CLEAN_CACHE */
    mcr     p15, 0, r1, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   invalidate_dcache, .-invalidate_dcache
/*
 * Invalidate entire ICache and DCache
 * will do writeback
 * void invalidate_idcache(void);
 *
 * Cleans+invalidates the DCache via invalidate_dcache, then invalidates
 * the ICache.  Exported also as cpucache_invalidate.  Clobbers r0-r2.
 *
 * NOTE(review): the entry labels and return were missing from this view
 * and were restored -- verify against the original source.
 */
    .section    .icode, "ax", %progbits
    .align      2
    .global     invalidate_idcache
    .type       invalidate_idcache, %function
    .global     cpucache_invalidate @ Alias
invalidate_idcache:
cpucache_invalidate:
    mov     r2, lr                  @ save lr in r2 -- the bl below clobbers
                                    @ lr, and the callee only uses r0/r1
    bl      invalidate_dcache       @ Clean and invalidate entire DCache
    mcr     p15, 0, r1, c7, c5, 0   @ Invalidate ICache (r1=0 from call)
    mov     pc, r2                  @ return via saved link register
    .size   invalidate_idcache, .-invalidate_idcache