/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 *
 * Copyright (C) 2006,2007 by Greg White
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include "cpu.h"

/* Used by ARMv4 & ARMv5 CPUs with cp15 register and MMU */

#if CONFIG_CPU == TCC7801 || CONFIG_CPU == AT91SAM9260
/* MMU present but unused */
#define HAVE_TEST_AND_CLEAN_CACHE

#elif CONFIG_CPU == DM320 || CONFIG_CPU == AS3525v2 || CONFIG_CPU == S5L8702
#define HAVE_TEST_AND_CLEAN_CACHE

#elif CONFIG_CPU == AS3525

#elif CONFIG_CPU == S3C2440

#elif CONFIG_CPU == S5L8700 || CONFIG_CPU == S5L8701

#elif CONFIG_CPU == IMX233
#define HAVE_TEST_AND_CLEAN_CACHE

#else
#error Cache settings unknown for this CPU !

#endif /* CPU specific configuration */

#ifdef CACHE_SIZE
@ Index format: 31:26 = index, N:5 = segment, remainder = SBZ
@ assume 64-way set associative separate I/D caches
@ CACHE_SIZE = N (kB) = N*2^10 B
@ number of lines = N*2^(10-CACHEALIGN_BITS)
@ Segment loops = N*2^(10-CACHEALIGN_BITS-6) = N*2^(4-CACHEALIGN_BITS)
@ Segment loops = N/2^(CACHEALIGN_BITS - 4)
@ Segment loops = N/(1<<(CACHEALIGN_BITS - 4))
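@
@ Worked example (illustrative, assuming CACHE_SIZE == 16 and 32-byte lines,
@ i.e. CACHEALIGN_BITS == 5):
@   number of lines = 16*2^(10-5)   = 512
@   Segment loops   = 16/(1<<(5-4)) = 8    (== INDEX_STEPS below)
@ i.e. each of the 64 index values covers 8 segments: 64*8 = 512 lines.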

#if CACHEALIGN_BITS == 4
#define INDEX_STEPS CACHE_SIZE
#elif CACHEALIGN_BITS == 5
#define INDEX_STEPS (CACHE_SIZE/2)
#endif /* CACHEALIGN_BITS */

@ assume 64-way set associative separate I/D caches (log2(64) == 6)
@ Index format: 31:26 = index, M:N = segment, remainder = SBZ
@ Segment bits = log2(cache size in bytes / cache line size in bytes) - Index bits (== 6)

#endif /* CACHE_SIZE */

/** MMU setup **/

/*
 * void ttb_init(void);
 */
    .section    .text.ttb_init, "ax", %progbits
    .align      2
    .global     ttb_init
    .type       ttb_init, %function
ttb_init:
    ldr     r0, =TTB_BASE_ADDR      @
    mvn     r1, #0                  @ all domain bits set -> manager access
    mcr     p15, 0, r0, c2, c0, 0   @ Set the TTB base address
    mcr     p15, 0, r1, c3, c0, 0   @ Set all domains to manager status
    bx      lr                      @
    .size   ttb_init, .-ttb_init
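
@ Note (ARMv4/v5 architectural requirement rather than anything enforced
@ here): the level-1 table at TTB_BASE_ADDR must be 16 KB aligned and holds
@ 4096 word entries, one per 1 MB of virtual address space. ttb_init() only
@ points CP15 at the table and opens up the domains; the descriptors
@ themselves are written by map_section() below.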

/*
 * void map_section(unsigned int pa, unsigned int va, int mb, int flags);
 */
    .section    .text.map_section, "ax", %progbits
    .align      2
    .global     map_section
    .type       map_section, %function
map_section:
    @ align to 1MB
    @ pa &= (-1 << 20);
    mov     r0, r0, lsr #20
    mov     r0, r0, lsl #20

    @ pa |= (flags | 0x412);
    @  10:  superuser - r/w, user - no access
    @  4:   should be "1"
    @  3,2: Cache flags (flags (r3))
    @  1:   Section signature
    orr     r0, r0, r3
    orr     r0, r0, #0x410
    orr     r0, r0, #0x2

    @ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
    @ sections are 1MB size
    mov     r1, r1, lsr #20
    ldr     r3, =TTB_BASE_ADDR
    add     r1, r3, r1, lsl #0x2

    @ Add MB to pa; flags are already present in pa, but the addition
    @ must not affect them.
    @ for( ; mb > 0; mb--, pa += (1 << 20))
    @     *(ttbPtr++) = pa;
    cmp     r2, #0                  @ anything to map?
    bxle    lr                      @
1:  @ loop @
    str     r0, [r1], #4            @ *ttbPtr++ = pa;
    add     r0, r0, #0x100000       @ pa += 1 MB
    subs    r2, r2, #1              @ mb--
    bgt     1b @ loop @
    bx      lr                      @
    .size   map_section, .-map_section
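
@ Note on the flags argument (standard ARMv4/v5 section-descriptor bits,
@ given for reference): bit 3 = C (cacheable) and bit 2 = B (bufferable), so
@ 0xC maps a section cached/buffered and 0x0 maps it uncached/unbuffered.
@ Callers normally pass a named constant defined elsewhere; the raw values
@ here are only an illustration.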

/*
 * void enable_mmu(void);
 */
    .section    .text.enable_mmu, "ax", %progbits
    .align      2
    .global     enable_mmu
    .type       enable_mmu, %function
enable_mmu:
    mov     r0, #0                  @
    mcr     p15, 0, r0, c8, c7, 0   @ invalidate TLB
    mcr     p15, 0, r0, c7, c7, 0   @ invalidate both icache and dcache
    mrc     p15, 0, r0, c1, c0, 0   @ read control register
    orr     r0, r0, #1              @ enable mmu bit
    orr     r0, r0, #1<<2           @ enable dcache
    orr     r0, r0, #1<<12          @ enable icache
    mcr     p15, 0, r0, c1, c0, 0   @ write control register back
    nop                             @ let the new settings take effect
    nop                             @
    nop                             @
    mov     pc, lr                  @
    .ltorg
    .size   enable_mmu, .-enable_mmu
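
@ Typical bring-up order (sketch only; the actual mappings are target
@ specific and live in each target's system/crt0 code, not here):
@   ttb_init();                             /* point CP15 at the L1 table  */
@   map_section(0, 0, 0x1000, 0);           /* flat-map 4 GB uncached      */
@   map_section(dram, dram, dram_mb, 0xC);  /* re-map DRAM cacheable       */
@   enable_mmu();                           /* then switch everything on   */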

/** Cache coherency **/
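
@ Rule of thumb for the range functions below (general cache-maintenance
@ practice, stated as guidance rather than taken from this file):
@   commit_dcache_range()          - before a peripheral/DMA reads a buffer
@                                    the CPU has written
@   discard_dcache_range()         - before the CPU reads a buffer a
@                                    peripheral/DMA has just written
@   commit_discard_dcache_range()  - when the buffer is both written and read
@                                    around the transfer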

/*
 * Write DCache back to RAM for the given range and remove cache lines
 * from DCache afterwards
 * void commit_discard_dcache_range(const void *base, unsigned int size);
 */
    .section    .text.commit_discard_dcache_range, "ax", %progbits
    .align      2
    .global     commit_discard_dcache_range
    .type       commit_discard_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
commit_discard_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range, nothing to do
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ clean and invalidate up to 8 lines per pass
    mcr     p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    add     r0, r0, #32             @ Next line
    cmp     r1, r0                  @ End reached?
.rept 7
    mcrhi   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
    addhi   r0, r0, #32             @ Next line
    cmphi   r1, r0                  @ End reached?
.endr
    bhi     1b                      @ more lines left
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   commit_discard_dcache_range, .-commit_discard_dcache_range

/*
 * Write DCache back to RAM for the given range
 * void commit_dcache_range(const void *base, unsigned int size);
 */
    .section    .text.commit_dcache_range, "ax", %progbits
    .align      2
    .global     commit_dcache_range
    .type       commit_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
commit_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range, nothing to do
    bic     r0, r0, #31             @ Align start to cache line (down)
1:  @ clean_start: clean up to 8 lines per pass
    mcr     p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    add     r0, r0, #32             @ Next line
    cmp     r1, r0                  @ End reached?
.rept 7
    mcrhi   p15, 0, r0, c7, c10, 1  @ Clean line by MVA
    addhi   r0, r0, #32             @ Next line
    cmphi   r1, r0                  @ End reached?
.endr
    bhi     1b @ clean_start @
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   commit_dcache_range, .-commit_dcache_range

/*
 * Remove cache lines for the given range from DCache
 * will *NOT* do write back, except for buffer edges not on a line boundary
 * void discard_dcache_range(const void *base, unsigned int size);
 */
    .section    .text.discard_dcache_range, "ax", %progbits
    .align      2
    .global     discard_dcache_range
    .type       discard_dcache_range, %function
    @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
discard_dcache_range:
    add     r1, r0, r1              @ size -> end
    cmp     r1, r0                  @ end <= start?
    bxls    lr                      @ empty range, nothing to do
    tst     r0, #31                 @ Check first line for bits set
    bicne   r0, r0, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r0, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    addne   r0, r0, #32             @ Move to the next cache line
                                    @
    tst     r1, #31                 @ Check last line for bits set
    bicne   r1, r1, #31             @ Clear low five bits (down)
    mcrne   p15, 0, r1, c7, c14, 1  @ Clean and invalidate line by MVA
                                    @ if not cache aligned
    cmp     r1, r0                  @ end <= start now?
1:  @ discard_start: invalidate up to 8 lines per pass
.rept 8
    mcrhi   p15, 0, r0, c7, c6, 1   @ Invalidate line by MVA
    addhi   r0, r0, #32             @ Next line
    cmphi   r1, r0                  @ End reached?
.endr
    bhi     1b @ discard_start @
    mov     r0, #0                  @
    mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   discard_dcache_range, .-discard_dcache_range
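
@ The edge handling above, roughly in C (illustrative sketch only; the
@ line operations stand for the corresponding CP15 instructions):
@   if (start & 31) { clean_and_invalidate_line(start & ~31); start = (start & ~31) + 32; }
@   if (end   & 31) { clean_and_invalidate_line(end   & ~31); end  &= ~31; }
@   for ( ; start < end; start += 32)
@       invalidate_line(start);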

/*
 * Write entire DCache back to RAM
 * void commit_dcache(void);
 */
    .section    .text.commit_dcache, "ax", %progbits
    .align      2
    .global     commit_dcache
    .type       commit_dcache, %function
commit_dcache:
#ifdef HAVE_TEST_AND_CLEAN_CACHE
    mrc     p15, 0, r15, c7, c10, 3 @ test and clean dcache
    bne     commit_dcache           @ loop until the whole cache is clean
    mov     r1, #0                  @
#else
    mov     r1, #0x00000000         @
1:  @ commit_start @
    mcr     p15, 0, r1, c7, c10, 2  @ Clean entry by index
    add     r0, r1, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
.rept INDEX_STEPS - 2 /* 2 steps already executed */
    add     r0, r0, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c10, 2  @ Clean entry by index
.endr
    adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
    bne     1b @ commit_start @
#endif /* HAVE_TEST_AND_CLEAN_CACHE */
    mcr     p15, 0, r1, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   commit_dcache, .-commit_dcache
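
@ How the index/segment loop above walks the cache (worked example, assuming
@ CACHEALIGN_BITS == 5 so INDEX_STEPS == CACHE_SIZE/2): r1 carries the index
@ in bits 31:26 and steps through 0x04000000, 0x08000000, ... until it wraps
@ to zero after 64 additions; for each index value, r0 = r1 +
@ k*(1<<CACHEALIGN_BITS) (k = 0 .. INDEX_STEPS-1) selects the segment, so
@ every line of the cache is cleaned exactly once.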

/*
 * Commit and discard entire DCache, will do writeback
 * void commit_discard_dcache(void);
 */
    .section    .icode.commit_discard_dcache, "ax", %progbits
    .align      2
    .global     commit_discard_dcache
    .type       commit_discard_dcache, %function
commit_discard_dcache:
#ifdef HAVE_TEST_AND_CLEAN_CACHE
    mrc     p15, 0, r15, c7, c14, 3 @ test, clean and invalidate dcache
    bne     commit_discard_dcache   @ loop until the whole cache is clean
    mov     r1, #0                  @
#else
    mov     r1, #0x00000000         @
1:  @ commit_discard_start @
    mcr     p15, 0, r1, c7, c14, 2  @ Clean and invalidate entry by index
    add     r0, r1, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
.rept INDEX_STEPS - 2 /* 2 steps already executed */
    add     r0, r0, #(1<<CACHEALIGN_BITS)
    mcr     p15, 0, r0, c7, c14, 2  @ Clean and invalidate entry by index
.endr
    adds    r1, r1, #0x04000000     @ will wrap to zero at loop end
    bne     1b @ commit_discard_start @
#endif /* HAVE_TEST_AND_CLEAN_CACHE */
    mcr     p15, 0, r1, c7, c10, 4  @ Drain write buffer
    bx      lr                      @
    .size   commit_discard_dcache, .-commit_discard_dcache

/*
 * Discards the entire ICache, and commits + discards the entire DCache
 * void commit_discard_idcache(void);
 */
    .section    .icode.commit_discard_idcache, "ax", %progbits
    .align      2
    .global     commit_discard_idcache
    .type       commit_discard_idcache, %function
commit_discard_idcache:
    mov     r2, lr                  @ save lr to r2; the call clobbers only r0/r1
    bl      commit_discard_dcache   @ commit and discard entire DCache
    mcr     p15, 0, r1, c7, c5, 0   @ Invalidate ICache (r1 = 0 from the call)
    bx      r2                      @ return via the saved lr
    .size   commit_discard_idcache, .-commit_discard_idcache
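
@ Typical use (general guidance, not taken from this file): call
@ commit_discard_idcache() after writing or relocating code in RAM, e.g.
@ after loading a codec/plugin or before jumping to a freshly copied image,
@ so the ICache cannot return stale instructions.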