1 /***************************************************************************
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
10 * Copyright (C) 2006,2007 by Greg White
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
20 ****************************************************************************/
24 #if CONFIG_CPU == IMX31L
25 /* TTB routines not used */
27 /** Cache coherency **/
30 * Invalidate DCache for this range
32 * void invalidate_dcache_range(const void *base, unsigned int size)
@ void invalidate_dcache_range(const void *base, unsigned int size)
@ ARMv6 (IMX31L) path: clean+invalidate the DCache over [base, base+size)
@ using the single MCRR block-transfer operation (c14 = clean and
@ invalidate DCache range by MVA), so no per-line loop is needed.
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r1, flags.
@ NOTE(review): this excerpt elides some original lines (e.g. the final
@ return instruction) — confirm against the full file.
34 .section .text, "ax", %progbits
36 .global invalidate_dcache_range
37 .type invalidate_dcache_range, %function
38 @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
39 invalidate_dcache_range:
40 add r1, r0, r1 @ size -> end
41 cmp r1, r0 @ end <= start?
@ All following ops use the 'hi' condition, so a zero/overflowing size
@ performs no cache maintenance at all.
42 subhi r1, r1, #1 @ round it down
44 mcrrhi p15, 0, r1, r0, c14 @ Clean and invalidate DCache range
45 mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
47 .size invalidate_dcache_range, .-invalidate_dcache_range
50 * clean DCache for this range
51 * forces DCache writeback for the specified range
52 * void clean_dcache_range(const void *base, unsigned int size);
@ void clean_dcache_range(const void *base, unsigned int size)
@ ARMv6 (IMX31L) path: force DCache writeback (clean, no invalidate) for
@ [base, base+size) via the MCRR range operation (c12 = clean DCache range).
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r1, flags.
@ NOTE(review): the function's entry label and return instruction are not
@ visible in this excerpt (elided lines) — confirm against the full file.
54 .section .text, "ax", %progbits
56 .global clean_dcache_range
57 .type clean_dcache_range, %function
58 @ MVA format: 31:5 = Modified virtual address, 4:0 = Ignored
60 add r1, r0, r1 @ size -> end
61 cmp r1, r0 @ end <= start?
@ 'hi'-conditional body: nothing is done for a zero/overflowing size.
62 subhi r1, r1, #1 @ round it down
64 mcrrhi p15, 0, r1, r0, c12 @ Clean DCache range
65 mcrhi p15, 0, r2, c7, c10, 4 @ Data synchronization barrier
67 .size clean_dcache_range, .-clean_dcache_range
70 * Dump DCache for this range
71 * will *NOT* do write back except for buffer edges not on a line boundary
72 * void dump_dcache_range(const void *base, unsigned int size);
@ void dump_dcache_range(const void *base, unsigned int size)
@ ARMv6 (IMX31L) path: discard (invalidate) cached data for [base,
@ base+size) WITHOUT writeback, except that a partially-covered first or
@ last cache line is cleaned+invalidated so bytes outside the buffer that
@ share a line are not lost.  The fully-covered interior is invalidated
@ with the MCRR range op (c6 = invalidate DCache range).
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r0, r1, flags.
@ NOTE(review): the entry label and return instruction are elided in this
@ excerpt — confirm against the full file.
74 .section .text, "ax", %progbits
76 .global dump_dcache_range
77 .type dump_dcache_range, %function
78 @ MVA format (mcr): 31:5 = Modified virtual address, 4:0 = SBZ
79 @ MVA format (mcrr): 31:5 = Modified virtual address, 4:0 = Ignored
81 add r1, r0, r1 @ size -> end
82 cmp r1, r0 @ end <= start?
@ Handle an unaligned first line: write it back before discarding.
84 tst r0, #31 @ Check first line for bits set
85 bicne r0, r0, #31 @ Clear low five bits (down)
86 mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
87 @ if not cache aligned
88 addne r0, r0, #32 @ Move to the next cache line
@ Handle an unaligned last line the same way.
90 tst r1, #31 @ Check last line for bits set
91 bicne r1, r1, #31 @ Clear low five bits (down)
92 mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
93 @ if not cache aligned
94 sub r1, r1, #32 @ Move to the previous cache line
@ Invalidate whatever fully-covered span remains between the edges.
95 cmp r1, r0 @ end < start now?
96 mcrrhs p15, 0, r1, r0, c6 @ Invalidate DCache range
98 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
100 .size dump_dcache_range, .-dump_dcache_range
104 * Cleans entire DCache
105 * void clean_dcache(void);
@ void clean_dcache(void)  — also exported as cpucache_flush
@ ARMv6 (IMX31L) path: write back the whole DCache with a single
@ "clean entire DCache" cp15 operation, then barrier.
@ NOTE(review): the clean_dcache:/cpucache_flush: entry labels and the
@ return instruction are elided in this excerpt — confirm against the
@ full file.
107 .section .text, "ax", %progbits
110 .type clean_dcache, %function
111 .global cpucache_flush @ Alias
115 mcr p15, 0, r0, c7, c10, 0 @ Clean entire DCache
116 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
118 .size clean_dcache, .-clean_dcache
121 * Invalidate entire DCache
123 * void invalidate_dcache(void);
@ void invalidate_dcache(void)
@ ARMv6 (IMX31L) path: clean+invalidate the entire DCache in one cp15
@ operation (dirty data is written back first), then barrier.
@ NOTE(review): the entry label and return instruction are elided in
@ this excerpt — confirm against the full file.
125 .section .text, "ax", %progbits
127 .global invalidate_dcache
128 .type invalidate_dcache, %function
131 mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
132 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
134 .size invalidate_dcache, .-invalidate_dcache
137 * Invalidate entire ICache and DCache
139 * void invalidate_idcache(void);
@ void invalidate_idcache(void)  — also exported as cpucache_invalidate
@ ARMv6 (IMX31L) path: clean+invalidate the DCache, invalidate the
@ ICache (which also flushes the branch target cache), then issue a DSB
@ and flush the prefetch buffer so subsequent instruction fetches see
@ fresh memory.
@ NOTE(review): entry labels and the return instruction are elided in
@ this excerpt — confirm against the full file.
141 .section .text, "ax", %progbits
143 .global invalidate_idcache
144 .type invalidate_idcache, %function
145 .global cpucache_invalidate @ Alias
149 mcr p15, 0, r0, c7, c14, 0 @ Clean and invalidate entire DCache
150 mcr p15, 0, r0, c7, c5, 0 @ Invalidate entire ICache
151 @ Also flushes the branch target cache
152 mcr p15, 0, r0, c7, c10, 4 @ Data synchronization barrier
153 mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer (IMB)
155 .size invalidate_idcache, .-invalidate_idcache
161 * void ttb_init(void);
@ void ttb_init(void)
@ Program the translation table base register (cp15 c2) with
@ TTB_BASE_ADDR and set the domain access control register (cp15 c3).
@ NOTE(review): the entry label, the instruction that loads r1 (the
@ domain-access value written below), and the return are elided in this
@ excerpt — confirm against the full file before relying on it.
163 .section .text, "ax", %progbits
166 .type ttb_init, %function
168 ldr r0, =TTB_BASE_ADDR @
170 mcr p15, 0, r0, c2, c0, 0 @ Set the TTB base address
171 mcr p15, 0, r1, c3, c0, 0 @ Set all domains to manager status
173 .size ttb_init, .-ttb_init
176 * void map_section(unsigned int pa, unsigned int va, int mb, int flags);
@ void map_section(unsigned int pa, unsigned int va, int mb, int flags)
@ Fill first-level translation-table entries so that 'mb' megabytes of
@ virtual address space starting at 'va' map to physical address 'pa'
@ as 1MB section descriptors, with access/cache bits from 'flags'.
@ In: r0 = pa, r1 = va, r2 = mb, r3 = flags (per the C prototype above).
@ NOTE(review): most of this function's body is elided in this excerpt
@ (the pa|flags merge, the va>>20 index computation, the descriptor
@ store, the loop label/branch, and the return) — only the table-pointer
@ setup and the per-iteration pa increment are visible.  Confirm against
@ the full file.
178 .section .text, "ax", %progbits
181 .type map_section, %function
188 @ pa |= (flags | 0x412);
190 @ 10: superuser - r/w, user - no access
192 @ 3,2: Cache flags (flags (r3))
193 @ 1: Section signature
198 @ unsigned int* ttbPtr = TTB_BASE + (va >> 20);
199 @ sections are 1MB size
201 ldr r3, =TTB_BASE_ADDR
202 add r1, r3, r1, lsl #0x2
204 @ Add MB to pa, flags are already present in pa, but addition
205 @ should not effect them
207 @ for( ; mb>0; mb--, pa += (1 << 20))
216 add r0, r0, #0x100000
221 .size map_section, .-map_section
224 * void enable_mmu(void);
@ void enable_mmu(void)
@ Invalidate the TLB and both caches, then set the MMU-enable (bit 0),
@ DCache-enable (bit 2) and ICache-enable (bit 12) bits in the cp15
@ control register.
@ NOTE(review): the entry label and the trailing instructions (e.g.
@ post-enable nops/return) are elided in this excerpt — confirm against
@ the full file.
226 .section .text, "ax", %progbits
229 .type enable_mmu, %function
232 mcr p15, 0, r0, c8, c7, 0 @ invalidate TLB
233 mcr p15, 0, r0, c7, c7,0 @ invalidate both i and dcache
234 mrc p15, 0, r0, c1, c0, 0 @
235 orr r0, r0, #1 @ enable mmu bit, i and dcache
236 orr r0, r0, #1<<2 @ enable dcache
237 orr r0, r0, #1<<12 @ enable icache
238 mcr p15, 0, r0, c1, c0, 0 @
244 .size enable_mmu, .-enable_mmu
247 /** Cache coherency **/
250 * Invalidate DCache for this range
252 * void invalidate_dcache_range(const void *base, unsigned int size);
@ void invalidate_dcache_range(const void *base, unsigned int size)
@ Generic (non-IMX31L) path: clean+invalidate [base, base+size) one
@ 32-byte cache line at a time by MVA, in an unrolled loop of 8 lines
@ per iteration, then drain the write buffer.
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r0, flags.
@ NOTE(review): this excerpt elides the loop bookkeeping between the
@ unrolled mcr lines (the "add r0, r0, #32 / cmp r1, r0" pairs), the
@ loop label/branch, and the return — confirm against the full file.
254 .section .text, "ax", %progbits
256 .global invalidate_dcache_range
257 .type invalidate_dcache_range, %function
258 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
259 invalidate_dcache_range:
260 add r1, r0, r1 @ size -> end
261 cmp r1, r0 @ end <= start?
263 bic r0, r0, #31 @ Align start to cache line (down)
265 mcr p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
@ Unrolled x8: each 'hi'-conditional op handles the next line while
@ r0 has not yet reached the end address.
268 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
271 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
274 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
277 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
280 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
283 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
286 mcrhi p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
291 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
293 .size invalidate_dcache_range, .-invalidate_dcache_range
296 * clean DCache for this range
297 * forces DCache writeback for the specified range
298 * void clean_dcache_range(const void *base, unsigned int size);
@ void clean_dcache_range(const void *base, unsigned int size)
@ Generic path: force DCache writeback (clean only, no invalidate) for
@ [base, base+size), one 32-byte line at a time by MVA, unrolled x8 per
@ loop iteration, then drain the write buffer.
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r0, flags.
@ NOTE(review): this excerpt elides the entry label, the per-line
@ "add/cmp" bookkeeping inside the unrolled loop, the "1:" loop label
@ targeted by "bhi 1b", and the return — confirm against the full file.
300 .section .text, "ax", %progbits
302 .global clean_dcache_range
303 .type clean_dcache_range, %function
304 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
306 add r1, r0, r1 @ size -> end
307 cmp r1, r0 @ end <= start?
309 bic r0, r0, #31 @ Align start to cache line (down)
311 mcr p15, 0, r0, c7, c10, 1 @ Clean line by MVA
@ Unrolled x8: 'hi'-conditional ops stop once r0 reaches the end.
314 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
317 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
320 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
323 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
326 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
329 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
332 mcrhi p15, 0, r0, c7, c10, 1 @ Clean line by MVA
335 bhi 1b @clean_start @
337 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
339 .size clean_dcache_range, .-clean_dcache_range
342 * Dump DCache for this range
343 * will *NOT* do write back except for buffer edges not on a line boundary
344 * void dump_dcache_range(const void *base, unsigned int size);
@ void dump_dcache_range(const void *base, unsigned int size)
@ Generic path: discard (invalidate) cached data for [base, base+size)
@ WITHOUT writeback, except that a partially-covered first or last line
@ is cleaned+invalidated so neighbouring bytes sharing the line are not
@ lost.  The fully-covered interior is invalidated line-by-line in an
@ unrolled x8 loop, then the write buffer is drained.
@ In:  r0 = base, r1 = size (bytes).  Clobbers: r0, r1, flags.
@ NOTE(review): the entry label, the per-line "add/cmp" bookkeeping in
@ the unrolled loop, the "1:" label targeted by "bhi 1b", and the return
@ are elided in this excerpt — confirm against the full file.
346 .section .text, "ax", %progbits
348 .global dump_dcache_range
349 .type dump_dcache_range, %function
350 @ MVA format: 31:5 = Modified virtual address, 4:0 = SBZ
352 add r1, r0, r1 @ size -> end
353 cmp r1, r0 @ end <= start?
@ Unaligned first line: write it back before discarding.
355 tst r0, #31 @ Check first line for bits set
356 bicne r0, r0, #31 @ Clear low five bits (down)
357 mcrne p15, 0, r0, c7, c14, 1 @ Clean and invalidate line by MVA
358 @ if not cache aligned
359 addne r0, r0, #32 @ Move to the next cache line
@ Unaligned last line: same treatment.
361 tst r1, #31 @ Check last line for bits set
362 bicne r1, r1, #31 @ Clear low five bits (down)
363 mcrne p15, 0, r1, c7, c14, 1 @ Clean and invalidate line by MVA
364 @ if not cache aligned
365 cmp r1, r0 @ end <= start now?
@ Unrolled x8 invalidate of the fully-covered interior lines.
367 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
370 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
373 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
376 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
379 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
382 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
385 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
388 mcrhi p15, 0, r0, c7, c6, 1 @ Invalidate line by MVA
391 bhi 1b @ dump_start @
393 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
395 .size dump_dcache_range, .-dump_dcache_range
398 * Cleans entire DCache
399 * void clean_dcache(void);
@ void clean_dcache(void)  — also exported as cpucache_flush
@ Generic path: write back the whole DCache by iterating every
@ set/way with "clean entry by index" (cp15 c7,c10,2), 8 entries per
@ unrolled iteration.  r0 walks the index encoding; the
@ "sub #0xe0 / adds #0x04000000" pair steps to the next index and sets
@ Z when the counter wraps to zero, ending the loop.
@ Clobbers: r0, flags.
@ NOTE(review): the clean_dcache:/cpucache_flush: entry labels, the "1:"
@ loop label targeted by "bne 1b", and the return are elided in this
@ excerpt — confirm against the full file.
401 .section .text, "ax", %progbits
404 .type clean_dcache, %function
405 .global cpucache_flush @ Alias
408 @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
409 mov r0, #0x00000000 @
411 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
412 add r0, r0, #0x00000020 @
413 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
414 add r0, r0, #0x00000020 @
415 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
416 add r0, r0, #0x00000020 @
417 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
418 add r0, r0, #0x00000020 @
419 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
420 add r0, r0, #0x00000020 @
421 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
422 add r0, r0, #0x00000020 @
423 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
424 add r0, r0, #0x00000020 @
425 mcr p15, 0, r0, c7, c10, 2 @ Clean entry by index
426 sub r0, r0, #0x000000e0 @
427 adds r0, r0, #0x04000000 @ will wrap to zero at loop end
428 bne 1b @ clean_start @
429 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
431 .size clean_dcache, .-clean_dcache
434 * Invalidate entire DCache
436 * void invalidate_dcache(void);
@ void invalidate_dcache(void)
@ Generic path: clean+invalidate the whole DCache by iterating every
@ set/way with "clean and invalidate entry by index" (cp15 c7,c14,2),
@ 8 entries per unrolled iteration; same index-walk scheme as
@ clean_dcache above (wraps to zero at loop end).
@ Clobbers: r0, flags.
@ NOTE(review): the entry label, the "1:" loop label, the loop-back
@ branch after the adds, and the return are elided in this excerpt —
@ confirm against the full file.
438 .section .text, "ax", %progbits
440 .global invalidate_dcache
441 .type invalidate_dcache, %function
443 @ Index format: 31:26 = index, 7:5 = segment, remainder = SBZ
444 mov r0, #0x00000000 @
446 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
447 add r0, r0, #0x00000020 @
448 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
449 add r0, r0, #0x00000020 @
450 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
451 add r0, r0, #0x00000020 @
452 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
453 add r0, r0, #0x00000020 @
454 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
455 add r0, r0, #0x00000020 @
456 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
457 add r0, r0, #0x00000020 @
458 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
459 add r0, r0, #0x00000020 @
460 mcr p15, 0, r0, c7, c14, 2 @ Clean and invalidate entry by index
461 sub r0, r0, #0x000000e0 @
462 adds r0, r0, #0x04000000 @ will wrap to zero at loop end
464 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
466 .size invalidate_dcache, .-invalidate_dcache
469 * Invalidate entire ICache and DCache
471 * void invalidate_idcache(void);
@ void invalidate_idcache(void)  — also exported as cpucache_invalidate
@ Generic path: reuse invalidate_dcache via bl (lr is preserved in r1
@ across the call since invalidate_dcache only uses r0), then invalidate
@ the entire ICache.  r0 is zero on return from the call, which is the
@ required SBZ value for the ICache-invalidate mcr.
@ NOTE(review): the entry labels and the return (presumably via r1) are
@ elided in this excerpt — confirm against the full file.
473 .section .text, "ax", %progbits
475 .global invalidate_idcache
476 .type invalidate_idcache, %function
477 .global cpucache_invalidate @ Alias
480 mov r1, lr @ save lr to r1, call uses r0 only
481 bl invalidate_dcache @ Clean and invalidate entire DCache
482 mcr p15, 0, r0, c7, c5, 0 @ Invalidate ICache (r0=0 from call)
484 .size invalidate_idcache, .-invalidate_idcache