/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is an
 * extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache-coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Run with L2 enabled.
 */
#define L2_CACHE_ENABLE	1
/*
 * Enable the Branch Target Buffer (can cause crashes, see erratum #42).
 */
#define BTB_ENABLE	0
/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor was
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
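@ Note: \rd, LSR #32 is always zero, so the sub is effectively
@ "mov pc, \lr"; the data dependency on \rd stalls the pipeline until
@ the preceding CP15 write has completed, and writing to pc flushes the
@ already-fetched instructions.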
/*
 * This macro cleans and invalidates the entire xsc3 dcache by set/way.
 */
	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/inv set/way
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
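@ Loop layout: bits [31:30] of \rd select the way (the adds sets C when
@ the fourth way wraps), bits [12:5] select the set; 256 sets x 4 ways
@ x 32-byte lines covers the full 32 KiB (CACHESIZE) data cache.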
/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment.
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr
/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xsc3_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4
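@ IRQs and FIQs are masked up front so no interrupt handler can run
@ (and repopulate the caches) between the flush and the moment the I
@ and D caches are disabled in the control register.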
/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0
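@ Note: this assumes the caller runs it through a 1:1 (identity)
@ mapping of this code, since the final instructions and the jump to
@ 'loc' are fetched while address translation is being switched off.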
/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle.
 *
 * For now we do nothing but go to idle mode for every case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 *
 * MMG: Come back to this one.
 */
ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr
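@ Writing 1 to this CP14 register selects idle mode per the XScale
@ core's clock/power-mode interface; the core returns to run mode when
@ an interrupt is asserted.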
/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr
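@ The I-cache/BTB invalidate and the prefetch flush are only needed
@ when the flushed range may contain code (VM_EXEC); data-only mappings
@ get by with the D-cache clean alone.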
/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start    - start address (may not be aligned)
 * - end      - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags of the address space
 */
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr
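@ Rationale for MAX_AREA_SIZE: a 32 KiB range already takes 1024
@ per-line operations, the same as cleaning the whole 32 KiB D-cache by
@ set/way (256 sets x 4 ways), so at or above that size the
@ __flush_whole_cache fallback is no more expensive.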
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
	/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr
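@ Typical caller: the kernel has just written instructions (e.g. while
@ loading a module or setting up a signal trampoline) and must push
@ them from the D-cache to memory, then drop any stale I-cache copies
@ before they can be executed.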
/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - page aligned address
 */
ENTRY(xsc3_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/Invalidate D Cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, r0, c7, c5, 4		@ Prefetch Flush
	mov	pc, lr
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back first.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D entry
	mcrne	p15, 1, r1, c7, c11, 1		@ clean L2 D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D entry
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr
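@ Edge handling: if 'start' falls in the middle of a line, that line is
@ cleaned (written back) before the invalidate loop so the bytes below
@ 'start' are not lost; likewise for a partial line at 'end'.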
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D entry
	mcr	p15, 1, r0, c7, c11, 1		@ clean L2 D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ Clean/invalidate L1 D cache line
	mcr	p15, 1, r0, c7, c11, 1		@ Clean L2 D cache line
	mcr	p15, 1, r0, c7, c7, 1		@ Invalidate L2 D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr
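@ L1 has a combined clean+invalidate MVA operation (c7, c14, 1); for L2
@ the same effect takes two steps here, a clean (c7, c11, 1) followed
@ by an invalidate (c7, c7, 1) of the same line.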
ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_page
	.long	xsc3_dma_inv_range
	.long	xsc3_dma_clean_range
	.long	xsc3_dma_flush_range
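@ This table is accessed through struct cpu_cache_fns on multi-cache
@ builds, so the entry order above must match the member order of that
@ struct in asm/cacheflush.h.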
ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr
/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
#ifdef L2_CACHE_ENABLE
	orr	r0, r0, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip
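@ Ordering matters here: the whole D-cache is cleaned and the I-side
@ state dropped while the old tables are still live, the new TTB is
@ loaded, and only then are the TLBs invalidated; cpwait_ret keeps us
@ from returning before the CP15 writes have taken effect.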
/*
 * cpu_xsc3_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out.
 */
ENTRY(cpu_xsc3_set_pte)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xdf0			@ Keep C, B, coherency bits
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

#if L2_CACHE_ENABLE
	@ If it's cacheable, it needs to be in L2 also.
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE
	orreq	r2, r2, #PTE_EXT_TEX(0x5)
#endif

	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0				@ no -> fault

	str	r2, [r0]			@ hardware version
	mov	ip, #0
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mov	pc, lr
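@ Layout note: each page table keeps two views of every entry.  The str
@ with post-decrement stores the Linux pte at ptep and steps 2 KiB down
@ to the hardware table, where the translated hardware pte (r2) is
@ written and then cleaned out of the D-cache so the table walk, which
@ does not look in the L1 D-cache, sees the update.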
	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write Buffer
	mcr	p15, 0, ip, c7, c5, 4		@ Prefetch Flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
#if L2_CACHE_ENABLE
	orr	r4, r4, #0x18			@ cache the page table in L2
#endif
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	mov	r0, #1				@ Allow access to CP0 and CP13
	orr	r0, r0, #1 << 13		@ It's undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes
	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
#if L2_CACHE_ENABLE
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
#endif
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ .... .... .... ..A.
	orr	r0, r0, r6			@ .... .... .... .C.M
#if BTB_ENABLE
	orr	r0, r0, #0x00000800		@ ..VI Z..S .... ....
#endif
#if L2_CACHE_ENABLE
	orr	r0, r0, #0x04000000		@ L2 enable
#endif
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup
	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04003b02, mmuset=0x00003105, ucset=0x00001100
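@ The crval macro (proc-macros.S) emits the two words loaded by the
@ ldmia above: a mask of control register bits to clear and the bits to
@ set for the MMU case (mmuset); ucset is the variant used on no-MMU
@ (uclinux) builds.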
/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte
	.size	xsc3_processor_functions, . - xsc3_processor_functions
	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-Core3"
	.size	cpu_xsc3_name, . - cpu_xsc3_name
	.section ".proc.info.init", #alloc, #execinstr

	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info
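@ Field order here must match struct proc_info_list (asm/procinfo.h):
@ CPU id value/mask, the MMU flags, the __xsc3_setup init hook invoked
@ from the head.S boot code, architecture/ELF names, hwcaps, the CPU
@ name, and the per-CPU processor/TLB/user/cache function tables.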