/*
 * arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/init.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"		/* for TOP_PTE() */

#define CR_L2	(1 << 26)

#define CACHE_LINE_SIZE		32
#define CACHE_LINE_SHIFT	5
#define CACHE_WAY_PER_SET	8

#define CACHE_WAY_SIZE(l2ctype)	(8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype)	(CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
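
/*
 * CACHE_WAY_SIZE() decodes bits [11:8] of the L2 cache type register as
 * a power-of-two multiple of 8 KiB.  As an illustrative example (not
 * tied to any particular part): a field value of 5 gives 8192 << 5 =
 * 256 KiB per way, so CACHE_SET_SIZE() = 256 KiB / 32 B = 8192 set
 * indices to walk per way.
 */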

static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}

static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}

static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}
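
/*
 * Set/way operations encode the target line as (way << 29) | (set << 5):
 * the way number lives in the top three bits (8 ways per set) and the
 * set index starts at bit 5 (the cache line shift).  Walking every set
 * and way therefore touches every line in the L2.
 */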
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}

#ifdef CONFIG_HIGHMEM
#define l2_map_save_flags(x)		raw_local_save_flags(x)
#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
#else
#define l2_map_save_flags(x)		((x) = 0)
#define l2_map_restore_flags(x)		((void)(x))
#endif
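
/*
 * The MVA-based L2 operations above take virtual addresses, but the
 * outer_cache range hooks below are handed physical addresses.  With
 * CONFIG_HIGHMEM not every physical page has a permanent kernel
 * mapping, so l2_map_va() installs a temporary fixmap PTE whenever an
 * operation crosses into a new page; without highmem, __phys_to_virt()
 * on the linear map is enough.
 */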
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
				      unsigned long flags)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.  We also enable interrupts for a
		 * short while and disable them again to protect this
		 * mapping.
		 */
		unsigned long idx;
		raw_local_irq_restore(flags);
		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		raw_local_irq_restore(flags | PSR_I_BIT);
		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
		local_flush_tlb_kernel_page(va);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}
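
/*
 * In each of the range operations below, 'vaddr' starts out as -1 so
 * the first l2_map_va() call always sees a page change and installs a
 * fresh mapping; subsequent calls within the same page reuse it.
 */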
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	/*
	 * Clean and invalidate partial first cache line.  Cleaning first
	 * avoids throwing away dirty data that shares the line but lies
	 * outside the requested range.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_map_restore_flags(flags);

	dsb();
}

static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}

/*
 * Optimize the L2 flush-all operation by walking the cache in set/way
 * format rather than flushing line by line by address.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}
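
/*
 * Note the different CP15 opcodes: judging by how they are used here,
 * c7, c15, 2 cleans and invalidates an L2 line by set/way in one step,
 * whereas the range operation below issues a separate clean (c7, c11, 1)
 * and invalidate (c7, c7, 1) per line by MVA.
 */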
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}
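
/*
 * Hooking these routines into the generic outer_cache operations lets
 * the ARM DMA mapping code maintain the L2 by physical address without
 * knowing anything about the XScale3 controller itself.
 */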
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	if (!(get_cr() & CR_L2)) {
		pr_info("XScale3 L2 cache enabled.\n");
		adjust_cr(CR_L2, CR_L2);
		xsc3_l2_inv_all();
	}

	outer_cache.inv_range = xsc3_l2_inv_range;
	outer_cache.clean_range = xsc3_l2_clean_range;
	outer_cache.flush_range = xsc3_l2_flush_range;

	return 0;
}
core_initcall(xsc3_l2_init);