/*
 * arch/score/mm/cache.c
 *
 * Score Processor version.
 *
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>

#include <asm/mmu_context.h>
/*
 * Just flush the whole page out of the D-cache!
 * You must ensure the page doesn't include instructions, because
 * this function will not flush the I-cache.
 * The addr must be cache aligned.
 */
static void flush_data_cache_page(unsigned long addr)
{
	unsigned int i;

	for (i = 0; i < PAGE_SIZE; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\n"
		: : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
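/*
 * Note on the magic cache-op numbers used in the asm blocks in this file
 * (inferred from the comments on flush_dcache_range() and
 * flush_icache_range() below, not from a Score architecture manual):
 * 0x0e appears to write back and invalidate one D-cache line, 0x1a to
 * drain the write buffer, and 0x02 to invalidate one I-cache line.
 */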
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &(page)->flags);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too. But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
/* called by update_mmu_cache. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC);

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &(page)->flags)) {
		addr = (unsigned long) page_address(page);
		if (exec)
			flush_data_cache_page(addr);
		clear_bit(PG_dcache_dirty, &(page)->flags);
	}
}
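/*
 * Together, flush_dcache_page() and __update_cache() implement a lazy
 * D-cache flush: when a page belongs to a mapping but is not yet mapped
 * into any user address space, flush_dcache_page() only marks it
 * PG_dcache_dirty, and the real flush is deferred until __update_cache()
 * sees the page being mapped (and only bothers for executable mappings,
 * where stale D-cache data could otherwise be fetched as instructions).
 */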
static inline void setup_protection_map(void)
{
	protection_map[0] = PAGE_NONE;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_NONE;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
}
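/*
 * Explanatory note (based on the generic mm code, not on anything specific
 * to this file): protection_map is indexed by the low four vm_flags bits
 * (VM_READ = bit 0, VM_WRITE = bit 1, VM_EXEC = bit 2, VM_SHARED = bit 3)
 * via vm_get_page_prot(), roughly:
 *
 *	prot = protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 *
 * so entry 10 (shared + write) is PAGE_SHARED, while the private writable
 * entries 2 and 3 are PAGE_COPY to force copy-on-write.
 */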
void __devinit cpu_cache_init(void)
{
	setup_protection_map();
}
void flush_icache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_icache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_dcache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_dcache_all\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_cache_all(void)
{
	__asm__ __volatile__(
	"la r8, flush_cache_all\n"
	"cache 0x10, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1f, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	"cache 0x1a, [r8, 0]\n"
	"nop\nnop\nnop\nnop\nnop\nnop\n"
	: : : "r8");
}
void flush_cache_mm(struct mm_struct *mm)
{
	if (!(mm->context))
		return;
	flush_cache_all();
}
/*
 * If we flush a range precisely, the processing may take a very long time.
 * We must check whether each page in the range is present; if a page is
 * present, we can flush the part of the range inside that page. Be careful:
 * the range may cross two pages, where one page is present and the other
 * is not.
 */
/*
 * The interface is provided in hopes that the port can find
 * a suitably efficient method for removing multiple page
 * sized regions from the cache.
 */
void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int exec = vma->vm_flags & VM_EXEC;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (!(mm->context))
		return;

	pgdp = pgd_offset(mm, start);
	pudp = pud_offset(pgdp, start);
	pmdp = pmd_offset(pudp, start);
	ptep = pte_offset(pmdp, start);

	while (start <= end) {
		unsigned long tmpend;

		pgdp = pgd_offset(mm, start);
		pudp = pud_offset(pgdp, start);
		pmdp = pmd_offset(pudp, start);
		ptep = pte_offset(pmdp, start);

		if (!(pte_val(*ptep) & _PAGE_PRESENT)) {
			start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
			continue;
		}
		tmpend = (start | (PAGE_SIZE - 1)) > end ?
			end : (start | (PAGE_SIZE - 1));

		flush_dcache_range(start, tmpend);
		if (exec)
			flush_icache_range(start, tmpend);
		start = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
	}
}
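/*
 * Worked example of the loop above (assuming 4 KiB pages): for
 * flush_cache_range(vma, 0x1000, 0x2800) with both pages present, the
 * first pass clamps tmpend to 0x1fff and flushes 0x1000-0x1fff, and start
 * then advances to 0x2000; the second pass clamps tmpend to end itself
 * (0x2800) and flushes 0x2000-0x2800, after which start becomes 0x3000,
 * which is beyond end, so the loop stops. Non-present pages are skipped.
 */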
void flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);

	if (exec)
		flush_icache_range(kaddr, kaddr + PAGE_SIZE);
}
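/*
 * Note: kaddr above is the kernel's own view of the physical page;
 * 0xa0000000 is presumably the base of Score's direct-mapped kernel
 * segment (an inference from how the code uses it, similar to MIPS-style
 * memory layouts, not something stated in this file).
 */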
void flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__(
	"cache 0x02, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x02, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x0d, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	"cache 0x0d, [%0, 0x4]\n"
	"nop\nnop\nnop\nnop\nnop\n"

	"cache 0x1a, [%0, 0]\n"
	"nop\nnop\nnop\nnop\nnop\n"
	: : "r" (addr));
}
/*
 * 1. Write back and invalidate a D-cache line.
 * 2. Drain the write buffer.
 * The range must be smaller than PAGE_SIZE.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* flush dcache to ram, and invalidate dcache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x0e, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		"cache 0x1a, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}
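/*
 * Worked example (assuming L1_CACHE_BYTES is 16 on this core):
 * flush_dcache_range(0x1008, 0x1058) rounds both bounds down to the line
 * size, giving start 0x1000, end 0x1050 and size 0x50, so the five lines
 * at 0x1000, 0x1010, 0x1020, 0x1030 and 0x1040 are written back and
 * invalidated. Note that because end is rounded down, the partial line
 * covering 0x1050-0x1057 is not flushed by this loop as written.
 */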
void flush_icache_range(unsigned long start, unsigned long end)
{
	int size, i;

	start = start & ~(L1_CACHE_BYTES - 1);
	end = end & ~(L1_CACHE_BYTES - 1);
	size = end - start;
	/* invalidate icache lines. */
	for (i = 0; i < size; i += L1_CACHE_BYTES) {
		__asm__ __volatile__(
		"cache 0x02, [%0, 0]\n"
		"nop\nnop\nnop\nnop\nnop\n"
		: : "r" (start));
		start += L1_CACHE_BYTES;
	}
}