/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
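
/*
 * Per-CPU-type cache flush hooks.  These all start out pointing at
 * cache_noop; the family-specific cache_init routines invoked from
 * cpu_cache_init() below are expected to install real implementations
 * for whatever cache was probed.
 */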
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
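
/*
 * Low-level region primitives over an arbitrary virtual range:
 * write-back only, purge (write-back + invalidate), and
 * invalidate-only.
 */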
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);
static inline void noop__flush_region(void *start, int size)
{
}
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
	preempt_disable();

	/*
	 * Run the op on the remote CPUs first, then locally, with
	 * preemption disabled so we cannot migrate in between.
	 */
	smp_call_function(func, info, wait);
	func(info);

	preempt_enable();
}
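
/*
 * On VIPT caches with aliases (dcache.n_aliases != 0), a physical page
 * can be resident in the cache at several virtual colours.  The copy
 * helpers below therefore write through a kernel mapping that
 * kmap_coherent() gives the same colour as the user mapping when the
 * page is mapped and clean; otherwise they use the kernel's own
 * mapping and mark the page PG_dcache_dirty, deferring the flush to
 * __update_cache().
 */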
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
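
/*
 * copy_user_highpage() backs copy-on-write faults: it copies 'from'
 * into 'to' for a user mapping at 'vaddr', again preferring a
 * colour-matched kmap_coherent() mapping while the source may still
 * be live in the cache.
 */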
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
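
/*
 * Deferred-flush resolution: when a page marked PG_dcache_dirty is
 * mapped into userspace, purge the kernel-side cache lines if the two
 * mappings land on different colours.  Nothing to do on CPUs without
 * aliases.
 */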
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_purge_region((void *)addr, PAGE_SIZE);
		}
	}
}
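
/*
 * Anonymous pages have no address_space through which dirtiness can be
 * tracked, so when the kernel and user addresses alias we purge right
 * away rather than deferring.
 */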
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
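
/*
 * The generic cacheflush entry points below simply broadcast the
 * corresponding local_flush_*() operation to every online CPU via
 * cacheop_on_each_cpu().
 */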
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
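
/*
 * Worked example (illustrative values, not taken from any particular
 * probe): a direct-mapped 16KiB cache with 32-byte lines has 512 sets
 * and an entry_shift of 5, so with 4KiB pages:
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1             = 4
 *
 * i.e. the same physical page can sit at any of four cache colours.
 */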
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);

	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
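
/*
 * Boot-time setup: compute the alias parameters for each probed cache,
 * default every region flush hook to a no-op, and then let the matching
 * CPU family's cache_init routine replace the no-ops with real
 * implementations.
 */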
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}