MN10300: Allow some cacheflushes to be avoided if cache snooping is available
[linux-2.6/x86.git] arch/mn10300/mm/cache-flush-icache.c
blob 0e471e1cb2dac2700f063f4082ce5c3266cf14dc
/* Flush dcache and invalidate icache when the dcache is in writeback mode
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/**
 * flush_icache_page - Flush a page from the dcache and invalidate the icache
 * @vma: The VMA the page is part of.
 * @page: The page to be flushed.
 *
 * Write a page back from the dcache and invalidate the icache so that we can
 * run code from it that we've just written into it.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long start = page_to_phys(page);
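
        /* the per-page cache ops are handed the physical address of the page */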
        mn10300_dcache_flush_page(start);
        mn10300_icache_inv_page(start);
}
EXPORT_SYMBOL(flush_icache_page);

/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *                           single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
        unsigned long addr, size, off;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ppte, pte;

        /* work out how much of the page to flush */
        off = start & ~PAGE_MASK;
        size = end - start;

        /* get the physical address the page is mapped to from the page
         * tables */
        pgd = pgd_offset(current->mm, start);
        if (!pgd || !pgd_val(*pgd))
                return;

        pud = pud_offset(pgd, start);
        if (!pud || !pud_val(*pud))
                return;

        pmd = pmd_offset(pud, start);
        if (!pmd || !pmd_val(*pmd))
                return;
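
        /* pte_offset_map() may set up a temporary kernel mapping of the page
         * table, so copy the PTE out and drop the mapping again before
         * examining it */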
        ppte = pte_offset_map(pmd, start);
        if (!ppte)
                return;
        pte = *ppte;
        pte_unmap(ppte);

        if (pte_none(pte))
                return;

        page = pte_page(pte);
        if (!page)
                return;

        addr = page_to_phys(page);
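
        /* as used here, the "range2" cache primitives take a physical start
         * address and a length in bytes, rather than the virtual start/end
         * pair taken by the "range" variants used in flush_icache_range() */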

        /* flush the dcache and invalidate the icache coverage on that
         * region */
        mn10300_dcache_flush_range2(addr + off, size);
        mn10300_icache_inv_range2(addr + off, size);
}

/**
 * flush_icache_range - Globally flush dcache and invalidate icache for region
 * @start: The starting virtual address of the region.
 * @end: The ending virtual address of the region.
 *
 * This is used by the kernel to globally flush some code it has just written
 * from the dcache back to RAM and then to globally invalidate the icache over
 * that region so that that code can be run on all CPUs in the system.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
        unsigned long start_page, end_page;
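
        /* the region may straddle up to three parts of the address space: the
         * paged area below 0x80000000, the directly mapped kernel region from
         * 0x80000000 to 0x9fffffff and the uncached region at 0xa0000000 and
         * above, which needs no flushing at all */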
        if (end > 0x80000000UL) {
                /* addresses above 0xa0000000 do not go through the cache */
                if (end > 0xa0000000UL) {
                        end = 0xa0000000UL;
                        if (start >= end)
                                return;
                }

                /* kernel addresses between 0x80000000 and 0x9fffffff do not
                 * require page tables, so we just map such addresses
                 * directly */
                start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
                mn10300_dcache_flush_range(start_page, end);
                mn10300_icache_inv_range(start_page, end);
                if (start_page == start)
                        return;
                end = start_page;
        }
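
        /* whatever is left lies in the paged area and has to be resolved
         * through the page tables, a page at a time */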
        start_page = start & PAGE_MASK;
        end_page = end & PAGE_MASK;

        if (start_page == end_page) {
                /* the first and last bytes are on the same page */
                flush_icache_page_range(start, end);
        } else if (start_page + PAGE_SIZE == end_page) {
                /* split over two virtually contiguous pages */
                flush_icache_page_range(start, end_page);
                flush_icache_page_range(end_page, end);
        } else {
                /* more than 2 pages; just flush the entire cache */
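                /* (presumably cheaper than walking the page tables for each
                 * page in the region) */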
                mn10300_dcache_flush();
                mn10300_icache_inv();
        }
}
EXPORT_SYMBOL(flush_icache_range);
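
A minimal sketch of a typical caller, for illustration only and not part of the
file above: it assumes freshly generated instructions have just been copied
into an executable kernel buffer; example_install_code, buf, insns and len are
hypothetical names.

/* Illustrative only: write instructions into an executable buffer, then write
 * them back from the dcache and invalidate the icache before they are run.
 * example_install_code(), buf, insns and len are hypothetical. */
static void example_install_code(void *buf, const void *insns, size_t len)
{
        memcpy(buf, insns, len);
        flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
}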