MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] / arch / nios2nommu / mm / memory.c
blobb2ffdf16d148f1749bd72ceb1184912a790ec7ef
1 /*
2 * linux/arch/nios2nommu/mm/memory.c
4 * Copyright (C) 1995 Hamish Macdonald
5 * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
6 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
7 * Copyright (C) 2004 Microtronix Datacom Ltd.
9 * Based on:
11 * linux/arch/m68k/mm/memory.c
13 * All rights reserved.
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
23 * NON INFRINGEMENT. See the GNU General Public License for more
24 * details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
32 #include <linux/mm.h>
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 #include <linux/types.h>
36 #include <linux/slab.h>
38 #include <asm/setup.h>
39 #include <asm/segment.h>
40 #include <asm/page.h>
41 #include <asm/pgtable.h>
42 #include <asm/system.h>
43 #include <asm/traps.h>
44 #include <asm/io.h>
47 * cache_clear() semantics: Clear any cache entries for the area in question,
48 * without writing back dirty entries first. This is useful if the data will
49 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
50 * _physical_ address.
53 void cache_clear (unsigned long paddr, int len)
59 * Define cache invalidate functions. The instruction and data cache
60 * will need to be flushed. Write back the dirty data cache and invalidate
61 * the instruction cache for the range.
65 static __inline__ void cache_invalidate_inst(unsigned long paddr, int len)
67 unsigned long sset, eset;
69 sset = (paddr & (nasys_icache_size - 1)) & (~(nasys_icache_line_size - 1));
70 eset = (((paddr & (nasys_icache_size - 1)) + len) & (~(nasys_icache_line_size - 1))) + nasys_icache_line_size;
72 __asm__ __volatile__ (
73 "1:\n\t"
74 "flushi %0\n\t"
75 "add %0,%0,%2\n\t"
76 "blt %0,%1,1b\n\t"
77 "flushp\n\t"
78 : : "r" (sset), "r" (eset), "r" (nasys_icache_line_size));
82 static __inline__ void cache_invalidate_data(unsigned long paddr, int len)
84 unsigned long sset, eset;
86 sset = (paddr & (nasys_dcache_size - 1)) & (~(nasys_dcache_line_size - 1));
87 eset = (((paddr & (nasys_dcache_size - 1)) + len) & (~(nasys_dcache_line_size - 1))) + nasys_dcache_line_size;
89 __asm__ __volatile__ (
90 "1:\n\t"
91 "flushd 0(%0)\n\t"
92 "add %0,%0,%2\n\t"
93 "blt %0,%1,1b\n\t"
94 : : "r" (sset),"r" (eset), "r" (nasys_dcache_line_size));
98 static __inline__ void cache_invalidate_lines(unsigned long paddr, int len)
100 unsigned long sset, eset;
102 sset = (paddr & (nasys_dcache_size - 1)) & (~(nasys_dcache_line_size - 1));
103 eset = (((paddr & (nasys_dcache_size - 1)) + len) & (~(nasys_dcache_line_size - 1))) + nasys_dcache_line_size;
105 __asm__ __volatile__ (
106 "1:\n\t"
107 "flushd 0(%0)\n\t"
108 "add %0,%0,%2\n\t"
109 "blt %0,%1,1b\n\t"
110 : : "r" (sset),"r" (eset), "r" (nasys_dcache_line_size));
112 sset = (paddr & (nasys_icache_size - 1)) & (~(nasys_icache_line_size - 1));
113 eset = (((paddr & (nasys_icache_size - 1)) + len) & (~(nasys_icache_line_size - 1))) + nasys_icache_line_size;
115 __asm__ __volatile__ (
116 "1:\n\t"
117 "flushi %0\n\t"
118 "add %0,%0,%2\n\t"
119 "blt %0,%1,1b\n\t"
120 "flushp\n\t"
121 : : "r" (sset), "r" (eset), "r" (nasys_icache_line_size));
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	/* This port writes back AND invalidates both caches for the
	 * range (the "may" case above). */
	cache_invalidate_lines(paddr, len);
}
/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address.
 */

void cache_push_v (unsigned long vaddr, int len)
{
	/* NOTE(review): vaddr is handed straight to a routine that takes
	 * a physical address -- presumably valid because this no-MMU
	 * port maps virtual == physical; confirm for this platform. */
	cache_invalidate_lines(vaddr, len);
}
151 * cache_push_all() semantics: Invalidate instruction cache and write back
152 * dirty data cache & invalidate.
154 void cache_push_all (void)
156 __asm__ __volatile__ (
157 "1:\n\t"
158 "flushd 0(%0)\n\t"
159 "sub %0,%0,%1\n\t"
160 "bgt %0,r0,1b\n\t"
161 : : "r" (nasys_dcache_size), "r" (nasys_dcache_line_size));
163 __asm__ __volatile__ (
164 "1:\n\t"
165 "flushi %0\n\t"
166 "sub %0,%0,%1\n\t"
167 "bgt %0,r0,1b\n\t"
168 "flushp\n\t"
169 : : "r" (nasys_icache_size), "r" (nasys_icache_line_size));
/*
 * dcache_push() semantics: Write back and dirty data cache and invalidate
 * the range.
 */

void dcache_push (unsigned long vaddr, int len)
{
	/* Data cache only; the icache is left untouched.
	 * NOTE(review): vaddr is passed to a paddr-based routine --
	 * presumably fine since this no-MMU port has virtual ==
	 * physical; confirm. */
	cache_invalidate_data(vaddr, len);
}
/*
 * icache_push() semantics: Invalidate instruction cache in the range.
 */

void icache_push (unsigned long vaddr, int len)
{
	/* Instruction cache only; no data cache write-back is done
	 * here, so callers must have pushed the dcache themselves if
	 * they just wrote the code. */
	cache_invalidate_inst(vaddr, len);
}
/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */

/*
 * On this no-MMU port there is no page-table remapping: kernel virtual
 * addresses equal physical addresses, so the "mapping" is the identity
 * and size, nocacheflag and memavailp are ignored.
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
		int nocacheflag, unsigned long *memavailp )
{
	return paddr;
}
/*
 * Decide whether addr should be treated as ROM/flash, i.e. memory the
 * kernel must not free or otherwise de-allocate.
 *
 * What we are really trying to do is determine if addr is in an
 * allocated kernel memory region (the RAM window bounded by the linker
 * symbols _ramstart/_ramend).  Ideally this would check for a real ROM
 * or flash region, but that would need board-specific knowledge, so
 * anything outside the RAM window is conservatively reported as ROM.
 *
 * Returns 1 if addr lies outside [_ramstart, _ramend), 0 otherwise.
 */
int is_in_rom(unsigned long addr)
{
	extern unsigned long _ramstart, _ramend;

	if (addr >= _ramstart && addr < _ramend)
		return 0;	/* inside allocated RAM: not ROM */

	return 1;		/* outside the RAM window: treat as ROM */
}
/*
 * Stub page-fault handler.  A no-MMU kernel cannot take demand-paging
 * faults, so reaching this function indicates a kernel bug: BUG()
 * panics, and VM_FAULT_OOM is returned only to satisfy the signature
 * (the return is unreachable in practice).
 */
int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, int write_access)
{
	BUG();
	return VM_FAULT_OOM;
}