/*
 * linux/arch/nio2nommu/mm/memory.c
 *
 * Copyright (C) 1995 Hamish Macdonald
 * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * Based on:
 * linux/arch/m68k/mm/memory.c
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 #include <linux/types.h>
36 #include <linux/slab.h>
38 #include <asm/setup.h>
39 #include <asm/segment.h>
41 #include <asm/pgtable.h>
42 #include <asm/system.h>
43 #include <asm/traps.h>
/*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
 * be overwritten anyway, e.g. by DMA to memory. The range is defined by a
 * _physical_ address.
 *
 * NOTE(review): the function body is elided from this excerpt; only the
 * signature survives. Restore the body from the original source before
 * compiling.
 */
void cache_clear (unsigned long paddr, int len)
/*
 * Define cache invalidate functions. The instruction and data cache
 * will need to be flushed. Write back the dirty data cache and invalidate
 * the instruction cache for the range.
 */
/*
 * cache_invalidate_inst() - invalidate the instruction-cache lines that
 * cover the byte range [paddr, paddr + len).
 *
 * The cache index is taken from (paddr & (nasys_icache_size - 1)): the
 * start offset is rounded down to an icache line boundary, and the end
 * offset is rounded to the line containing the last byte plus one more
 * line, so the loop bound is one-past-the-last affected line.
 *
 * NOTE(review): the enclosing function braces and the asm instruction
 * template string appear to have been elided from this excerpt; only the
 * operand list survives. Restore them from the original source before
 * compiling.
 */
static __inline__ void cache_invalidate_inst(unsigned long paddr, int len)
	/* Line-aligned start offset and one-past-the-end offset in the cache. */
	unsigned long sset, eset;

	/* Round the start offset down to an icache line boundary. */
	sset = (paddr & (nasys_icache_size - 1)) & (~(nasys_icache_line_size - 1));
	/* End offset: line containing (start + len), plus one full line. */
	eset = (((paddr & (nasys_icache_size - 1)) + len) & (~(nasys_icache_line_size - 1))) + nasys_icache_line_size;

	/* Walk [sset, eset) one icache line at a time (asm template elided). */
	__asm__ __volatile__ (
		: : "r" (sset), "r" (eset), "r" (nasys_icache_line_size));
/*
 * cache_invalidate_data() - write back / invalidate the data-cache lines
 * that cover the byte range [paddr, paddr + len).
 *
 * Same bounds computation as cache_invalidate_inst(), but using the
 * data-cache geometry (nasys_dcache_size / nasys_dcache_line_size).
 *
 * NOTE(review): the enclosing function braces and the asm instruction
 * template string appear to have been elided from this excerpt; only the
 * operand list survives. Restore them from the original source before
 * compiling.
 */
static __inline__ void cache_invalidate_data(unsigned long paddr, int len)
	/* Line-aligned start offset and one-past-the-end offset in the cache. */
	unsigned long sset, eset;

	/* Round the start offset down to a dcache line boundary. */
	sset = (paddr & (nasys_dcache_size - 1)) & (~(nasys_dcache_line_size - 1));
	/* End offset: line containing (start + len), plus one full line. */
	eset = (((paddr & (nasys_dcache_size - 1)) + len) & (~(nasys_dcache_line_size - 1))) + nasys_dcache_line_size;

	/* Walk [sset, eset) one dcache line at a time (asm template elided). */
	__asm__ __volatile__ (
		: : "r" (sset),"r" (eset), "r" (nasys_dcache_line_size));
/*
 * cache_invalidate_lines() - flush the data-cache lines and invalidate the
 * instruction-cache lines covering [paddr, paddr + len).
 *
 * The dcache pass runs first; the icache bounds are then recomputed with
 * the icache geometry, since the two caches may differ in size and line
 * length. Each pass rounds the start offset down to a line boundary and
 * bounds the loop one line past the line containing the last byte.
 *
 * NOTE(review): the enclosing function braces and both asm instruction
 * template strings appear to have been elided from this excerpt; only the
 * operand lists survive. Restore them from the original source before
 * compiling.
 */
static __inline__ void cache_invalidate_lines(unsigned long paddr, int len)
	/* Line-aligned start offset and one-past-the-end offset in the cache. */
	unsigned long sset, eset;

	/* Data cache bounds. */
	sset = (paddr & (nasys_dcache_size - 1)) & (~(nasys_dcache_line_size - 1));
	eset = (((paddr & (nasys_dcache_size - 1)) + len) & (~(nasys_dcache_line_size - 1))) + nasys_dcache_line_size;

	/* Walk the dcache range one line at a time (asm template elided). */
	__asm__ __volatile__ (
		: : "r" (sset),"r" (eset), "r" (nasys_dcache_line_size));

	/* Instruction cache bounds, recomputed with the icache geometry. */
	sset = (paddr & (nasys_icache_size - 1)) & (~(nasys_icache_line_size - 1));
	eset = (((paddr & (nasys_icache_size - 1)) + len) & (~(nasys_icache_line_size - 1))) + nasys_icache_line_size;

	/* Walk the icache range one line at a time (asm template elided). */
	__asm__ __volatile__ (
		: : "r" (sset), "r" (eset), "r" (nasys_icache_line_size));
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 *
 * @paddr: physical start address of the range
 * @len:   length of the range in bytes
 */
void cache_push (unsigned long paddr, int len)
{
	/* One helper covers both requirements: dcache write-back/invalidate
	 * followed by icache invalidate over the same range. */
	cache_invalidate_lines(paddr, len);
}
/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address.
 *
 * @vaddr: user virtual start address of the range
 * @len:   length of the range in bytes
 *
 * NOTE(review): vaddr is passed straight to the physical-range helper;
 * presumably valid because this is a no-MMU port where virtual and
 * physical addresses coincide — confirm against the platform docs.
 */
void cache_push_v (unsigned long vaddr, int len)
{
	cache_invalidate_lines(vaddr, len);
}
/*
 * cache_push_all() semantics: Invalidate instruction cache and write back
 * dirty data cache & invalidate.
 *
 * Unlike the ranged helpers above, this iterates over the entire cache:
 * the asm operands are the total cache size and the line size.
 *
 * NOTE(review): the function braces and both asm instruction template
 * strings are elided from this excerpt; only the operand lists survive.
 * Restore them from the original source before compiling.
 */
void cache_push_all (void)
	/* Write back and invalidate every data-cache line (template elided). */
	__asm__ __volatile__ (
		: : "r" (nasys_dcache_size), "r" (nasys_dcache_line_size));

	/* Invalidate every instruction-cache line (template elided). */
	__asm__ __volatile__ (
		: : "r" (nasys_icache_size), "r" (nasys_icache_line_size));
/*
 * dcache_push() semantics: Write back any dirty data cache lines in the
 * range and invalidate them. The instruction cache is left untouched.
 *
 * @vaddr: start address of the range (virtual == physical on this no-MMU
 *         port — presumably; confirm against the platform docs)
 * @len:   length of the range in bytes
 */
void dcache_push (unsigned long vaddr, int len)
{
	cache_invalidate_data(vaddr, len);
}
/*
 * icache_push() semantics: Invalidate instruction cache in the range.
 * The data cache is left untouched.
 *
 * @vaddr: start address of the range (virtual == physical on this no-MMU
 *         port — presumably; confirm against the platform docs)
 * @len:   length of the range in bytes
 */
void icache_push (unsigned long vaddr, int len)
{
	cache_invalidate_inst(vaddr, len);
}
/* Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 *
 * NOTE(review): the function body is elided from this excerpt; only the
 * signature survives. @memavailp presumably points at the boot-time
 * "next free memory" cursor and @nocacheflag selects an uncached mapping
 * — confirm against the original source before relying on this.
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
	int nocacheflag, unsigned long *memavailp)
/*
 * is_in_rom() - return nonzero if @addr lies outside the kernel's
 * allocatable RAM window [_ramstart, _ramend), i.e. in memory the kernel
 * must treat as ROM/flash and never free.
 *
 * @addr: address to classify
 * Returns: 1 if outside RAM (treat as ROM), 0 otherwise.
 */
int is_in_rom(unsigned long addr)
{
	extern unsigned long _ramstart, _ramend;

	/*
	 * What we are really trying to do is determine if addr is
	 * in an allocated kernel memory region. If not then assume
	 * we cannot free it or otherwise de-allocate it. Ideally
	 * we could restrict this to really being in a ROM or flash,
	 * but that would need to be done on a board by board basis,
	 * not globally.
	 */
	if ((addr < _ramstart) || (addr >= _ramend))
		return(1);

	/* Default case, not in ROM */
	return(0);
}
/*
 * Handle a memory fault at @address within @vma for @mm.
 *
 * NOTE(review): the function body runs past the end of this excerpt; only
 * the signature survives. On a no-MMU port this is presumably a stub or a
 * trivial success path — confirm against the original source.
 */
int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
	unsigned long address, int write_access)