sh: Update SH-5 flush_cache_sigtramp() for API changes.
/*
 * arch/sh/mm/ioremap_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
static void shmedia_mapioaddr(unsigned long, unsigned long);
static unsigned long shmedia_ioremap(struct resource *, u32, int);

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void *__ioremap(unsigned long phys_addr, unsigned long size,
                unsigned long flags)
{
        void *addr;
        struct vm_struct *area;
        unsigned long offset, last_addr;
        pgprot_t pgprot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        pgprot = __pgprot(_PAGE_PRESENT  | _PAGE_READ   |
                          _PAGE_WRITE    | _PAGE_DIRTY  |
                          _PAGE_ACCESSED | _PAGE_SHARED | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
                vunmap(addr);
                return NULL;
        }

        return (void *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
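/*
 * Minimal usage sketch (hypothetical address and size, for illustration
 * only): because the sub-page offset is added back into the returned
 * pointer, a caller may pass a non-page-aligned physical address directly,
 * e.g.
 *
 *      void *regs = __ioremap(0x1c001013UL, 0x10, 0);
 *      if (regs)
 *              ... access the device ...
 *      __iounmap(regs);
 *
 * __iounmap() masks the offset back off before freeing the mapping.
 */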
void __iounmap(void *addr)
{
        struct vm_struct *area;

        vfree((void *) (PAGE_MASK & (unsigned long) addr));
        area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
        if (!area) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                return;
        }

        kfree(area);
}
EXPORT_SYMBOL(__iounmap);
static struct resource shmedia_iomap = {
        .name   = "shmedia_iomap",
        .start  = IOBASE_VADDR + PAGE_SIZE,
        .end    = IOBASE_END - 1,
};

static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);

/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10

struct xresource {
        struct resource xres;   /* Must be first */
        int xflag;              /* 1 == used */
        char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
        struct xresource *xrp;
        int n;

        xrp = xresv;
        for (n = 0; n < XNRES; n++) {
                if (xrp->xflag == 0) {
                        xrp->xflag = 1;
                        return xrp;
                }
                xrp++;
        }
        return NULL;
}

static void xres_free(struct xresource *xrp)
{
        xrp->xflag = 0;
}
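/*
 * Note: xres_alloc()/xres_free() only manage the small static xresv[]
 * pool above; once it is exhausted, shmedia_alloc_io() below falls back
 * to a kmalloc()'d struct resource with the name string tacked on the end.
 */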
static struct resource *shmedia_find_resource(struct resource *root,
                                              unsigned long vaddr)
{
        struct resource *res;

        for (res = root->child; res; res = res->sibling)
                if (res->start <= vaddr && res->end >= vaddr)
                        return res;

        return NULL;
}

static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
                                      const char *name)
{
        static int printed_full = 0;
        struct xresource *xres;
        struct resource *res;
        char *tack;
        int tlen;

        if (name == NULL) name = "???";

        if ((xres = xres_alloc()) != 0) {
                tack = xres->xname;
                res = &xres->xres;
        } else {
                if (!printed_full) {
                        printk("%s: done with statics, switching to kmalloc\n",
                               __FUNCTION__);
                        printed_full = 1;
                }
                tlen = strlen(name);
                tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
                if (!tack)
                        return -ENOMEM;
                memset(tack, 0, sizeof(struct resource));
                res = (struct resource *) tack;
                tack += sizeof (struct resource);
        }

        strncpy(tack, name, XNMLN);
        tack[XNMLN] = 0;
        res->name = tack;

        return shmedia_ioremap(res, phys, size);
}
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
        unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
        unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
        unsigned long va;
        unsigned int psz;

        if (allocate_resource(&shmedia_iomap, res, round_sz,
                              shmedia_iomap.start, shmedia_iomap.end,
                              PAGE_SIZE, NULL, NULL) != 0) {
                panic("alloc_io_res(%s): cannot occupy\n",
                      (res->name != NULL) ? res->name : "???");
        }

        va = res->start;
        pa &= PAGE_MASK;

        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

        /* log at boot time ... */
        printk("mapioaddr: %6s  [%2d page%s]  va 0x%08lx   pa 0x%08x\n",
               ((res->name != NULL) ? res->name : "???"),
               psz, psz == 1 ? " " : "s", va, pa);

        for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
                shmedia_mapioaddr(pa, va);
                va += PAGE_SIZE;
                pa += PAGE_SIZE;
        }

        res->start += offset;
        res->end = res->start + sz - 1;         /* not strictly necessary.. */

        return res->start;
}
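/*
 * Worked example (made-up numbers): for pa = 0x01000013 and sz = 0x30,
 * offset is 0x13 and round_sz rounds 0x13 + 0x30 up to one PAGE_SIZE
 * page.  One page is mapped, and the returned res->start is the
 * page-aligned virtual address plus the 0x13 offset, so the caller gets
 * a pointer to the exact byte it asked for.
 */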
static void shmedia_free_io(struct resource *res)
{
        unsigned long len = res->end - res->start + 1;

        BUG_ON((len & (PAGE_SIZE - 1)) != 0);

        while (len) {
                len -= PAGE_SIZE;
                shmedia_unmapioaddr(res->start + len);
        }

        release_resource(res);
}

static __init_refok void *sh64_get_page(void)
{
        extern int after_bootmem;
        void *page;

        if (after_bootmem) {
                page = (void *)get_zeroed_page(GFP_ATOMIC);
        } else {
                page = alloc_bootmem_pages(PAGE_SIZE);
        }

        if (!page || ((unsigned long)page & ~PAGE_MASK))
                panic("sh64_get_page: Out of memory already?\n");

        return page;
}
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        pgprot_t prot;
        unsigned long flags = 1; /* 1 = CB0-1 device */

        pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

        /*
         * Walk the kernel page tables for this virtual address,
         * populating any missing pgd/pud/pmd levels on the way down.
         */
        pgdp = pgd_offset_k(va);
        if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
                pudp = (pud_t *)sh64_get_page();
                set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
        }

        pudp = pud_offset(pgdp, va);
        if (pud_none(*pudp) || !pud_present(*pudp)) {
                pmdp = (pmd_t *)sh64_get_page();
                set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
        }

        pmdp = pmd_offset(pudp, va);
        if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
                ptep = (pte_t *)sh64_get_page();
                set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
        }

        prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
                        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

        pte = pfn_pte(pa >> PAGE_SHIFT, prot);
        ptep = pte_offset_kernel(pmdp, va);

        /* Complain if a different mapping is already installed here. */
        if (!pte_none(*ptep) &&
            pte_val(*ptep) != pte_val(pte))
                pte_ERROR(*ptep);

        set_pte(ptep, pte);

        flush_tlb_kernel_range(va, PAGE_SIZE);
}
static void shmedia_unmapioaddr(unsigned long vaddr)
{
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset_k(vaddr);
        if (pgd_none(*pgdp) || pgd_bad(*pgdp))
                return;

        pudp = pud_offset(pgdp, vaddr);
        if (pud_none(*pudp) || pud_bad(*pudp))
                return;

        pmdp = pmd_offset(pudp, vaddr);
        if (pmd_none(*pmdp) || pmd_bad(*pmdp))
                return;

        ptep = pte_offset_kernel(pmdp, vaddr);

        if (pte_none(*ptep) || !pte_present(*ptep))
                return;

        clear_page((void *)ptep);
        pte_clear(&init_mm, vaddr, ptep);
}
unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
{
        if (size < PAGE_SIZE)
                size = PAGE_SIZE;

        return shmedia_alloc_io(phys, size, name);
}
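/*
 * Usage sketch (hypothetical register base, for illustration only):
 * early platform setup code that needs a device mapped before kmalloc
 * is available might do something like
 *
 *      unsigned long vaddr = onchip_remap(0x09810000, 1024, "INTC");
 *      ...
 *      onchip_unmap(vaddr);
 *
 * The name is only used for the resource tree, boot-time logging and
 * the /proc listing below.
 */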
void onchip_unmap(unsigned long vaddr)
{
        struct resource *res;
        unsigned int psz;

        res = shmedia_find_resource(&shmedia_iomap, vaddr);
        if (!res) {
                printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
                       __FUNCTION__, vaddr);
                return;
        }

        psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

        printk(KERN_DEBUG "unmapioaddr: %6s  [%2d page%s] freed\n",
               res->name, psz, psz == 1 ? " " : "s");

        shmedia_free_io(res);

        if ((char *)res >= (char *)xresv &&
            (char *)res <  (char *)&xresv[XNRES]) {
                xres_free((struct xresource *)res);
        } else {
                kfree(res);
        }
}
#ifdef CONFIG_PROC_FS
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
                  void *data)
{
        char *p = buf, *e = buf + length;
        struct resource *r;
        const char *nm;

        for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
                if (p + 32 >= e)        /* Better than nothing */
                        break;
                if ((nm = r->name) == 0) nm = "???";
                p += sprintf(p, "%08lx-%08lx: %s\n",
                             (unsigned long)r->start,
                             (unsigned long)r->end, nm);
        }

        return p - buf;
}
#endif /* CONFIG_PROC_FS */
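/*
 * Each child resource of shmedia_iomap comes out as one line of
 * /proc/io_map, e.g. (made-up values):
 *
 *      00800000-00800fff: SCIF
 */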
static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
        create_proc_read_entry("io_map", 0, 0, ioremap_proc_info, &shmedia_iomap);
#endif
        return 0;
}

__initcall(register_proc_onchip);