/*
 * arch/sh/mm/ioremap_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 *   (C) Copyright 1995 1996 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

static struct resource shmedia_iomap = {
	.name	= "shmedia_iomap",
	.start	= IOBASE_VADDR + PAGE_SIZE,
	.end	= IOBASE_END - 1,
};

static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags);
static void shmedia_unmapioaddr(unsigned long vaddr);
static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
				     int sz, unsigned long flags);

/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}

	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

static struct resource *shmedia_find_resource(struct resource *root,
					      unsigned long vaddr)
{
	struct resource *res;

	for (res = root->child; res; res = res->sibling)
		if (res->start <= vaddr && res->end >= vaddr)
			return res;

	return NULL;
}

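/*
 * Pick a backing 'struct resource' for a new mapping: a slot from the
 * static xresource pool while that lasts, otherwise a kmalloc'ed resource
 * with room for the name appended, then hand it to shmedia_ioremap().
 */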
static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name, unsigned long flags)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;

	if (name == NULL)
		name = "???";

	xres = xres_alloc();
	if (xres != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk(KERN_NOTICE "%s: done with statics, "
			       "switching to kmalloc\n", __func__);
			printed_full = 1;
		}

		tlen = strlen(name);
		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
		if (!tack)
			return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof(struct resource);
	}

	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size, flags);
}

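/*
 * Carve a page-aligned region for 'res' out of the shmedia_iomap space,
 * map it page by page onto the given physical address, and return the
 * virtual address adjusted by the sub-page offset of 'pa'.
 */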
static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
				     unsigned long flags)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		      (res->name != NULL) ? res->name : "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va, flags);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return (void __iomem *)(unsigned long)(res->start + offset);
}

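/* Unmap a region page by page and give its resource range back. */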
static void shmedia_free_io(struct resource *res)
{
	unsigned long len = res->end - res->start + 1;

	BUG_ON((len & (PAGE_SIZE - 1)) != 0);

	while (len) {
		len -= PAGE_SIZE;
		shmedia_unmapioaddr(res->start + len);
	}

	release_resource(res);
}

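/*
 * Grab a zeroed page for a page table: from the slab allocator once it is
 * available, from bootmem during early boot.
 */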
static __init_refok void *sh64_get_page(void)
{
	void *page;

	if (slab_is_available())
		page = (void *)get_zeroed_page(GFP_KERNEL);
	else
		page = alloc_bootmem_pages(PAGE_SIZE);

	if (!page || ((unsigned long)page & ~PAGE_MASK))
		panic("sh64_get_page: Out of memory already?\n");

	return page;
}

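/*
 * Install a single kernel PTE mapping 'va' onto 'pa', allocating any
 * missing intermediate page table levels on the way down.
 */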
static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
			      unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	if (!flags)
		flags = 1; /* 1 = CB0-1 device */

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pudp = (pud_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
	}

	pudp = pud_offset(pgdp, va);
	if (pud_none(*pudp) || !pud_present(*pudp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pudp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, PAGE_SIZE);
}

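/*
 * Walk the kernel page tables for 'vaddr' and clear its PTE if one is
 * present; a missing level means there is nothing to tear down.
 */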
static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
		return;

	pudp = pud_offset(pgdp, vaddr);
	if (pud_none(*pudp) || pud_bad(*pudp))
		return;

	pmdp = pmd_offset(pudp, vaddr);
	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	clear_page((void *)ptep);
	pte_clear(&init_mm, vaddr, ptep);
}

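/*
 * Public ioremap entry point: name the mapping after its physical address
 * and let shmedia_alloc_io() do the work.
 */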
void __iomem *__ioremap(unsigned long offset, unsigned long size,
			unsigned long flags)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return shmedia_alloc_io(offset, size, name, flags);
}
EXPORT_SYMBOL(__ioremap);

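/*
 * Undo an __ioremap(): look up the resource covering the address, unmap
 * it, then return the slot to the static pool or kfree() it depending on
 * where it came from.
 */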
void __iounmap(void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	shmedia_free_io(res);

	if ((char *)res >= (char *)xresv &&
	    (char *)res <  (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(__iounmap);

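/*
 * /proc read handler: list each mapping in the shmedia_iomap tree as
 * "start-end: name".
 */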
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		nm = r->name;
		if (nm == NULL)
			nm = "???";

		p += sprintf(p, "%08lx-%08lx: %s\n",
			     (unsigned long)r->start,
			     (unsigned long)r->end, nm);
	}

	return p-buf;
}

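/* Expose the mapping list as /proc/io_map. */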
static int __init register_proc_onchip(void)
{
	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
			       &shmedia_iomap);
	return 0;
}
late_initcall(register_proc_onchip);