/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
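/*
 * Illustrative sketch of how a driver consumes this API (not part of the
 * original file, hence guarded out; the 0xfebc1000 base, 0x100 length and
 * register offsets are made-up placeholders). ioremap() is the <asm/io.h>
 * wrapper that calls __ioremap(phys, size, 0); the returned virtual
 * address is used with readl()/writel() and released with iounmap().
 */
#if 0
static int example_map_regs(void)
{
	unsigned long phys = 0xfebc1000;	/* hypothetical MMIO base */
	void *regs;
	unsigned int id;

	regs = ioremap(phys, 0x100);
	if (!regs)
		return -ENOMEM;

	id = readl(regs);		/* read a 32-bit register at offset 0 */
	writel(0x1, regs + 4);		/* poke a hypothetical control register */

	iounmap(regs);
	return id ? 0 : -ENODEV;
}
#endif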
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
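/*
 * Note the "phys_addr -= address" idiom in remap_area_pmd() and
 * remap_area_pages() below: pre-subtracting the virtual address turns
 * phys_addr into a constant (physical - virtual) delta, so each callee
 * can recover the physical address of any sub-range as
 * (address + phys_addr) while advancing only the virtual address.
 */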
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
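/*
 * Top level of the walk: iterate over the kernel page directory,
 * allocating intermediate tables as needed, and flush the TLB once at
 * the end so the new kernel mappings become visible everywhere.
 */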
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd;
		pmd = pmd_alloc_kernel(dir, address);
		if (!pmd)
			return -ENOMEM;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			return -ENOMEM;
		set_pgdir(address, *dir);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return 0;
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);
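	/*
	 * (phys_to_virt() here is just the identity-map conversion: for
	 * example, physical 0xB8000, the VGA text buffer, comes back as
	 * 0xC00B8000 under the default PAGE_OFFSET of 0xC0000000.)
	 */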
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		int i;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		/* every page in the range, including the last one, must be reserved */
		for (i = MAP_NR(t_addr); i <= MAP_NR(t_end); i++) {
			if (!PageReserved(mem_map + i))
				return NULL;
		}
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;
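	/*
	 * For example (made-up numbers): phys_addr = 0xfebc1004 with
	 * size = 0x10 gives last_addr = 0xfebc1013, so offset becomes
	 * 0x004, phys_addr is rounded down to 0xfebc1000, and size is
	 * rounded up to one full page (0x1000).
	 */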
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
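/*
 * Unmapping is the inverse. Low ISA addresses handed out by the
 * phys_to_virt() short-circuit above lie below high_memory and were
 * never vmalloc'ed, so they are deliberately not vfreed; everything
 * else is masked back to its page-aligned base and released.
 */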
void iounmap(void *addr)
{
	if (addr > high_memory)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}