sys/dev/drm/linux_iomapping.c
/*
 * Copyright (c) 2014-2016 François Tigeot
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <machine/pmap.h>
#include <vm/pmap.h>
#include <vm/vm.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/io.h>
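/*
 * List of all mappings created by __ioremap_common(); iounmap() walks it
 * to find the mapping belonging to a pointer and tear it down again.
 */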
SLIST_HEAD(iomap_list_head, iomap) iomap_list = SLIST_HEAD_INITIALIZER(iomap_list);
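/*
 * Map 'size' bytes of physical address space starting at 'phys_addr' into
 * kernel virtual address space with the caching attributes selected by
 * 'cache_mode', and remember the mapping on iomap_list.
 */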
void __iomem *
__ioremap_common(unsigned long phys_addr, unsigned long size, int cache_mode)
{
	struct iomap *imp;

	/* Ensure mappings are page-aligned */
	BUG_ON(phys_addr & PAGE_MASK);
	BUG_ON(size & PAGE_MASK);

	imp = kmalloc(sizeof(struct iomap), M_DRM, M_WAITOK);
	imp->paddr = phys_addr;
	imp->npages = size / PAGE_SIZE;
	imp->pmap_addr = pmap_mapdev_attr(phys_addr, size, cache_mode);
	SLIST_INSERT_HEAD(&iomap_list, imp, im_iomaps);

	return imp->pmap_addr;
}
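/*
 * Remove a mapping previously established by __ioremap_common(): look the
 * pointer up on iomap_list, restore write-back caching when the range is
 * backed by regular memory, then undo the device mapping.
 */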
void iounmap(void __iomem *ptr)
{
	struct iomap *imp, *tmp_imp;
	int found = 0;
	int indx;
	vm_paddr_t paddr_end;

	SLIST_FOREACH_MUTABLE(imp, &iomap_list, im_iomaps, tmp_imp) {
		if (imp->pmap_addr == ptr) {
			found = 1;
			break;
		}
	}

	if (!found) {
		kprintf("iounmap: invalid address %p\n", ptr);
		return;
	}

	paddr_end = imp->paddr + (imp->npages * PAGE_SIZE) - 1;
	/* Is this address space range backed by regular memory? */
	for (indx = 0; phys_avail[indx].phys_end != 0; ++indx) {
		vm_paddr_t range_start = phys_avail[indx].phys_beg;
		vm_paddr_t size = phys_avail[indx].phys_end -
		    phys_avail[indx].phys_beg;
		vm_paddr_t range_end = range_start + size - 1;

		if ((imp->paddr >= range_start) && (paddr_end <= range_end)) {
			/* Yes, change page caching attributes */
			pmap_change_attr(imp->paddr, imp->npages, PAT_WRITE_BACK);
			break;
		}
	}

	pmap_unmapdev((vm_offset_t)imp->pmap_addr, imp->npages * PAGE_SIZE);

	SLIST_REMOVE(&iomap_list, imp, iomap, im_iomaps);
	kfree(imp);
}
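/*
 * Illustrative use (a sketch, not part of this file): a driver is expected
 * to obtain a mapping through one of the Linux-style ioremap() wrappers,
 * which end up calling __ioremap_common() with the desired caching mode,
 * and to release it with iounmap(). 'bar_paddr' and 'bar_size' below are
 * hypothetical placeholders for a device BAR:
 *
 *	void __iomem *regs = ioremap(bar_paddr, bar_size);
 *	...
 *	iounmap(regs);
 */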