kernel - VM rework part 9 - Precursor work for terminal pv_entry removal
[dragonfly.git] / sys / dev / drm / drm_vm.c
blob 5f4d8c441f81d4c0e6d29251b20cfbe52519a5fc
/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */
/** @file drm_vm.c
 * Support code for mmapping of DRM maps.
 */
#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

#include <sys/mutex2.h>
int drm_mmap(struct dev_mmap_args *ap)
{
	struct file *filp = ap->a_fp;
	struct drm_file *priv = filp->private_data;
	struct cdev *kdev = ap->a_head.a_dev;
	vm_offset_t offset = ap->a_offset;
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	struct drm_local_map *map = NULL;
	struct drm_hash_item *hash;
	enum drm_map_type type;
	vm_paddr_t phys;

	if (!priv->authenticated)
		return -EACCES;

	DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);

	/*
	 * Offsets below the end of the DMA buffer area refer to the DMA
	 * buffers themselves; look the backing page up in the pagelist.
	 */
	if (dev->dma && offset < ptoa(dev->dma->page_count)) {
		struct drm_device_dma *dma = dev->dma;

		if (dma->pagelist != NULL) {
			unsigned long page = offset >> PAGE_SHIFT;
			unsigned long phys = dma->pagelist[page];

			// XXX *paddr = phys;
			ap->a_result = phys;
			return 0;
		} else {
			return -1;
		}
	}
	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer.
	*/
	DRM_LOCK(dev);

	if (drm_ht_find_item(&dev->map_hash, offset, &hash)) {
		DRM_ERROR("Could not find map\n");
		DRM_UNLOCK(dev);
		return -EINVAL;
	}
	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (map == NULL) {
		DRM_DEBUG("Can't find map, request offset = %016jx\n",
		    (uintmax_t)offset);
		DRM_UNLOCK(dev);
		return -1;
	}
	if ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)) {
		DRM_UNLOCK(dev);
		DRM_DEBUG("restricted map\n");
		return -1;
	}
	type = map->type;
	DRM_UNLOCK(dev);

	switch (type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_AGP:
#if 0 /* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_REGISTERS:
		phys = map->offset + offset;
		break;
	case _DRM_SCATTER_GATHER:
#if 0 /* XXX */
		*memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
	case _DRM_SHM:
		phys = vtophys((char *)map->handle + offset);
		break;
	default:
		DRM_ERROR("bad map type %d\n", type);
		return -1;	/* This should never happen. */
	}

	/* Hand the page index, not the byte address, back to the caller. */
	ap->a_result = atop(phys);
	return 0;
}
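
/*
 * For context, a minimal sketch (not part of this file) of how a DragonFly
 * driver wires an mmap handler like drm_mmap() into its character-device
 * entry points.  The "example_drm_ops" table and its flags are hypothetical;
 * only the .d_mmap hook refers to the handler above.
 */
#if 0	/* illustration only */
static struct dev_ops example_drm_ops = {
	{ "drm", 0, D_TRACKCLOSE },	/* name, legacy major, flags */
	.d_mmap =	drm_mmap,
};
#endif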
/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER("vmphy");
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;

	page_count = (end - start) / PAGE_SIZE;

	fp = kmalloc(page_count * sizeof(struct vm_page), M_DRM,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].flags &= ~PG_UNMANAGED;
		atomic_clear_int(&fp[i].busy_count, PBUSY_LOCKED);
	}

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	kfree(fp, M_DRM);
	return (EBUSY);
}
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			kfree(fp, M_DRM);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering a fictitious range that was never registered"));
}
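
/*
 * A minimal usage sketch for the shim above.  The addresses are
 * hypothetical; a caller registers the physical range backing a device
 * aperture once, translates addresses to the fake vm_page structures
 * while the range is mapped, and unregisters the range on teardown.
 */
#if 0	/* illustration only */
static void
example_fictitious_usage(void)
{
	vm_paddr_t start = 0xd0000000UL;	/* hypothetical aperture base */
	vm_paddr_t end = start + 16UL * 1024 * 1024;
	vm_page_t m;

	if (vm_phys_fictitious_reg_range(start, end,
	    VM_MEMATTR_WRITE_COMBINING) != 0)
		return;		/* all segment slots already in use */

	/* Look up the fake page backing the second page of the range. */
	m = vm_phys_fictitious_to_vm_page(start + PAGE_SIZE);
	KKASSERT(m != NULL);

	vm_phys_fictitious_unreg_range(start, end);
}
#endif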