/*-
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
 */

/** @file drm_vm.c
 * Support code for mmapping of DRM maps.
 */

#include <sys/conf.h>
#include <sys/devfs.h>
#include <sys/mutex2.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <drm/drmP.h>
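
/*
 * d_mmap callback for DRM character devices.  Resolves the byte offset
 * being mapped to a physical page index, returned through ap->a_result:
 * offsets inside the DMA buffer area come straight from the buffer
 * pagelist, everything else is looked up in the device map list.
 */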
int drm_mmap(struct dev_mmap_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        vm_offset_t offset = ap->a_offset;
        struct drm_device *dev = drm_get_device_from_kdev(kdev);
        struct drm_file *file_priv = NULL;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        int error;

        enum drm_map_type type;
        vm_paddr_t phys;

        /* d_mmap gets called twice; we can only reference file_priv during
         * the first call.  We have to assume that if error is EBADF the
         * first call was successful and the client is authenticated.
         */
        error = devfs_get_cdevpriv(ap->a_fp, (void **)&file_priv);
        if (error == ENOENT) {
                DRM_ERROR("Could not find authenticator!\n");
                return EINVAL;
        }

        if (file_priv && !file_priv->authenticated)
                return EACCES;

        DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset);
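
        /*
         * Offsets below the end of the DMA buffer area map DMA buffers;
         * the physical page is read directly out of the pagelist.
         */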
        if (dev->dma && offset < ptoa(dev->dma->page_count)) {
                drm_device_dma_t *dma = dev->dma;

                spin_lock(&dev->dma_lock);

                if (dma->pagelist != NULL) {
                        unsigned long page = offset >> PAGE_SHIFT;
                        unsigned long phys = dma->pagelist[page];

                        spin_unlock(&dev->dma_lock);
                        // XXX *paddr = phys;
                        ap->a_result = atop(phys); /* page index, not byte address */
                        return 0;
                } else {
                        spin_unlock(&dev->dma_lock);
                        return -1;
                }
        }

        /* A sequential search of a linked list is fine here because:
         * 1) there will only be about 5-10 entries in the list, and
         * 2) a DRI client only has to do this mapping once, so it doesn't
         * have to be optimized for performance, even if the list were a
         * bit longer.
         */
        DRM_LOCK(dev);
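        /*
         * Userspace passes the map handle as the mmap offset, so match on
         * the handle bits above DRM_MAP_HANDLE_SHIFT.
         */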
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map && offset >> DRM_MAP_HANDLE_SHIFT ==
                    (unsigned long)r_list->map->handle >> DRM_MAP_HANDLE_SHIFT) {
                        map = r_list->map;
                        break;
                }
        }

        if (map == NULL) {
                DRM_DEBUG("Can't find map, request offset = %016jx\n",
                    (uintmax_t)offset);
                list_for_each_entry(r_list, &dev->maplist, head) {
                        DRM_DEBUG("map offset = %016lx, handle = %016lx\n",
                            r_list->map->offset,
                            (unsigned long)r_list->map->handle);
                }
                DRM_UNLOCK(dev);
                return -1;
        }
        if ((map->flags & _DRM_RESTRICTED) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_UNLOCK(dev);
                DRM_DEBUG("restricted map\n");
                return -1;
        }

        type = map->type;
        DRM_UNLOCK(dev);
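
        /* Strip the handle bits; what remains is the offset into the map. */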
        offset = offset & ((1ULL << DRM_MAP_HANDLE_SHIFT) - 1);
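
        /*
         * Frame buffer, AGP and register maps carry a machine address in
         * map->offset; SG, consistent and shm maps are kernel allocations
         * whose physical address has to come from vtophys().
         */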
        switch (type) {
        case _DRM_FRAME_BUFFER:
        case _DRM_AGP:
#if 0 /* XXX */
                *memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
                /* FALLTHROUGH */
        case _DRM_REGISTERS:
                phys = map->offset + offset;
                break;
        case _DRM_SCATTER_GATHER:
#if 0 /* XXX */
                *memattr = VM_MEMATTR_WRITE_COMBINING;
#endif
                /* FALLTHROUGH */
        case _DRM_CONSISTENT:
        case _DRM_SHM:
                phys = vtophys((char *)map->virtual + offset);
                break;
        default:
                DRM_ERROR("bad map type %d\n", type);
                return -1;      /* This should never happen. */
        }
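
        /* d_mmap hands back a page index, hence atop(). */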
        ap->a_result = atop(phys);
        return 0;
}

/* XXX The following is just a temporary hack to replace the
 * vm_phys_fictitious functions available on FreeBSD.
 */
#define VM_PHYS_FICTITIOUS_NSEGS        8
static struct vm_phys_fictitious_seg {
        vm_paddr_t      start;
        vm_paddr_t      end;
        vm_page_t       first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx = MTX_INITIALIZER;

MALLOC_DEFINE(M_FICT_PAGES, "", "");
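
/*
 * Typical usage, as a rough sketch only (base, size, memattr and pa are
 * hypothetical driver variables, not names from this file): register a
 * range once, resolve fault addresses while it is mapped, and unregister
 * it again on detach.
 *
 *      if (vm_phys_fictitious_reg_range(base, base + size, memattr) != 0)
 *              return (EBUSY);
 *      ...
 *      m = vm_phys_fictitious_to_vm_page(pa);
 *      ...
 *      vm_phys_fictitious_unreg_range(base, base + size);
 */

/*
 * Return the fake vm_page backing physical address pa, or NULL if pa
 * falls outside every registered range.
 */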
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t m;
        int segind;

        m = NULL;
        for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
                seg = &vm_phys_fictitious_segs[segind];
                if (pa >= seg->start && pa < seg->end) {
                        m = &seg->first_page[atop(pa - seg->start)];
                        KASSERT((m->flags & PG_FICTITIOUS) != 0,
                            ("%p not fictitious", m));
                        break;
                }
        }
        return (m);
}
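
/*
 * Back [start, end) with zeroed fake vm_pages and record the range in a
 * free segment slot.  Returns 0 on success, or EBUSY once all
 * VM_PHYS_FICTITIOUS_NSEGS slots are in use.
 */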
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        long i, page_count;
        int segind;

        page_count = (end - start) / PAGE_SIZE;

        fp = kmalloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
            M_WAITOK | M_ZERO);

        for (i = 0; i < page_count; i++) {
                vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
                fp[i].flags &= ~(PG_BUSY | PG_UNMANAGED);
        }

        mtx_lock(&vm_phys_fictitious_reg_mtx);
        for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
                seg = &vm_phys_fictitious_segs[segind];
                if (seg->start == 0 && seg->end == 0) {
                        seg->start = start;
                        seg->end = end;
                        seg->first_page = fp;
                        mtx_unlock(&vm_phys_fictitious_reg_mtx);
                        return (0);
                }
        }
        mtx_unlock(&vm_phys_fictitious_reg_mtx);
        kfree(fp, M_FICT_PAGES);
        return (EBUSY);
}
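
/*
 * Drop the segment registered for exactly [start, end) and free its fake
 * pages; asserts if no such registration exists.
 */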
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
        struct vm_phys_fictitious_seg *seg;
        vm_page_t fp;
        int segind;

        mtx_lock(&vm_phys_fictitious_reg_mtx);
        for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
                seg = &vm_phys_fictitious_segs[segind];
                if (seg->start == start && seg->end == end) {
                        seg->start = seg->end = 0;
                        fp = seg->first_page;
                        seg->first_page = NULL;
                        mtx_unlock(&vm_phys_fictitious_reg_mtx);
                        kfree(fp, M_FICT_PAGES);
                        return;
                }
        }
        mtx_unlock(&vm_phys_fictitious_reg_mtx);
        KASSERT(0, ("Unregistering a fictitious range that was never registered"));
}