2 * Copyright 2003 Eric Anholt
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
27 * Support code for mmaping of DRM maps.
31 #include <sys/devfs.h>
32 #include <sys/mutex2.h>
33 #include <vm/vm_page.h>
34 #include <vm/vm_pager.h>
37 #include <asm/pgtable.h>
38 #include "drm_legacy.h"
/*
 * drm_mmap(): devfs d_mmap handler for DRM maps.  Resolves the faulting
 * device offset either through the device's DMA pagelist or through the
 * map_hash table, and reports the resulting physical page via a_result.
 *
 * NOTE(review): the embedded original line numbers in this excerpt are not
 * contiguous (e.g. 59 -> 63, 110 -> 118), so returns, closing braces, and
 * several switch labels are missing from this view; do not assume this
 * span is complete or compilable as shown.
 */
40 int drm_mmap(struct dev_mmap_args
*ap
)
/* Device node and requested offset come from the devfs argument block. */
42 struct cdev
*kdev
= ap
->a_head
.a_dev
;
43 vm_offset_t offset
= ap
->a_offset
;
44 struct drm_device
*dev
= drm_get_device_from_kdev(kdev
);
45 struct drm_file
*file_priv
= NULL
;
46 struct drm_local_map
*map
= NULL
;
48 struct drm_hash_item
*hash
;
50 enum drm_map_type type
;
53 /* d_mmap gets called twice, we can only reference file_priv during
54 * the first call. We need to assume that if error is EBADF the
55 * call was successful and the client is authenticated.
57 error
= devfs_get_cdevpriv(ap
->a_fp
, (void **)&file_priv
);
/* ENOENT from devfs_get_cdevpriv: no per-open private data was set,
 * i.e. there is no authenticator for this open. */
58 if (error
== ENOENT
) {
59 DRM_ERROR("Could not find authenticator!\n");
/* Reject callers that opened the device but never authenticated. */
63 if (file_priv
&& !file_priv
->authenticated
)
66 DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset
);
/* Fast path: offsets inside the DMA buffer area map via dma->pagelist. */
67 if (dev
->dma
&& offset
< ptoa(dev
->dma
->page_count
)) {
68 struct drm_device_dma
*dma
= dev
->dma
;
70 spin_lock(&dev
->dma_lock
);
72 if (dma
->pagelist
!= NULL
) {
73 unsigned long page
= offset
>> PAGE_SHIFT
;
74 unsigned long phys
= dma
->pagelist
[page
];
76 spin_unlock(&dev
->dma_lock
);
81 spin_unlock(&dev
->dma_lock
);
86 /* A sequential search of a linked list is
87 fine here because: 1) there will only be
88 about 5-10 entries in the list and, 2) a
89 DRI client only has to do this mapping
90 once, so it doesn't have to be optimized
91 for performance, even if the list was a
/* Slow path: look the offset up in the device's map hash table. */
96 if (drm_ht_find_item(&dev
->map_hash
, offset
, &hash
)) {
97 DRM_ERROR("Could not find map\n");
101 map
= drm_hash_entry(hash
, struct drm_map_list
, hash
)->map
;
103 DRM_DEBUG("Can't find map, request offset = %016jx\n",
/* _DRM_RESTRICTED maps require CAP_SYS_ADMIN privilege. */
108 if (((map
->flags
& _DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
))) {
110 DRM_DEBUG("restricted map\n");
/*
 * Per-map-type physical address computation; frame-buffer and
 * scatter/gather maps get write-combining memory attributes.
 * NOTE(review): the switch header and some case labels/breaks are
 * missing from this excerpt.
 */
118 case _DRM_FRAME_BUFFER
:
121 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
125 phys
= map
->offset
+ offset
;
127 case _DRM_SCATTER_GATHER
:
129 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
132 case _DRM_CONSISTENT
:
/* Consistent maps hold a kernel-virtual handle; translate with vtophys(). */
134 phys
= vtophys((char *)map
->handle
+ offset
);
137 DRM_ERROR("bad map type %d\n", type
);
138 return -1; /* This should never happen. */
/* Hand the page index (atop(), not the byte address) back to devfs. */
141 ap
->a_result
= atop(phys
);
145 /* XXX The following is just a temporary hack to replace the
146 * vm_phys_fictitious functions available on FreeBSD
/* Fixed-size table of fictitious physical ranges; linear scans below
 * assume the table stays small. */
148 #define VM_PHYS_FICTITIOUS_NSEGS 8
149 static struct vm_phys_fictitious_seg
{
/* NOTE(review): the start/end field declarations (orig. lines 150-151)
 * are missing from this excerpt; only first_page is visible here. */
152 vm_page_t first_page
;
153 } vm_phys_fictitious_segs
[VM_PHYS_FICTITIOUS_NSEGS
];
/* Serializes registration/unregistration of fictitious ranges. */
154 static struct mtx vm_phys_fictitious_reg_mtx
= MTX_INITIALIZER("vmphy");
/*
 * vm_phys_fictitious_to_vm_page(): look up the fake vm_page backing a
 * fictitious physical address pa, by scanning the registered segments.
 * NOTE(review): the return-type line and the return statements are
 * missing from this excerpt (orig. line numbers jump around 157/169).
 */
157 vm_phys_fictitious_to_vm_page(vm_paddr_t pa
)
159 struct vm_phys_fictitious_seg
*seg
;
/* Linear scan of the small fixed-size segment table. */
164 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
165 seg
= &vm_phys_fictitious_segs
[segind
];
166 if (pa
>= seg
->start
&& pa
< seg
->end
) {
/* Index the fake-page array by page offset within the segment. */
167 m
= &seg
->first_page
[atop(pa
- seg
->start
)];
/* Sanity: every page in a registered segment must be fictitious. */
168 KASSERT((m
->flags
& PG_FICTITIOUS
) != 0,
169 ("%p not fictitious", m
));
/*
 * vm_phys_fictitious_reg_range(): allocate an array of fake vm_page
 * structures covering [start, end) with the given memory attribute and
 * record it in a free slot of vm_phys_fictitious_segs.
 * NOTE(review): declarations (page_count, fp, i), the kmalloc flags, and
 * the return paths are among the lines missing from this excerpt.
 */
177 vm_phys_fictitious_reg_range(vm_paddr_t start
, vm_paddr_t end
,
178 vm_memattr_t memattr
)
180 struct vm_phys_fictitious_seg
*seg
;
185 page_count
= (end
- start
) / PAGE_SIZE
;
187 fp
= kmalloc(page_count
* sizeof(struct vm_page
), M_DRM
,
/* Initialize one fake page per physical page in the range. */
190 for (i
= 0; i
< page_count
; i
++) {
191 vm_page_initfake(&fp
[i
], start
+ PAGE_SIZE
* i
, memattr
);
/* Clear UNMANAGED and the busy-lock bit so the fake pages behave
 * like ordinary pages to the rest of the VM code. */
192 fp
[i
].flags
&= ~PG_UNMANAGED
;
193 atomic_clear_int(&fp
[i
].busy_count
, PBUSY_LOCKED
);
/* Claim the first unused slot (start == end == 0) under the lock. */
195 mtx_lock(&vm_phys_fictitious_reg_mtx
);
196 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
197 seg
= &vm_phys_fictitious_segs
[segind
];
198 if (seg
->start
== 0 && seg
->end
== 0) {
201 seg
->first_page
= fp
;
202 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
206 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/*
 * vm_phys_fictitious_unreg_range(): find the segment exactly matching
 * [start, end), clear its slot, and detach its fake-page array (fp).
 * Falls through to a KASSERT panic if the range was never registered.
 * NOTE(review): lines between/after the unlocks (orig. 226-229, and
 * anything past line 231) are not visible in this excerpt — presumably
 * fp is freed there; verify against the full file.
 */
212 vm_phys_fictitious_unreg_range(vm_paddr_t start
, vm_paddr_t end
)
214 struct vm_phys_fictitious_seg
*seg
;
218 mtx_lock(&vm_phys_fictitious_reg_mtx
);
219 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
220 seg
= &vm_phys_fictitious_segs
[segind
];
/* Exact match on the registered bounds identifies the slot to release. */
221 if (seg
->start
== start
&& seg
->end
== end
) {
222 seg
->start
= seg
->end
= 0;
223 fp
= seg
->first_page
;
224 seg
->first_page
= NULL
;
225 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
230 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
231 KASSERT(0, ("Unregistering not registered fictitious range"));