2 * Copyright 2003 Eric Anholt
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
27 * Support code for mmaping of DRM maps.
31 #include <linux/export.h>
32 #include <linux/seq_file.h>
34 #include <linux/efi.h>
35 #include <linux/slab.h>
37 #include <linux/mem_encrypt.h>
38 #include <asm/pgtable.h>
39 #include "drm_internal.h"
40 #include "drm_legacy.h"
42 #include <sys/mutex2.h>
45 drm_mmap(struct dev_mmap_args
*ap
)
47 struct file
*filp
= ap
->a_fp
;
48 struct drm_file
*priv
;
49 struct cdev
*kdev
= ap
->a_head
.a_dev
;
50 vm_offset_t offset
= ap
->a_offset
;
51 struct drm_device
*dev
= drm_get_device_from_kdev(kdev
);
52 struct drm_local_map
*map
= NULL
;
53 struct drm_hash_item
*hash
;
54 enum drm_map_type type
;
58 * NOTE: If ddev->drm_ttm_bdev is not setup properly, this path
59 * may be hit with a NULL filp and panic.
61 priv
= filp
->private_data
;
62 if (!priv
->authenticated
)
65 DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset
);
66 if (dev
->dma
&& offset
< ptoa(dev
->dma
->page_count
)) {
67 struct drm_device_dma
*dma
= dev
->dma
;
69 if (dma
->pagelist
!= NULL
) {
70 unsigned long page
= offset
>> PAGE_SHIFT
;
71 unsigned long phys
= dma
->pagelist
[page
];
81 /* A sequential search of a linked list is
82 fine here because: 1) there will only be
83 about 5-10 entries in the list and, 2) a
84 DRI client only has to do this mapping
85 once, so it doesn't have to be optimized
86 for performance, even if the list was a
91 if (drm_ht_find_item(&dev
->map_hash
, offset
, &hash
)) {
92 DRM_ERROR("Could not find map\n");
97 map
= drm_hash_entry(hash
, struct drm_map_list
, hash
)->map
;
99 DRM_DEBUG("Can't find map, request offset = %016jx\n",
104 if (((map
->flags
& _DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
))) {
106 DRM_DEBUG("restricted map\n");
114 case _DRM_FRAME_BUFFER
:
117 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
121 phys
= map
->offset
+ offset
;
123 case _DRM_SCATTER_GATHER
:
125 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
128 case _DRM_CONSISTENT
:
130 phys
= vtophys((char *)map
->handle
+ offset
);
133 DRM_ERROR("bad map type %d\n", type
);
134 return -1; /* This should never happen. */
137 ap
->a_result
= atop(phys
);
141 /* XXX The following is just temporary hack to replace the
142 * vm_phys_fictitious functions available on FreeBSD
/*
 * Minimal stand-in for FreeBSD's vm_phys_fictitious_* API: a fixed
 * table of segments protected by a private mutex.
 * NOTE(review): the struct's start/end members (referenced by the
 * lookup code below) are missing from this fragment — the extraction
 * dropped the original lines declaring them.
 */
144 #define VM_PHYS_FICTITIOUS_NSEGS 8
145 static struct vm_phys_fictitious_seg
{
148 vm_page_t first_page
;
149 } vm_phys_fictitious_segs
[VM_PHYS_FICTITIOUS_NSEGS
];
/* Guards all reads/writes of vm_phys_fictitious_segs. */
150 static struct mtx vm_phys_fictitious_reg_mtx
= MTX_INITIALIZER("vmphy");
/*
 * vm_phys_fictitious_to_vm_page() — linear scan of the fictitious-segment
 * table; when pa falls in [seg->start, seg->end), resolve it to the
 * corresponding fake vm_page (first_page plus the page index of pa within
 * the segment) and assert the page really is PG_FICTITIOUS.
 * NOTE(review): the return type, the declarations of segind/m, and the
 * return statements are missing from this fragment (numbering gaps).
 */
153 vm_phys_fictitious_to_vm_page(vm_paddr_t pa
)
155 struct vm_phys_fictitious_seg
*seg
;
160 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
161 seg
= &vm_phys_fictitious_segs
[segind
];
162 if (pa
>= seg
->start
&& pa
< seg
->end
) {
/* atop() converts the byte offset into a page index. */
163 m
= &seg
->first_page
[atop(pa
- seg
->start
)];
164 KASSERT((m
->flags
& PG_FICTITIOUS
) != 0,
165 ("%p not fictitious", m
));
/*
 * vm_phys_fictitious_reg_range() — register [start, end) as a fictitious
 * physical range: allocate page_count fake vm_page structures, initialize
 * each with vm_page_initfake() and clear PBUSY_LOCKED, then claim the
 * first free table slot (start == end == 0) under the registration mutex.
 * NOTE(review): local declarations, the kmalloc flags argument, the
 * stores to seg->start/seg->end and the return paths are missing from
 * this fragment (numbering gaps).
 */
173 vm_phys_fictitious_reg_range(vm_paddr_t start
, vm_paddr_t end
,
174 vm_memattr_t memattr
)
176 struct vm_phys_fictitious_seg
*seg
;
181 page_count
= (end
- start
) / PAGE_SIZE
;
183 fp
= kmalloc(page_count
* sizeof(struct vm_page
), M_DRM
,
/* Fabricate one fake vm_page per physical page in the range. */
186 for (i
= 0; i
< page_count
; i
++) {
187 vm_page_initfake(&fp
[i
], start
+ PAGE_SIZE
* i
, memattr
);
/* Fake pages come back busy-locked; release them for use. */
188 atomic_clear_int(&fp
[i
].busy_count
, PBUSY_LOCKED
);
/* Claim the first empty segment slot under the table mutex. */
190 mtx_lock(&vm_phys_fictitious_reg_mtx
);
191 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
192 seg
= &vm_phys_fictitious_segs
[segind
];
193 if (seg
->start
== 0 && seg
->end
== 0) {
196 seg
->first_page
= fp
;
197 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/* No free slot: drop the lock (error path follows in original). */
201 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/*
 * vm_phys_fictitious_unreg_range() — inverse of reg_range: locate the
 * segment that exactly matches [start, end), clear its slot, and detach
 * its fake-page array (presumably freed after the unlock — the kfree and
 * return are missing from this fragment).  Panics via KASSERT if the
 * range was never registered.
 */
207 vm_phys_fictitious_unreg_range(vm_paddr_t start
, vm_paddr_t end
)
209 struct vm_phys_fictitious_seg
*seg
;
213 mtx_lock(&vm_phys_fictitious_reg_mtx
);
214 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
215 seg
= &vm_phys_fictitious_segs
[segind
];
/* Only an exact [start, end) match qualifies for unregistration. */
216 if (seg
->start
== start
&& seg
->end
== end
) {
217 seg
->start
= seg
->end
= 0;
218 fp
= seg
->first_page
;
219 seg
->first_page
= NULL
;
220 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/* Fell through the loop: the range was never registered. */
225 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
226 KASSERT(0, ("Unregistering not registered fictitious range"));