2 * Copyright 2003 Eric Anholt
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * $FreeBSD: head/sys/dev/drm2/drm_vm.c 235783 2012-05-22 11:07:44Z kib $
27 * Support code for mmaping of DRM maps.
31 #include <linux/export.h>
32 #include <linux/seq_file.h>
34 #include <linux/efi.h>
35 #include <linux/slab.h>
37 #include <asm/pgtable.h>
38 #include "drm_internal.h"
39 #include "drm_legacy.h"
41 #include <sys/mutex2.h>
/*
 * drm_mmap() - dev_ops mmap entry point for the DRM character device.
 *
 * Translates the byte offset of an mmap()/fault request into a physical
 * page number, handed back to the VM system through ap->a_result
 * (in pages, via atop(phys)).
 *
 * Resolution order, as visible below:
 *   1. reject callers whose drm_file is not authenticated;
 *   2. offsets inside the classic DMA buffer region (dev->dma) are
 *      served straight from dma->pagelist[];
 *   3. otherwise the offset is looked up in dev->map_hash and the
 *      resulting drm_local_map's type decides how the physical address
 *      is computed: map->offset + offset for MMIO-style maps,
 *      vtophys(map->handle + offset) for kernel-memory maps.
 *
 * NOTE(review): this chunk is extraction-damaged — several original
 * lines (opening brace, returns, locking calls, the `switch (type)`
 * header, `type = map->type`, and the `#if 0` guards around the
 * *memattr stores) are missing here, so the code text is kept
 * byte-identical rather than reconstructed.
 */
43 int drm_mmap(struct dev_mmap_args
*ap
)
/* Per-open file and the DRM per-file private data of the caller. */
45 struct file
*filp
= ap
->a_fp
;
46 struct drm_file
*priv
= filp
->private_data
;
/* The cdev the request arrived on; used to locate the drm_device. */
47 struct cdev
*kdev
= ap
->a_head
.a_dev
;
48 vm_offset_t offset
= ap
->a_offset
;
49 struct drm_device
*dev
= drm_get_device_from_kdev(kdev
);
50 struct drm_local_map
*map
= NULL
;
51 struct drm_hash_item
*hash
;
53 enum drm_map_type type
;
/* Unauthenticated clients may not map anything. */
56 if (!priv
->authenticated
)
59 DRM_DEBUG("called with offset %016jx\n", (uintmax_t)offset
);
/* Fast path: offset falls inside the legacy DMA buffer area. */
60 if (dev
->dma
&& offset
< ptoa(dev
->dma
->page_count
)) {
61 struct drm_device_dma
*dma
= dev
->dma
;
63 if (dma
->pagelist
!= NULL
) {
/* pagelist[] maps a buffer page index to its physical address. */
64 unsigned long page
= offset
>> PAGE_SHIFT
;
65 unsigned long phys
= dma
->pagelist
[page
];
75 /* A sequential search of a linked list is
76 fine here because: 1) there will only be
77 about 5-10 entries in the list and, 2) a
78 DRI client only has to do this mapping
79 once, so it doesn't have to be optimized
80 for performance, even if the list was a
bit longer. */
/* Look the offset up in the map hash to find its drm_local_map. */
85 if (drm_ht_find_item(&dev
->map_hash
, offset
, &hash
)) {
86 DRM_ERROR("Could not find map\n");
90 map
= drm_hash_entry(hash
, struct drm_map_list
, hash
)->map
;
92 DRM_DEBUG("Can't find map, request offset = %016jx\n",
/* _DRM_RESTRICTED maps are only mappable by privileged callers. */
97 if (((map
->flags
& _DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
))) {
99 DRM_DEBUG("restricted map\n");
/* MMIO-style maps: physical address is the map base plus offset.
 * NOTE(review): upstream wraps the *memattr assignments below in
 * `#if 0`; those guard lines are missing from this chunk. */
107 case _DRM_FRAME_BUFFER
:
110 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
114 phys
= map
->offset
+ offset
;
116 case _DRM_SCATTER_GATHER
:
118 *memattr
= VM_MEMATTR_WRITE_COMBINING
;
/* Kernel-memory maps: translate the kernel VA to a physical address. */
121 case _DRM_CONSISTENT
:
123 phys
= vtophys((char *)map
->handle
+ offset
);
126 DRM_ERROR("bad map type %d\n", type
);
127 return -1; /* This should never happen. */
/* Hand the page number (not the byte address) back to the VM system. */
130 ap
->a_result
= atop(phys
);
134 /* XXX The following is just temporary hack to replace the
135 * vm_phys_fictitious functions available on FreeBSD
 */
/*
 * Minimal stand-in for FreeBSD's vm_phys_fictitious_* registry: a small
 * fixed-size table of fictitious physical ranges, protected by a mutex.
 * NOTE(review): the segment's start/end fields (referenced as seg->start
 * and seg->end by the functions below) are on original lines 139-140,
 * which are missing from this chunk.
 */
137 #define VM_PHYS_FICTITIOUS_NSEGS 8
138 static struct vm_phys_fictitious_seg
{
/* First fake vm_page backing the range; indexed by atop(pa - start). */
141 vm_page_t first_page
;
142 } vm_phys_fictitious_segs
[VM_PHYS_FICTITIOUS_NSEGS
];
/* Serializes registration, unregistration, and table scans. */
143 static struct mtx vm_phys_fictitious_reg_mtx
= MTX_INITIALIZER("vmphy");
/*
 * vm_phys_fictitious_to_vm_page() - look up the fake vm_page backing the
 * fictitious physical address 'pa' by scanning the fixed-size segment
 * table linearly.
 * NOTE(review): the return-type line, the declarations of `m` and
 * `segind`, and the function tail (break / return) are missing from
 * this chunk; upstream returns NULL when no range matches.
 */
146 vm_phys_fictitious_to_vm_page(vm_paddr_t pa
)
148 struct vm_phys_fictitious_seg
*seg
;
/* Linear scan is fine: the table has only VM_PHYS_FICTITIOUS_NSEGS (8) slots. */
153 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
154 seg
= &vm_phys_fictitious_segs
[segind
];
/* Half-open range test: start <= pa < end. */
155 if (pa
>= seg
->start
&& pa
< seg
->end
) {
/* Index into the contiguous fake-page array by page number. */
156 m
= &seg
->first_page
[atop(pa
- seg
->start
)];
/* Sanity: every page handed out by this table must be fictitious. */
157 KASSERT((m
->flags
& PG_FICTITIOUS
) != 0,
158 ("%p not fictitious", m
));
/*
 * vm_phys_fictitious_reg_range() - register [start, end) as a fictitious
 * physical range backed by freshly kmalloc'd fake vm_pages (one per
 * physical page) initialized with the given memory attribute, claiming
 * the first free slot (start == end == 0) in the segment table.
 * NOTE(review): the return-type line, local declarations (fp, i,
 * page_count, segind), the kmalloc flags continuation, the
 * seg->start/seg->end assignments in the success path, and the
 * table-full cleanup are missing from this chunk.
 */
166 vm_phys_fictitious_reg_range(vm_paddr_t start
, vm_paddr_t end
,
167 vm_memattr_t memattr
)
169 struct vm_phys_fictitious_seg
*seg
;
/* One fake vm_page per physical page in [start, end). */
174 page_count
= (end
- start
) / PAGE_SIZE
;
176 fp
= kmalloc(page_count
* sizeof(struct vm_page
), M_DRM
,
/* Initialize every fake page; clear PG_UNMANAGED and the busy lock so
 * the pages look like ordinary managed, unbusied pages to the VM. */
179 for (i
= 0; i
< page_count
; i
++) {
180 vm_page_initfake(&fp
[i
], start
+ PAGE_SIZE
* i
, memattr
);
181 fp
[i
].flags
&= ~PG_UNMANAGED
;
182 atomic_clear_int(&fp
[i
].busy_count
, PBUSY_LOCKED
);
/* Claim a free table slot under the registry mutex. */
184 mtx_lock(&vm_phys_fictitious_reg_mtx
);
185 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
186 seg
= &vm_phys_fictitious_segs
[segind
];
/* A slot with start == end == 0 is unused. */
187 if (seg
->start
== 0 && seg
->end
== 0) {
190 seg
->first_page
= fp
;
191 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/* Table full.  NOTE(review): upstream frees fp and returns an error
 * here; those lines are missing from this chunk. */
195 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/*
 * vm_phys_fictitious_unreg_range() - undo a prior reg_range(): find the
 * table slot matching [start, end) exactly, clear it, and free the fake
 * vm_page array.  Asserts (panics) when the range was never registered.
 * NOTE(review): the declarations of `fp` and `segind` and the
 * kfree/return in the success path are missing from this chunk.
 */
201 vm_phys_fictitious_unreg_range(vm_paddr_t start
, vm_paddr_t end
)
203 struct vm_phys_fictitious_seg
*seg
;
207 mtx_lock(&vm_phys_fictitious_reg_mtx
);
208 for (segind
= 0; segind
< VM_PHYS_FICTITIOUS_NSEGS
; segind
++) {
209 seg
= &vm_phys_fictitious_segs
[segind
];
/* Only an exact [start, end) match may be unregistered. */
210 if (seg
->start
== start
&& seg
->end
== end
) {
/* Mark the slot free and detach the page array before unlocking. */
211 seg
->start
= seg
->end
= 0;
212 fp
= seg
->first_page
;
213 seg
->first_page
= NULL
;
214 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
219 mtx_unlock(&vm_phys_fictitious_reg_mtx
);
/* Reaching here means the caller passed a range that was never registered. */
220 KASSERT(0, ("Unregistering not registered fictitious range"));