/* vm.c -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
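
/* Method tables handed to the VM layer for the different kinds of DRM
   mappings.  drm_mmap() and drm_mmap_dma() below install one of these on
   the VMA depending on the map type; the nopage handlers resolve page
   faults, while open/close keep the per-device VMA bookkeeping. */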
struct vm_operations_struct drm_vm_ops = {
	nopage:	 drm_vm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct drm_vm_shm_ops = {
	nopage:	 drm_vm_shm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct drm_vm_shm_lock_ops = {
	nopage:	 drm_vm_shm_nopage_lock,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct drm_vm_dma_ops = {
	nopage:	 drm_vm_dma_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
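
/* Default nopage handler: frame buffer, register, and AGP maps are mapped
   up front with remap_page_range(), so any fault that still reaches here
   (e.g. after an mremap() beyond the mapping) is refused with SIGBUS. */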
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
			    unsigned long address,
			    int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
			   unsigned long address,
			   int write_access)
#endif
{
	DRM_DEBUG("0x%08lx, %d\n", address, write_access);

	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
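
/* Fault handler for _DRM_SHM maps: the backing store is the kernel memory
   at map->handle, so translate the faulting offset into that region, take
   a page reference (dropped again by the kernel), and return the page. */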
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
#if LINUX_VERSION_CODE >= 0x020300
	drm_map_t	*map	= (drm_map_t *)vma->vm_private_data;
#else
	drm_map_t	*map	= (drm_map_t *)vma->vm_pte;
#endif
	unsigned long	physical;
	unsigned long	offset;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!map)		   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start;
	physical = (unsigned long)map->handle + offset;
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
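
/* Same as above, but for the _DRM_SHM map that carries the hardware lock:
   faults are satisfied from the pages backing dev->lock.hw_lock. */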
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
				     unsigned long address,
				     int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
				    unsigned long address,
				    int write_access)
#endif
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	unsigned long	physical;
	unsigned long	offset;
	unsigned long	page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dev->lock.hw_lock)	   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start;
	page	 = offset >> PAGE_SHIFT;
	physical = (unsigned long)dev->lock.hw_lock + offset;
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
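
/* Fault handler for the DMA buffer mapping: look up the kernel address of
   the faulting page in dma->pagelist and return the corresponding page. */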
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
	drm_file_t	 *priv	= vma->vm_file->private_data;
	drm_device_t	 *dev	= priv->dev;
	drm_device_dma_t *dma	= dev->dma;
	unsigned long	 physical;
	unsigned long	 offset;
	unsigned long	 page;

	if (!dma)		   return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist)	   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page	 = offset >> PAGE_SHIFT;
	physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
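
/* Called whenever a VMA over a DRM mapping is created (mmap, fork, split):
   count it against the device, hold a module reference on pre-2.3.51
   kernels, and, when debugging is compiled in, record the VMA and owning
   pid on dev->vmalist. */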
void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *vma_entry;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);
#if LINUX_VERSION_CODE < 0x020333
				/* The map can exist after the fd is closed. */
	MOD_INC_USE_COUNT;	/* Needed before Linux 2.3.51 */
#endif

#if DRM_DEBUG_CODE
	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma	= vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid	= current->pid;
		dev->vmalist	= vma_entry;
		up(&dev->struct_sem);
	}
#endif
}
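
/* Undo drm_vm_open()'s bookkeeping when a VMA over a DRM mapping goes away:
   drop the counts and unlink the VMA from dev->vmalist. */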
void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *pt, *prev;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT;	/* Needed before Linux 2.3.51 */
#endif
	atomic_dec(&dev->vma_count);

#if DRM_DEBUG_CODE
	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
#endif
}
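
/* Map the device's DMA buffers into one contiguous user mapping (requested
   by mmap()ing the device at offset 0).  The requested length must cover
   exactly dma->page_count pages; faults are filled by drm_vm_dma_nopage(). */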
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev;
	drm_device_dma_t *dma;
	unsigned long	 length	 = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

				/* Length must match exact page count */
	if ((length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops   = &drm_vm_dma_ops;
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
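
/* Top-level mmap entry point for the DRM device node: an offset of zero
   maps the DMA buffers; any other offset selects the registered map with
   that offset and sets the VMA up according to the map's type and flags. */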
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map	= NULL;
	int		i;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);

				/* A sequential search of a linked list is
				   fine here because: 1) there will only be
				   about 5-10 entries in the list and, 2) a
				   DRI client only has to do this mapping
				   once, so it doesn't have to be optimized
				   for performance, even if the list was a
				   bit longer. */
	for (i = 0; i < dev->map_count; i++) {
		map = dev->maplist[i];
		if (map->offset == VM_OFFSET(vma)) break;
	}

	if (i >= dev->map_count) return -EINVAL;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

				/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
				/* Read-only map: drop write permission from
				   the VMA and write-protect its pages. */
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
				/* Ye gads this is ugly.  With more thought
				   we could move this up higher and use
				   `protection_map' instead.  */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
	case _DRM_AGP:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
		if (remap_page_range(vma->vm_start,
				     VM_OFFSET(vma),
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		if (map->flags & _DRM_CONTAINS_LOCK)
			vma->vm_ops = &drm_vm_shm_lock_ops;
		else {
			vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
			vma->vm_private_data = (void *)map;
#else
			vma->vm_pte = (unsigned long)map;
#endif
		}

				/* Don't let this area swap.  Change when
				   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_LOCKED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}