/* vm.c -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
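
/* Each mapping type gets its own vm_operations table below.  The four
 * tables share drm_vm_open()/drm_vm_close() and differ only in the
 * nopage handler; drm_mmap() and drm_mmap_dma() pick the table that
 * matches the map being established. */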
struct vm_operations_struct   drm_vm_ops = {
	nopage:	 drm_vm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct   drm_vm_shm_ops = {
	nopage:	 drm_vm_shm_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct   drm_vm_shm_lock_ops = {
	nopage:	 drm_vm_shm_nopage_lock,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};

struct vm_operations_struct   drm_vm_dma_ops = {
	nopage:	 drm_vm_dma_nopage,
	open:	 drm_vm_open,
	close:	 drm_vm_close,
};
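
/* drm_vm_nopage() backs the mappings that drm_mmap() fully populates
 * with remap_page_range(); any fault that still reaches it (e.g. after
 * an mremap() that grew the VMA) is answered with SIGBUS.  The nopage
 * return type changed from unsigned long to struct page * in 2.3.23,
 * hence the duplicated prototypes throughout this file. */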
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
			    unsigned long address,
			    int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
			   unsigned long address,
			   int write_access)
#endif
{
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
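
/* drm_vm_shm_nopage() faults in pages of a kernel-allocated shared map
 * (map->handle is the kernel virtual base of the area), bumping the
 * page count so the kernel's later decrement balances out. */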
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
#if LINUX_VERSION_CODE >= 0x020300
	drm_map_t	*map	= (drm_map_t *)vma->vm_private_data;
#else
	drm_map_t	*map	= (drm_map_t *)vma->vm_pte;
#endif
	unsigned long	physical;
	unsigned long	offset;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!map)		   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start;
	physical = (unsigned long)map->handle + offset;
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx => 0x%08lx\n", address, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
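
/* Variant of the handler above for SHM maps flagged _DRM_CONTAINS_LOCK:
 * the backing pages come from dev->lock.hw_lock instead of map->handle. */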
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
				     unsigned long address,
				     int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage_lock(struct vm_area_struct *vma,
				    unsigned long address,
				    int write_access)
#endif
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	unsigned long	physical;
	unsigned long	offset;
	unsigned long	page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dev->lock.hw_lock)	   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start;
	page	 = offset >> PAGE_SHIFT;
	physical = (unsigned long)dev->lock.hw_lock + offset;
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
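
/* drm_vm_dma_nopage() resolves faults on the DMA buffer mapping by
 * indexing dma->pagelist with the page offset into the VMA. */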
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
	drm_file_t	 *priv	= vma->vm_file->private_data;
	drm_device_t	 *dev	= priv->dev;
	drm_device_dma_t *dma	= dev->dma;
	unsigned long	 physical;
	unsigned long	 offset;
	unsigned long	 page;

	if (!dma)		   return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist)	   return NOPAGE_OOM;	 /* Nothing allocated */

	offset	 = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page	 = offset >> PAGE_SHIFT;
	physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
	atomic_inc(&virt_to_page(physical)->count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return virt_to_page(physical);
#endif
}
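
/* drm_vm_open() bumps the per-device VMA count (and, with
 * DRM_DEBUG_CODE, records the VMA in dev->vmalist under struct_sem).
 * It is also called directly from drm_mmap()/drm_mmap_dma(), since the
 * kernel does not invoke ->open for the initial mmap(), only when a
 * VMA is duplicated or split. */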
void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *vma_entry;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);
#if LINUX_VERSION_CODE < 0x020333
				/* The map can exist after the fd is closed. */
	MOD_INC_USE_COUNT;	/* Needed before Linux 2.3.51 */
#endif

#if DRM_DEBUG_CODE
	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma	= vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid	= current->pid;
		dev->vmalist	= vma_entry;
		up(&dev->struct_sem);
	}
#endif
}
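
/* drm_vm_close() undoes drm_vm_open(): it drops the module use count
 * on old kernels, decrements dev->vma_count and, with DRM_DEBUG_CODE,
 * unlinks the VMA from dev->vmalist. */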
void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *pt, *prev;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT;	/* Needed before Linux 2.3.51 */
#endif
	atomic_dec(&dev->vma_count);

#if DRM_DEBUG_CODE
	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
#endif
}
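
/* drm_mmap_dma() maps the preallocated DMA buffers: the requested
 * length must cover exactly dma->page_count pages, and the resulting
 * VMA is locked so the buffers are never swapped out. */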
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev;
	drm_device_dma_t *dma;
	unsigned long	 length	 = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

				/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops   = &drm_vm_dma_ops;
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
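
/* drm_mmap() -- fops->mmap entry point for the DRM device node.
 *
 * An offset of zero selects the DMA buffer path (drm_mmap_dma()); any
 * other offset must match a map registered in dev->maplist, whose type
 * then decides between remap_page_range() for frame buffer, register
 * and AGP apertures and a nopage-backed SHM mapping.
 *
 * Illustrative userspace sketch only (the device path and the
 * sarea_offset/sarea_size/dma_size values are assumptions for the
 * example, normally obtained from the X server/libdrm, not from this
 * file):
 *
 *	int   fd    = open("/dev/dri/card0", O_RDWR);
 *	void *sarea = mmap(NULL, sarea_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, sarea_offset);
 *	void *bufs  = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, 0);   <-- offset 0: DMA buffers
 */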
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map	= NULL;
	int		i;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);

				/* A sequential search of a linked list is
				   fine here because: 1) there will only be
				   about 5-10 entries in the list and, 2) a
				   DRI client only has to do this mapping
				   once, so it doesn't have to be optimized
				   for performance, even if the list was a
				   bit longer. */
	for (i = 0; i < dev->map_count; i++) {
		map = dev->maplist[i];
		if (map->offset == VM_OFFSET(vma)) break;
	}

	if (i >= dev->map_count) return -EINVAL;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

				/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= VM_MAYWRITE;
#if defined(__i386__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
				/* Ye gads this is ugly.  With more thought
				   we could move this up higher and use
				   `protection_map' instead. */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
	case _DRM_AGP:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#elif defined(__ia64__)
			if (map->type != _DRM_AGP)
				vma->vm_page_prot =
					pgprot_writecombine(vma->vm_page_prot);
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
		if (remap_page_range(vma->vm_start,
				     VM_OFFSET(vma),
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		if (map->flags & _DRM_CONTAINS_LOCK)
			vma->vm_ops = &drm_vm_shm_lock_ops;
		else {
			vma->vm_ops = &drm_vm_shm_ops;
#if LINUX_VERSION_CODE >= 0x020300
			vma->vm_private_data = (void *)map;
#else
			vma->vm_pte = (unsigned long)map;
#endif
		}
				/* Don't let this area swap.  Change when
				   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_LOCKED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}