/* vm.c -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *
 */
#define __NO_VERSION__
#include "drmP.h"

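/* Method tables for the three kinds of DRM mapping.  All share the same
 * open/close handlers and differ only in their nopage fault handler:
 * drm_vm_ops backs frame-buffer and register maps, drm_vm_shm_ops the
 * shared-memory (lock page) map, and drm_vm_dma_ops the DMA buffer pool. */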
struct vm_operations_struct drm_vm_ops = {
	nopage:	drm_vm_nopage,
	open:	drm_vm_open,
	close:	drm_vm_close,
};

struct vm_operations_struct drm_vm_shm_ops = {
	nopage:	drm_vm_shm_nopage,
	open:	drm_vm_open,
	close:	drm_vm_close,
};

struct vm_operations_struct drm_vm_dma_ops = {
	nopage:	drm_vm_dma_nopage,
	open:	drm_vm_open,
	close:	drm_vm_close,
};

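/* Default nopage handler for frame-buffer and register maps.  These
 * ranges are fully populated by remap_page_range() in drm_mmap(), so a
 * fault can only come from an mremap() beyond the mapped region; it is
 * always refused with SIGBUS. */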
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_nopage(struct vm_area_struct *vma,
			    unsigned long address,
			    int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_nopage(struct vm_area_struct *vma,
			   unsigned long address,
			   int write_access)
#endif
{
	DRM_DEBUG("0x%08lx, %d\n", address, write_access);

	return NOPAGE_SIGBUS;	/* Disallow mremap */
}

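/* Fault handler for the shared-memory (_DRM_SHM) map.  Resolves the
 * faulting address to the kernel page backing the hardware lock and
 * bumps its reference count; the kernel drops that reference again when
 * the page is unmapped. */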
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_shm_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
	unsigned long	physical;
	unsigned long	offset;
	unsigned long	page;

	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dev->lock.hw_lock)	   return NOPAGE_OOM;  /* Nothing allocated */

	offset	 = address - vma->vm_start;
	page	 = offset >> PAGE_SHIFT;
	physical = (unsigned long)dev->lock.hw_lock + (offset & (~PAGE_MASK));
	atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return mem_map + MAP_NR(physical);
#endif
}

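/* Fault handler for DMA buffer mappings.  Uses the fault offset as an
 * index into dma->pagelist to find the kernel page backing that part of
 * the buffer pool, and takes a reference that the kernel releases when
 * the page is unmapped. */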
#if LINUX_VERSION_CODE < 0x020317
unsigned long drm_vm_dma_nopage(struct vm_area_struct *vma,
				unsigned long address,
				int write_access)
#else
				/* Return type changed in 2.3.23 */
struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int write_access)
#endif
{
	drm_file_t	 *priv	= vma->vm_file->private_data;
	drm_device_t	 *dev	= priv->dev;
	drm_device_dma_t *dma	= dev->dma;
	unsigned long	 physical;
	unsigned long	 offset;
	unsigned long	 page;

	if (!dma)		   return NOPAGE_SIGBUS; /* Error */
	if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
	if (!dma->pagelist)	   return NOPAGE_OOM;  /* Nothing allocated */

	offset	 = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
	page	 = offset >> PAGE_SHIFT;
	physical = dma->pagelist[page] + (offset & (~PAGE_MASK));
	atomic_inc(&mem_map[MAP_NR(physical)].count); /* Dec. by kernel */

	DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address, page, physical);
#if LINUX_VERSION_CODE < 0x020317
	return physical;
#else
	return mem_map + MAP_NR(physical);
#endif
}

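/* VMA open callback.  Pins the module and counts the new mapping; when
 * debugging is compiled in, it also records the VMA and the owning pid
 * on dev->vmalist under struct_sem. */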
void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *vma_entry;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);
	MOD_INC_USE_COUNT;

#if DRM_DEBUG_CODE
	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		down(&dev->struct_sem);
		vma_entry->vma	= vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid	= current->pid;
		dev->vmalist	= vma_entry;
		up(&dev->struct_sem);
	}
#endif
}

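/* VMA close callback.  Undoes drm_vm_open(): drops the module and
 * mapping counts and, when debugging is compiled in, unlinks and frees
 * the matching entry from dev->vmalist. */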
void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t	*priv	= vma->vm_file->private_data;
	drm_device_t	*dev	= priv->dev;
#if DRM_DEBUG_CODE
	drm_vma_entry_t *pt, *prev;
#endif

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	MOD_DEC_USE_COUNT;
	atomic_dec(&dev->vma_count);

#if DRM_DEBUG_CODE
	down(&dev->struct_sem);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	up(&dev->struct_sem);
#endif
}

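/* Map the whole DMA buffer pool into the caller's address space.  The
 * requested length must cover exactly dma->page_count pages; individual
 * pages are then filled in lazily by drm_vm_dma_nopage(). */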
int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	 *priv	= filp->private_data;
	drm_device_t	 *dev	= priv->dev;
	drm_device_dma_t *dma	= dev->dma;
	unsigned long	 length = vma->vm_end - vma->vm_start;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

				/* Length must match exact page count */
	if ((length >> PAGE_SHIFT) != dma->page_count) return -EINVAL;

	vma->vm_ops   = &drm_vm_dma_ops;
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

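/* Device mmap entry point.  An offset of zero selects the DMA buffer
 * pool (drm_mmap_dma); any other offset is looked up in dev->maplist
 * and mapped according to its type: frame-buffer and register maps are
 * remapped directly with remap_page_range(), while the shared-memory
 * map is faulted in on demand by drm_vm_shm_nopage().  A client would
 * typically call something like
 *	mmap(0, map_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, map_offset)
 * with a size and offset taken from the device's map list. */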
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map	= NULL;
	int		i;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

	if (!VM_OFFSET(vma)) return drm_mmap_dma(filp, vma);

				/* A sequential search of the map list is
				   fine here because: 1) there will only be
				   about 5-10 entries in the list and, 2) a
				   DRI client only has to do this mapping
				   once, so it doesn't have to be optimized
				   for performance, even if the list were a
				   bit longer. */
	for (i = 0; i < dev->map_count; i++) {
		map = dev->maplist[i];
		if (map->offset == VM_OFFSET(vma)) break;
	}

	if (i >= dev->map_count) return -EINVAL;
	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

				/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

	switch (map->type) {
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__)
			if (boot_cpu_data.x86 > 3) {
				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
			}
#endif
			vma->vm_flags |= VM_IO;	/* not in core dump */
		}
		if (remap_page_range(vma->vm_start,
				     VM_OFFSET(vma),
				     vma->vm_end - vma->vm_start,
				     vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
				/* Don't let this area swap.  Change when
				   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_LOCKED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */

	if (map->flags & _DRM_READ_ONLY) {
#if defined(__i386__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
				/* Ye gads this is ugly.  With more thought
				   we could move this up higher and use
				   `protection_map' instead. */
		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
			__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

#if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
				/* In Linux 2.2.3 and above, this is
				   handled in do_mmap() in mm/mmap.c. */
	++filp->f_count;
#endif
	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}