/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
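
/*
 * Note on the flag choices above: on x86, setting _PAGE_PCD with
 * _PAGE_PWT cleared makes the PTE uncacheable, which is what register
 * and framebuffer apertures want; _DRM_AGP is excluded because the
 * aperture's caching is presumed to be handled elsewhere (e.g. via
 * MTRRs). The powerpc and ia64 branches express the same intent with
 * _PAGE_NO_CACHE/_PAGE_GUARDED and the EFI write-combine lookup.
 */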

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data, including the faulting virtual address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on error.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment its use count and hand it back through \p vmf.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		unsigned long offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map.
		 * Note: after a full traversal, list_for_each_entry() leaves
		 * the cursor pointing at the list head, never NULL, so a
		 * !agpmem check cannot detect the not-found case; jump out
		 * explicitly instead.
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				goto found;
		}
		goto vm_fault_error;
found:
		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));
		return 0;
	}

vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
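
/*
 * Interface note (summarizing the nopage -> fault conversion merged
 * above): a ->nopage handler returned a struct page * directly, or
 * NOPAGE_SIGBUS on error, given a bare faulting address; a ->fault
 * handler instead receives the address in vmf->virtual_address, stores
 * the resolved page in vmf->page, and returns 0 on success or
 * VM_FAULT_SIGBUS on error.
 */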

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data, including the faulting virtual address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on error.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * hand it back through \p vmf.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
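
/*
 * Note: in drm_vm_shm_close() above, found_maps == 1 after the vmalist
 * walk means the vma being torn down was the last mapping of the map;
 * the map's backing store is then released only if the map is flagged
 * _DRM_REMOVABLE and no longer appears on dev->maplist.
 */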

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data, including the faulting virtual address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on error.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault data, including the faulting virtual address.
 * \return 0 on success, with vmf->page set, or VM_FAULT_SIGBUS on error.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}
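
/*
 * The thin drm_vm_*_fault() wrappers above are a holdover from the
 * nopage interface, whose wrappers also filled an int *type out
 * parameter with VM_FAULT_MINOR; with ->fault there is nothing left to
 * adapt, so they simply forward to the drm_do_vm_*_fault() helpers.
 */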

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
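
/*
 * How drm_mmap_locked() below pairs map types with these tables:
 * _DRM_AGP gets drm_vm_ops, but only when the aperture cannot be
 * accessed directly (cant_use_aperture) and pages must be resolved in
 * fault(); _DRM_FRAME_BUFFER and _DRM_REGISTERS are pre-populated with
 * io_remap_pfn_range() and also carry drm_vm_ops; _DRM_SHM and
 * _DRM_CONSISTENT use drm_vm_shm_ops; _DRM_SCATTER_GATHER uses
 * drm_vm_sg_ops; and offset-zero DMA mappings get drm_vm_dma_ops via
 * drm_mmap_dma().
 */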

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
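
/*
 * Note on the read-only path above: only x86 exposes the PTE write bit
 * (_PAGE_RW) directly, so other architectures round-trip vm_page_prot
 * through __pte()/pte_wrprotect()/pte_val() to clear whatever their
 * write-enable bit happens to be. The same construct appears again in
 * drm_mmap_locked() below.
 */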

unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp standard file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	struct drm_map *map = NULL;
	unsigned long offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
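
/*
 * Usage sketch (illustrative only; drm_fd, map_size and map_token are
 * hypothetical identifiers): a client passes the map token obtained
 * from the ADD_MAP/GET_MAP ioctls as the mmap offset, which
 * drm_mmap_locked() above looks up in dev->map_hash via vma->vm_pgoff:
 *
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, drm_fd, map_token);
 */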