1 /* vm.c -- Memory mapping for DRM -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
32 #define __NO_VERSION__
35 struct vm_operations_struct drm_vm_ops
= {
36 nopage
: drm_vm_nopage
,
41 struct vm_operations_struct drm_vm_shm_ops
= {
42 nopage
: drm_vm_shm_nopage
,
47 struct vm_operations_struct drm_vm_shm_lock_ops
= {
48 nopage
: drm_vm_shm_nopage_lock
,
53 struct vm_operations_struct drm_vm_dma_ops
= {
54 nopage
: drm_vm_dma_nopage
,
59 #if LINUX_VERSION_CODE < 0x020317
60 unsigned long drm_vm_nopage(struct vm_area_struct
*vma
,
61 unsigned long address
,
64 /* Return type changed in 2.3.23 */
65 struct page
*drm_vm_nopage(struct vm_area_struct
*vma
,
66 unsigned long address
,
70 DRM_DEBUG("0x%08lx, %d\n", address
, write_access
);
72 return NOPAGE_SIGBUS
; /* Disallow mremap */
75 #if LINUX_VERSION_CODE < 0x020317
76 unsigned long drm_vm_shm_nopage(struct vm_area_struct
*vma
,
77 unsigned long address
,
80 /* Return type changed in 2.3.23 */
81 struct page
*drm_vm_shm_nopage(struct vm_area_struct
*vma
,
82 unsigned long address
,
86 #if LINUX_VERSION_CODE >= 0x020300
87 drm_map_t
*map
= (drm_map_t
*)vma
->vm_private_data
;
89 drm_map_t
*map
= (drm_map_t
*)vma
->vm_pte
;
91 unsigned long physical
;
94 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
95 if (!map
) return NOPAGE_OOM
; /* Nothing allocated */
97 offset
= address
- vma
->vm_start
;
98 physical
= (unsigned long)map
->handle
+ offset
;
99 atomic_inc(&virt_to_page(physical
)->count
); /* Dec. by kernel */
101 DRM_DEBUG("0x%08lx => 0x%08lx\n", address
, physical
);
102 #if LINUX_VERSION_CODE < 0x020317
105 return virt_to_page(physical
);
109 #if LINUX_VERSION_CODE < 0x020317
110 unsigned long drm_vm_shm_nopage_lock(struct vm_area_struct
*vma
,
111 unsigned long address
,
114 /* Return type changed in 2.3.23 */
115 struct page
*drm_vm_shm_nopage_lock(struct vm_area_struct
*vma
,
116 unsigned long address
,
120 drm_file_t
*priv
= vma
->vm_file
->private_data
;
121 drm_device_t
*dev
= priv
->dev
;
122 unsigned long physical
;
123 unsigned long offset
;
126 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
127 if (!dev
->lock
.hw_lock
) return NOPAGE_OOM
; /* Nothing allocated */
129 offset
= address
- vma
->vm_start
;
130 page
= offset
>> PAGE_SHIFT
;
131 physical
= (unsigned long)dev
->lock
.hw_lock
+ offset
;
132 atomic_inc(&virt_to_page(physical
)->count
); /* Dec. by kernel */
134 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
135 #if LINUX_VERSION_CODE < 0x020317
138 return virt_to_page(physical
);
142 #if LINUX_VERSION_CODE < 0x020317
143 unsigned long drm_vm_dma_nopage(struct vm_area_struct
*vma
,
144 unsigned long address
,
147 /* Return type changed in 2.3.23 */
148 struct page
*drm_vm_dma_nopage(struct vm_area_struct
*vma
,
149 unsigned long address
,
153 drm_file_t
*priv
= vma
->vm_file
->private_data
;
154 drm_device_t
*dev
= priv
->dev
;
155 drm_device_dma_t
*dma
= dev
->dma
;
156 unsigned long physical
;
157 unsigned long offset
;
160 if (!dma
) return NOPAGE_SIGBUS
; /* Error */
161 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
162 if (!dma
->pagelist
) return NOPAGE_OOM
; /* Nothing allocated */
164 offset
= address
- vma
->vm_start
; /* vm_[pg]off[set] should be 0 */
165 page
= offset
>> PAGE_SHIFT
;
166 physical
= dma
->pagelist
[page
] + (offset
& (~PAGE_MASK
));
167 atomic_inc(&virt_to_page(physical
)->count
); /* Dec. by kernel */
169 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
170 #if LINUX_VERSION_CODE < 0x020317
173 return virt_to_page(physical
);
177 void drm_vm_open(struct vm_area_struct
*vma
)
179 drm_file_t
*priv
= vma
->vm_file
->private_data
;
180 drm_device_t
*dev
= priv
->dev
;
182 drm_vma_entry_t
*vma_entry
;
185 DRM_DEBUG("0x%08lx,0x%08lx\n",
186 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
187 atomic_inc(&dev
->vma_count
);
188 #if LINUX_VERSION_CODE < 0x020333
189 /* The map can exist after the fd is closed. */
190 MOD_INC_USE_COUNT
; /* Needed before Linux 2.3.51 */
195 vma_entry
= drm_alloc(sizeof(*vma_entry
), DRM_MEM_VMAS
);
197 down(&dev
->struct_sem
);
198 vma_entry
->vma
= vma
;
199 vma_entry
->next
= dev
->vmalist
;
200 vma_entry
->pid
= current
->pid
;
201 dev
->vmalist
= vma_entry
;
202 up(&dev
->struct_sem
);
207 void drm_vm_close(struct vm_area_struct
*vma
)
209 drm_file_t
*priv
= vma
->vm_file
->private_data
;
210 drm_device_t
*dev
= priv
->dev
;
212 drm_vma_entry_t
*pt
, *prev
;
215 DRM_DEBUG("0x%08lx,0x%08lx\n",
216 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
217 #if LINUX_VERSION_CODE < 0x020333
218 MOD_DEC_USE_COUNT
; /* Needed before Linux 2.3.51 */
220 atomic_dec(&dev
->vma_count
);
223 down(&dev
->struct_sem
);
224 for (pt
= dev
->vmalist
, prev
= NULL
; pt
; prev
= pt
, pt
= pt
->next
) {
225 if (pt
->vma
== vma
) {
227 prev
->next
= pt
->next
;
229 dev
->vmalist
= pt
->next
;
231 drm_free(pt
, sizeof(*pt
), DRM_MEM_VMAS
);
235 up(&dev
->struct_sem
);
239 int drm_mmap_dma(struct file
*filp
, struct vm_area_struct
*vma
)
241 drm_file_t
*priv
= filp
->private_data
;
243 drm_device_dma_t
*dma
;
244 unsigned long length
= vma
->vm_end
- vma
->vm_start
;
249 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
250 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
252 /* Length must match exact page count */
253 if ((length
>> PAGE_SHIFT
) != dma
->page_count
) {
259 vma
->vm_ops
= &drm_vm_dma_ops
;
260 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
262 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
263 /* In Linux 2.2.3 and above, this is
264 handled in do_mmap() in mm/mmap.c. */
267 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */
272 int drm_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
274 drm_file_t
*priv
= filp
->private_data
;
275 drm_device_t
*dev
= priv
->dev
;
276 drm_map_t
*map
= NULL
;
279 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
280 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
282 if (!VM_OFFSET(vma
)) return drm_mmap_dma(filp
, vma
);
284 /* A sequential search of a linked list is
285 fine here because: 1) there will only be
286 about 5-10 entries in the list and, 2) a
287 DRI client only has to do this mapping
288 once, so it doesn't have to be optimized
289 for performance, even if the list was a
291 for (i
= 0; i
< dev
->map_count
; i
++) {
292 map
= dev
->maplist
[i
];
293 if (map
->offset
== VM_OFFSET(vma
)) break;
296 if (i
>= dev
->map_count
) return -EINVAL
;
297 if (!map
|| ((map
->flags
&_DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
)))
300 /* Check for valid size. */
301 if (map
->size
!= vma
->vm_end
- vma
->vm_start
) return -EINVAL
;
303 if (!capable(CAP_SYS_ADMIN
) && (map
->flags
& _DRM_READ_ONLY
)) {
304 vma
->vm_flags
&= VM_MAYWRITE
;
305 #if defined(__i386__)
306 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_RW
;
308 /* Ye gads this is ugly. With more thought
309 we could move this up higher and use
310 `protection_map' instead. */
311 vma
->vm_page_prot
= __pgprot(pte_val(pte_wrprotect(
312 __pte(pgprot_val(vma
->vm_page_prot
)))));
317 case _DRM_FRAME_BUFFER
:
320 if (VM_OFFSET(vma
) >= __pa(high_memory
)) {
321 #if defined(__i386__)
322 if (boot_cpu_data
.x86
> 3 && map
->type
!= _DRM_AGP
) {
323 pgprot_val(vma
->vm_page_prot
) |= _PAGE_PCD
;
324 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_PWT
;
327 vma
->vm_flags
|= VM_IO
; /* not in core dump */
329 if (remap_page_range(vma
->vm_start
,
331 vma
->vm_end
- vma
->vm_start
,
334 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
337 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
338 vma
->vm_ops
= &drm_vm_ops
;
341 if (map
->flags
& _DRM_CONTAINS_LOCK
)
342 vma
->vm_ops
= &drm_vm_shm_lock_ops
;
344 vma
->vm_ops
= &drm_vm_shm_ops
;
345 #if LINUX_VERSION_CODE >= 0x020300
346 vma
->vm_private_data
= (void *)map
;
348 vma
->vm_pte
= (unsigned long)map
;
352 /* Don't let this area swap. Change when
353 DRM_KERNEL advisory is supported. */
354 vma
->vm_flags
|= VM_LOCKED
;
357 return -EINVAL
; /* This should never happen. */
359 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
361 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
362 /* In Linux 2.2.3 and above, this is
363 handled in do_mmap() in mm/mmap.c. */
366 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */