/* vm.c -- Memory mapping for DRM -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@precisioninsight.com>
 */
#define __NO_VERSION__
#include "drmP.h"
34 struct vm_operations_struct drm_vm_ops
= {
35 nopage
: drm_vm_nopage
,
40 struct vm_operations_struct drm_vm_shm_ops
= {
41 nopage
: drm_vm_shm_nopage
,
46 struct vm_operations_struct drm_vm_dma_ops
= {
47 nopage
: drm_vm_dma_nopage
,
52 #if LINUX_VERSION_CODE < 0x020317
53 unsigned long drm_vm_nopage(struct vm_area_struct
*vma
,
54 unsigned long address
,
57 /* Return type changed in 2.3.23 */
58 struct page
*drm_vm_nopage(struct vm_area_struct
*vma
,
59 unsigned long address
,
63 DRM_DEBUG("0x%08lx, %d\n", address
, write_access
);
65 return NOPAGE_SIGBUS
; /* Disallow mremap */
68 #if LINUX_VERSION_CODE < 0x020317
69 unsigned long drm_vm_shm_nopage(struct vm_area_struct
*vma
,
70 unsigned long address
,
73 /* Return type changed in 2.3.23 */
74 struct page
*drm_vm_shm_nopage(struct vm_area_struct
*vma
,
75 unsigned long address
,
79 drm_file_t
*priv
= vma
->vm_file
->private_data
;
80 drm_device_t
*dev
= priv
->dev
;
81 unsigned long physical
;
85 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
86 if (!dev
->lock
.hw_lock
) return NOPAGE_OOM
; /* Nothing allocated */
88 offset
= address
- vma
->vm_start
;
89 page
= offset
>> PAGE_SHIFT
;
90 physical
= (unsigned long)dev
->lock
.hw_lock
+ (offset
& (~PAGE_MASK
));
91 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
93 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
94 #if LINUX_VERSION_CODE < 0x020317
97 return mem_map
+ MAP_NR(physical
);
101 #if LINUX_VERSION_CODE < 0x020317
102 unsigned long drm_vm_dma_nopage(struct vm_area_struct
*vma
,
103 unsigned long address
,
106 /* Return type changed in 2.3.23 */
107 struct page
*drm_vm_dma_nopage(struct vm_area_struct
*vma
,
108 unsigned long address
,
112 drm_file_t
*priv
= vma
->vm_file
->private_data
;
113 drm_device_t
*dev
= priv
->dev
;
114 drm_device_dma_t
*dma
= dev
->dma
;
115 unsigned long physical
;
116 unsigned long offset
;
119 if (!dma
) return NOPAGE_SIGBUS
; /* Error */
120 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
121 if (!dma
->pagelist
) return NOPAGE_OOM
; /* Nothing allocated */
123 offset
= address
- vma
->vm_start
; /* vm_[pg]off[set] should be 0 */
124 page
= offset
>> PAGE_SHIFT
;
125 physical
= dma
->pagelist
[page
] + (offset
& (~PAGE_MASK
));
126 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
128 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
129 #if LINUX_VERSION_CODE < 0x020317
132 return mem_map
+ MAP_NR(physical
);
136 void drm_vm_open(struct vm_area_struct
*vma
)
138 drm_file_t
*priv
= vma
->vm_file
->private_data
;
139 drm_device_t
*dev
= priv
->dev
;
141 drm_vma_entry_t
*vma_entry
;
144 DRM_DEBUG("0x%08lx,0x%08lx\n",
145 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
146 atomic_inc(&dev
->vma_count
);
150 vma_entry
= drm_alloc(sizeof(*vma_entry
), DRM_MEM_VMAS
);
152 down(&dev
->struct_sem
);
153 vma_entry
->vma
= vma
;
154 vma_entry
->next
= dev
->vmalist
;
155 vma_entry
->pid
= current
->pid
;
156 dev
->vmalist
= vma_entry
;
157 up(&dev
->struct_sem
);
162 void drm_vm_close(struct vm_area_struct
*vma
)
164 drm_file_t
*priv
= vma
->vm_file
->private_data
;
165 drm_device_t
*dev
= priv
->dev
;
167 drm_vma_entry_t
*pt
, *prev
;
170 DRM_DEBUG("0x%08lx,0x%08lx\n",
171 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
173 atomic_dec(&dev
->vma_count
);
176 down(&dev
->struct_sem
);
177 for (pt
= dev
->vmalist
, prev
= NULL
; pt
; prev
= pt
, pt
= pt
->next
) {
178 if (pt
->vma
== vma
) {
180 prev
->next
= pt
->next
;
182 dev
->vmalist
= pt
->next
;
184 drm_free(pt
, sizeof(*pt
), DRM_MEM_VMAS
);
188 up(&dev
->struct_sem
);
192 int drm_mmap_dma(struct file
*filp
, struct vm_area_struct
*vma
)
194 drm_file_t
*priv
= filp
->private_data
;
195 drm_device_t
*dev
= priv
->dev
;
196 drm_device_dma_t
*dma
= dev
->dma
;
197 unsigned long length
= vma
->vm_end
- vma
->vm_start
;
199 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
200 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
202 /* Length must match exact page count */
203 if ((length
>> PAGE_SHIFT
) != dma
->page_count
) return -EINVAL
;
205 vma
->vm_ops
= &drm_vm_dma_ops
;
206 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
208 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
209 /* In Linux 2.2.3 and above, this is
210 handled in do_mmap() in mm/mmap.c. */
213 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */
218 int drm_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
220 drm_file_t
*priv
= filp
->private_data
;
221 drm_device_t
*dev
= priv
->dev
;
222 drm_map_t
*map
= NULL
;
225 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
226 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
228 if (!VM_OFFSET(vma
)) return drm_mmap_dma(filp
, vma
);
230 /* A sequential search of a linked list is
231 fine here because: 1) there will only be
232 about 5-10 entries in the list and, 2) a
233 DRI client only has to do this mapping
234 once, so it doesn't have to be optimized
235 for performance, even if the list was a
237 for (i
= 0; i
< dev
->map_count
; i
++) {
238 map
= dev
->maplist
[i
];
239 if (map
->offset
== VM_OFFSET(vma
)) break;
242 if (i
>= dev
->map_count
) return -EINVAL
;
243 if (!map
|| ((map
->flags
&_DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
)))
246 /* Check for valid size. */
247 if (map
->size
!= vma
->vm_end
- vma
->vm_start
) return -EINVAL
;
251 case _DRM_FRAME_BUFFER
:
253 if (VM_OFFSET(vma
) >= __pa(high_memory
)) {
254 #if defined(__i386__)
255 if (boot_cpu_data
.x86
> 3) {
256 pgprot_val(vma
->vm_page_prot
) |= _PAGE_PCD
;
257 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_PWT
;
260 vma
->vm_flags
|= VM_IO
; /* not in core dump */
262 if (remap_page_range(vma
->vm_start
,
264 vma
->vm_end
- vma
->vm_start
,
267 vma
->vm_ops
= &drm_vm_ops
;
270 vma
->vm_ops
= &drm_vm_shm_ops
;
271 /* Don't let this area swap. Change when
272 DRM_KERNEL advisory is supported. */
273 vma
->vm_flags
|= VM_LOCKED
;
276 return -EINVAL
; /* This should never happen. */
278 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
279 if (map
->flags
& _DRM_READ_ONLY
) {
280 #if defined(__i386__)
281 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_RW
;
283 /* Ye gads this is ugly. With more thought
284 we could move this up higher and use
285 `protection_map' instead. */
286 vma
->vm_page_prot
= __pgprot(pte_val(pte_wrprotect(
287 __pte(pgprot_val(vma
->vm_page_prot
)))));
292 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
293 /* In Linux 2.2.3 and above, this is
294 handled in do_mmap() in mm/mmap.c. */
297 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */