1 /* vm.c -- Memory mapping for DRM -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
32 #define __NO_VERSION__
/* VM operations for register / frame-buffer maps: drm_mmap() installs
 * these after remap_page_range() has populated the range (see below).
 * Uses GCC's old "label: value" initializer syntax.  The closing "};"
 * of this initializer lies outside this view. */
35 struct vm_operations_struct drm_vm_ops
= {
36 nopage
: drm_vm_nopage
,
/* VM operations for shared-memory (_DRM_SHM) maps; faults are served
 * lazily by drm_vm_shm_nopage().  Closing "};" is outside this view. */
41 struct vm_operations_struct drm_vm_shm_ops
= {
42 nopage
: drm_vm_shm_nopage
,
/* VM operations for DMA buffer maps (drm_mmap_dma); faults are served
 * lazily by drm_vm_dma_nopage().  Closing "};" is outside this view. */
47 struct vm_operations_struct drm_vm_dma_ops
= {
48 nopage
: drm_vm_dma_nopage
,
/* Fault (nopage) handler for drm_vm_ops mappings.  drm_mmap() installs
 * drm_vm_ops only after remap_page_range() has mapped the whole range,
 * so any fault reaching this handler is out of range: always fail with
 * SIGBUS, which also prevents growing the mapping via mremap(). */
/* Kernel 2.3.23 changed nopage's return type from unsigned long to
 * struct page *, hence the version-guarded duplicate signatures.  The
 * matching #else/#endif lines fall outside this view. */
53 #if LINUX_VERSION_CODE < 0x020317
54 unsigned long drm_vm_nopage(struct vm_area_struct
*vma
,
55 unsigned long address
,
58 /* Return type changed in 2.3.23 */
59 struct page
*drm_vm_nopage(struct vm_area_struct
*vma
,
60 unsigned long address
,
/* write_access is presumably a parameter declared on a line not
 * visible in this chunk -- confirm against the full file. */
64 DRM_DEBUG("0x%08lx, %d\n", address
, write_access
);
66 return NOPAGE_SIGBUS
; /* Disallow mremap */
/* Fault handler for shared-memory (_DRM_SHM) mappings: backs the vma
 * with the pages of the kernel-side hardware lock area
 * (dev->lock.hw_lock), mapping the fault offset 1:1 onto an offset
 * into that area. */
69 #if LINUX_VERSION_CODE < 0x020317
70 unsigned long drm_vm_shm_nopage(struct vm_area_struct
*vma
,
71 unsigned long address
,
/* Same pre/post-2.3.23 return-type split as drm_vm_nopage. */
74 /* Return type changed in 2.3.23 */
75 struct page
*drm_vm_shm_nopage(struct vm_area_struct
*vma
,
76 unsigned long address
,
/* Per-open-file DRM state hangs off the vma's backing struct file. */
80 drm_file_t
*priv
= vma
->vm_file
->private_data
;
81 drm_device_t
*dev
= priv
->dev
;
82 unsigned long physical
;
/* NOTE(review): vm_end is exclusive, so arguably this should be
 * ">=".  The same test appears in drm_vm_dma_nopage below -- confirm
 * against upstream before changing either. */
86 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
87 if (!dev
->lock
.hw_lock
) return NOPAGE_OOM
; /* Nothing allocated */
/* offset/page are presumably declared on lines outside this view. */
89 offset
= address
- vma
->vm_start
;
90 page
= offset
>> PAGE_SHIFT
;
91 physical
= (unsigned long)dev
->lock
.hw_lock
+ offset
;
/* Pre-2.4 page refcounting: bump mem_map[].count directly; the VM
 * drops the reference when the pte is torn down (see comment). */
92 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
94 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
/* Version split on the return value as well; only the post-2.3.23
 * struct page * return is visible in this chunk. */
95 #if LINUX_VERSION_CODE < 0x020317
98 return mem_map
+ MAP_NR(physical
);
/* Fault handler for DMA buffer mappings (installed by drm_mmap_dma):
 * translates the fault offset through dma->pagelist[] to the physical
 * page backing that part of the DMA buffer space. */
102 #if LINUX_VERSION_CODE < 0x020317
103 unsigned long drm_vm_dma_nopage(struct vm_area_struct
*vma
,
104 unsigned long address
,
/* Same pre/post-2.3.23 return-type split as drm_vm_nopage. */
107 /* Return type changed in 2.3.23 */
108 struct page
*drm_vm_dma_nopage(struct vm_area_struct
*vma
,
109 unsigned long address
,
/* Per-open-file DRM state hangs off the vma's backing struct file. */
113 drm_file_t
*priv
= vma
->vm_file
->private_data
;
114 drm_device_t
*dev
= priv
->dev
;
115 drm_device_dma_t
*dma
= dev
->dma
;
116 unsigned long physical
;
117 unsigned long offset
;
/* Guard clauses: no DMA state, out-of-range fault, or no pages yet. */
120 if (!dma
) return NOPAGE_SIGBUS
; /* Error */
/* NOTE(review): vm_end is exclusive -- see the matching note in
 * drm_vm_shm_nopage; confirm ">" vs ">=" against upstream. */
121 if (address
> vma
->vm_end
) return NOPAGE_SIGBUS
; /* Disallow mremap */
122 if (!dma
->pagelist
) return NOPAGE_OOM
; /* Nothing allocated */
/* page is presumably declared on a line outside this view. */
124 offset
= address
- vma
->vm_start
; /* vm_[pg]off[set] should be 0 */
125 page
= offset
>> PAGE_SHIFT
;
/* Physical address = page base from the pagelist + in-page offset. */
126 physical
= dma
->pagelist
[page
] + (offset
& (~PAGE_MASK
));
/* Pre-2.4 page refcounting, as in drm_vm_shm_nopage. */
127 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
129 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
/* Only the post-2.3.23 struct page * return is visible here. */
130 #if LINUX_VERSION_CODE < 0x020317
133 return mem_map
+ MAP_NR(physical
);
/* vm_operations open callback: account for a new reference to a DRM
 * mapping by bumping dev->vma_count and recording the vma (with the
 * opening pid) on dev->vmalist under struct_sem. */
137 void drm_vm_open(struct vm_area_struct
*vma
)
139 drm_file_t
*priv
= vma
->vm_file
->private_data
;
140 drm_device_t
*dev
= priv
->dev
;
142 drm_vma_entry_t
*vma_entry
;
145 DRM_DEBUG("0x%08lx,0x%08lx\n",
146 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
147 atomic_inc(&dev
->vma_count
);
/* Before 2.3.51 the mapping could outlive the fd, so the module
 * refcount must be held per-vma; newer kernels handle this. */
148 #if LINUX_VERSION_CODE < 0x020333
149 /* The map can exist after the fd is closed. */
150 MOD_INC_USE_COUNT
; /* Needed before Linux 2.3.51 */
/* NOTE(review): the allocation result is presumably checked on a
 * line not visible in this chunk (between the drm_alloc and the
 * down()) -- confirm before assuming an unchecked dereference. */
155 vma_entry
= drm_alloc(sizeof(*vma_entry
), DRM_MEM_VMAS
);
157 down(&dev
->struct_sem
);
/* Push the new entry onto the head of dev->vmalist. */
158 vma_entry
->vma
= vma
;
159 vma_entry
->next
= dev
->vmalist
;
160 vma_entry
->pid
= current
->pid
;
161 dev
->vmalist
= vma_entry
;
162 up(&dev
->struct_sem
);
/* vm_operations close callback: counterpart to drm_vm_open().  Drops
 * the per-vma accounting and unlinks this vma's tracking entry from
 * dev->vmalist under struct_sem. */
167 void drm_vm_close(struct vm_area_struct
*vma
)
169 drm_file_t
*priv
= vma
->vm_file
->private_data
;
170 drm_device_t
*dev
= priv
->dev
;
172 drm_vma_entry_t
*pt
, *prev
;
175 DRM_DEBUG("0x%08lx,0x%08lx\n",
176 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
/* Release the per-vma module reference taken in drm_vm_open(). */
177 #if LINUX_VERSION_CODE < 0x020333
178 MOD_DEC_USE_COUNT
; /* Needed before Linux 2.3.51 */
180 atomic_dec(&dev
->vma_count
);
183 down(&dev
->struct_sem
);
/* Singly-linked-list removal with a trailing prev pointer. */
184 for (pt
= dev
->vmalist
, prev
= NULL
; pt
; prev
= pt
, pt
= pt
->next
) {
185 if (pt
->vma
== vma
) {
/* NOTE(review): the two unlink statements below are presumably the
 * arms of an if (prev) { ... } else { ... } whose brace lines are
 * not visible in this chunk -- confirm against the full file. */
187 prev
->next
= pt
->next
;
189 dev
->vmalist
= pt
->next
;
191 drm_free(pt
, sizeof(*pt
), DRM_MEM_VMAS
);
195 up(&dev
->struct_sem
);
/* mmap handler for DMA buffer space (reached from drm_mmap() when the
 * requested offset is 0).  Validates that the mapping covers exactly
 * the allocated DMA pages, then installs drm_vm_dma_ops so pages are
 * supplied lazily by drm_vm_dma_nopage(). */
199 int drm_mmap_dma(struct file
*filp
, struct vm_area_struct
*vma
)
201 drm_file_t
*priv
= filp
->private_data
;
/* NOTE(review): dma's assignment (presumably dma = dev->dma behind a
 * lock) is on lines not visible in this chunk -- confirm. */
203 drm_device_dma_t
*dma
;
204 unsigned long length
= vma
->vm_end
- vma
->vm_start
;
209 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
210 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
/* The mapping must span exactly dma->page_count pages; the error
 * return for a mismatch is on lines outside this view. */
212 /* Length must match exact page count */
213 if ((length
>> PAGE_SHIFT
) != dma
->page_count
) {
219 vma
->vm_ops
= &drm_vm_dma_ops
;
220 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
/* Pre-2.2.3 kernels did not take the file reference in do_mmap(),
 * so the driver must do it itself under this version guard. */
222 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
223 /* In Linux 2.2.3 and above, this is
224 handled in do_mmap() in mm/mmap.c. */
227 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */
/* Top-level mmap entry point for the DRM device node.  Offset 0 maps
 * the DMA buffer space (drm_mmap_dma); any other offset is looked up
 * in the device's map list and dispatched by map type: register /
 * frame-buffer maps are remapped directly (remap_page_range), shared
 * memory maps fault in lazily via drm_vm_shm_ops. */
232 int drm_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
234 drm_file_t
*priv
= filp
->private_data
;
235 drm_device_t
*dev
= priv
->dev
;
236 drm_map_t
*map
= NULL
;
239 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
240 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
242 if (!VM_OFFSET(vma
)) return drm_mmap_dma(filp
, vma
);
/* i is presumably declared on a line outside this view. */
244 /* A sequential search of a linked list is
245 fine here because: 1) there will only be
246 about 5-10 entries in the list and, 2) a
247 DRI client only has to do this mapping
248 once, so it doesn't have to be optimized
249 for performance, even if the list was a
251 for (i
= 0; i
< dev
->map_count
; i
++) {
252 map
= dev
->maplist
[i
];
253 if (map
->offset
== VM_OFFSET(vma
)) break;
/* No map registered at this offset, or the map is restricted to
 * CAP_SYS_ADMIN; the error return for the second test is on a line
 * outside this view. */
256 if (i
>= dev
->map_count
) return -EINVAL
;
257 if (!map
|| ((map
->flags
&_DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
)))
260 /* Check for valid size. */
261 if (map
->size
!= vma
->vm_end
- vma
->vm_start
) return -EINVAL
;
/* Read-only maps: strip write permission for unprivileged callers. */
263 if (!capable(CAP_SYS_ADMIN
) && (map
->flags
& _DRM_READ_ONLY
)) {
/* NOTE(review): "&= VM_MAYWRITE" masks the flags *down to* the
 * VM_MAYWRITE bit, clearing VM_READ/VM_EXEC/etc. as a side effect;
 * a write-protect such as &= ~(VM_WRITE | VM_MAYWRITE) was
 * presumably intended -- confirm against upstream DRM before fixing. */
264 vma
->vm_flags
&= VM_MAYWRITE
;
/* On i386 the RW pte bit can be cleared directly; other arches use
 * the generic pte_wrprotect() dance below. */
265 #if defined(__i386__)
266 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_RW
;
268 /* Ye gads this is ugly. With more thought
269 we could move this up higher and use
270 `protection_map' instead. */
271 vma
->vm_page_prot
= __pgprot(pte_val(pte_wrprotect(
272 __pte(pgprot_val(vma
->vm_page_prot
)))));
/* The switch (map->type) line itself is outside this view; this is
 * the register/frame-buffer case. */
277 case _DRM_FRAME_BUFFER
:
/* Target physically above high_memory => device MMIO, not RAM. */
280 if (VM_OFFSET(vma
) >= __pa(high_memory
)) {
/* On 486+ CPUs, mark the MMIO pages cache-disabled (PCD set, PWT
 * cleared) -- except for AGP maps. */
281 #if defined(__i386__)
282 if (boot_cpu_data
.x86
> 3 && map
->type
!= _DRM_AGP
) {
283 pgprot_val(vma
->vm_page_prot
) |= _PAGE_PCD
;
284 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_PWT
;
287 vma
->vm_flags
|= VM_IO
; /* not in core dump */
/* Map the whole range up front; the failure return and remaining
 * remap_page_range arguments are on lines outside this view. */
289 if (remap_page_range(vma
->vm_start
,
291 vma
->vm_end
- vma
->vm_start
,
294 DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
297 vma
->vm_start
, vma
->vm_end
, VM_OFFSET(vma
));
/* Fully premapped: drm_vm_ops only SIGBUSes stray faults. */
298 vma
->vm_ops
= &drm_vm_ops
;
/* Shared-memory case: pages fault in via drm_vm_shm_nopage. */
301 vma
->vm_ops
= &drm_vm_shm_ops
;
302 /* Don't let this area swap. Change when
303 DRM_KERNEL advisory is supported. */
304 vma
->vm_flags
|= VM_LOCKED
;
/* Presumably the switch's default arm. */
307 return -EINVAL
; /* This should never happen. */
309 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
/* Pre-2.2.3 kernels did not take the file reference in do_mmap(). */
311 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
312 /* In Linux 2.2.3 and above, this is
313 handled in do_mmap() in mm/mmap.c. */
316 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */