1 /* vm.c -- Memory mapping for DRM -*- linux-c -*-
2 * Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
3 * Revised: Fri Aug 20 22:48:11 1999 by faith@precisioninsight.com
5 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/vm.c,v 1.7 1999/08/21 02:48:34 faith Exp $
 *
 */
32 #define __NO_VERSION__
35 struct vm_operations_struct drm_vm_ops
= {
36 nopage
: drm_vm_nopage
,
41 struct vm_operations_struct drm_vm_shm_ops
= {
42 nopage
: drm_vm_shm_nopage
,
47 struct vm_operations_struct drm_vm_dma_ops
= {
48 nopage
: drm_vm_dma_nopage
,
53 unsigned long drm_vm_nopage(struct vm_area_struct
*vma
,
54 unsigned long address
,
57 DRM_DEBUG("0x%08lx, %d\n", address
, write_access
);
59 return 0; /* Disallow mremap */
62 unsigned long drm_vm_shm_nopage(struct vm_area_struct
*vma
,
63 unsigned long address
,
66 drm_file_t
*priv
= vma
->vm_file
->private_data
;
67 drm_device_t
*dev
= priv
->dev
;
68 unsigned long physical
;
72 if (address
> vma
->vm_end
) return 0; /* Disallow mremap */
73 if (!dev
->lock
.hw_lock
) return 0; /* Nothing allocated */
75 offset
= address
- vma
->vm_start
;
76 page
= offset
>> PAGE_SHIFT
;
77 physical
= (unsigned long)dev
->lock
.hw_lock
+ (offset
& (~PAGE_MASK
));
78 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
80 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
84 unsigned long drm_vm_dma_nopage(struct vm_area_struct
*vma
,
85 unsigned long address
,
88 drm_file_t
*priv
= vma
->vm_file
->private_data
;
89 drm_device_t
*dev
= priv
->dev
;
90 drm_device_dma_t
*dma
= dev
->dma
;
91 unsigned long physical
;
95 if (!dma
) return 0; /* Error */
96 if (address
> vma
->vm_end
) return 0; /* Disallow mremap */
97 if (!dma
->pagelist
) return 0; /* Nothing allocated */
99 offset
= address
- vma
->vm_start
; /* vm_offset should be 0 */
100 page
= offset
>> PAGE_SHIFT
;
101 physical
= dma
->pagelist
[page
] + (offset
& (~PAGE_MASK
));
102 atomic_inc(&mem_map
[MAP_NR(physical
)].count
); /* Dec. by kernel */
104 DRM_DEBUG("0x%08lx (page %lu) => 0x%08lx\n", address
, page
, physical
);
108 void drm_vm_open(struct vm_area_struct
*vma
)
110 drm_file_t
*priv
= vma
->vm_file
->private_data
;
111 drm_device_t
*dev
= priv
->dev
;
113 drm_vma_entry_t
*vma_entry
;
116 DRM_DEBUG("0x%08lx,0x%08lx\n",
117 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
118 atomic_inc(&dev
->vma_count
);
122 vma_entry
= drm_alloc(sizeof(*vma_entry
), DRM_MEM_VMAS
);
124 down(&dev
->struct_sem
);
125 vma_entry
->vma
= vma
;
126 vma_entry
->next
= dev
->vmalist
;
127 vma_entry
->pid
= current
->pid
;
128 dev
->vmalist
= vma_entry
;
129 up(&dev
->struct_sem
);
134 void drm_vm_close(struct vm_area_struct
*vma
)
136 drm_file_t
*priv
= vma
->vm_file
->private_data
;
137 drm_device_t
*dev
= priv
->dev
;
139 drm_vma_entry_t
*pt
, *prev
;
142 DRM_DEBUG("0x%08lx,0x%08lx\n",
143 vma
->vm_start
, vma
->vm_end
- vma
->vm_start
);
145 atomic_dec(&dev
->vma_count
);
148 down(&dev
->struct_sem
);
149 for (pt
= dev
->vmalist
, prev
= NULL
; pt
; prev
= pt
, pt
= pt
->next
) {
150 if (pt
->vma
== vma
) {
152 prev
->next
= pt
->next
;
154 dev
->vmalist
= pt
->next
;
156 drm_free(pt
, sizeof(*pt
), DRM_MEM_VMAS
);
160 up(&dev
->struct_sem
);
164 int drm_mmap_dma(struct file
*filp
, struct vm_area_struct
*vma
)
166 drm_file_t
*priv
= filp
->private_data
;
167 drm_device_t
*dev
= priv
->dev
;
168 drm_device_dma_t
*dma
= dev
->dma
;
169 unsigned long length
= vma
->vm_end
- vma
->vm_start
;
171 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
172 vma
->vm_start
, vma
->vm_end
, vma
->vm_offset
);
174 /* Length must match exact page count */
175 if ((length
>> PAGE_SHIFT
) != dma
->page_count
) return -EINVAL
;
177 vma
->vm_ops
= &drm_vm_dma_ops
;
178 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
180 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
181 /* In Linux 2.2.3 and above, this is
182 handled in do_mmap() in mm/mmap.c. */
185 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */
190 int drm_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
192 drm_file_t
*priv
= filp
->private_data
;
193 drm_device_t
*dev
= priv
->dev
;
194 drm_map_t
*map
= NULL
;
197 DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
198 vma
->vm_start
, vma
->vm_end
, vma
->vm_offset
);
200 if (!vma
->vm_offset
) return drm_mmap_dma(filp
, vma
);
202 /* A sequential search of a linked list is
203 fine here because: 1) there will only be
204 about 5-10 entries in the list and, 2) a
205 DRI client only has to do this mapping
206 once, so it doesn't have to be optimized
207 for performance, even if the list was a
209 for (i
= 0; i
< dev
->map_count
; i
++) {
210 map
= dev
->maplist
[i
];
211 if (map
->offset
== vma
->vm_offset
) break;
214 if (i
>= dev
->map_count
) return -EINVAL
;
215 if (!map
|| ((map
->flags
&_DRM_RESTRICTED
) && !capable(CAP_SYS_ADMIN
)))
218 /* Check for valid size. */
219 if (map
->size
!= vma
->vm_end
- vma
->vm_start
) return -EINVAL
;
223 case _DRM_FRAME_BUFFER
:
225 if (vma
->vm_offset
>= __pa(high_memory
)) {
226 #if defined(__i386__)
227 if (boot_cpu_data
.x86
> 3) {
228 pgprot_val(vma
->vm_page_prot
) |= _PAGE_PCD
;
229 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_PWT
;
232 vma
->vm_flags
|= VM_IO
; /* not in core dump */
234 if (remap_page_range(vma
->vm_start
,
236 vma
->vm_end
- vma
->vm_start
,
239 vma
->vm_ops
= &drm_vm_ops
;
242 vma
->vm_ops
= &drm_vm_shm_ops
;
243 /* Don't let this area swap. Change when
244 DRM_KERNEL advisory is supported. */
245 vma
->vm_flags
|= VM_LOCKED
;
248 return -EINVAL
; /* This should never happen. */
250 vma
->vm_flags
|= VM_LOCKED
| VM_SHM
; /* Don't swap */
251 if (map
->flags
& _DRM_READ_ONLY
) {
252 pgprot_val(vma
->vm_page_prot
) &= ~_PAGE_RW
;
256 #if LINUX_VERSION_CODE < 0x020203 /* KERNEL_VERSION(2,2,3) */
257 /* In Linux 2.2.3 and above, this is
258 handled in do_mmap() in mm/mmap.c. */
261 vma
->vm_file
= filp
; /* Needed for drm_vm_open() */