/* bufs.c -- IOCTLs to manage buffers -*- linux-c -*-
 * Created: Tue Feb  2 08:37:54 1999 by faith@precisioninsight.com
 * Revised: Fri Aug 20 22:48:10 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@precisioninsight.com>
 *
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/bufs.c,v 1.8 1999/08/30 13:05:00 faith Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"

/* Compute order.  Can be made faster. */
int drm_order(unsigned long size)
{
	int	      order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order);
	if (size & ~(1 << order)) ++order;
	return order;
}
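
/* Examples: drm_order(4096) == 12, since 4096 == 1 << 12; drm_order(4097)
 * == 13, because a size that is not an exact power of two is rounded up to
 * the next order. */
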
int drm_addmap(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_map_t	*map;

	if (!(filp->f_mode & 3)) return -EACCES; /* Require read/write */

	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
	if (!map) return -ENOMEM;
	if (copy_from_user(map, (drm_map_t *)arg, sizeof(*map))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EFAULT;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		  map->offset, map->size, map->type);
	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		if (map->offset + map->size < map->offset
		    || map->offset < virt_to_phys(high_memory)) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef CONFIG_MTRR
		if (map->type == _DRM_FRAME_BUFFER
		    || (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr = mtrr_add(map->offset, map->size,
					     MTRR_TYPE_WRCOMB, 1);
		}
#endif
		map->handle = drm_ioremap(map->offset, map->size);
		break;

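	/* _DRM_SHM maps are backed by kernel pages rather than ioremapped
	 * device memory; if the map contains the hardware lock, point
	 * dev->lock.hw_lock at it so the other DRM paths can find it. */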
	case _DRM_SHM:
		DRM_DEBUG("%ld %d\n", map->size, drm_order(map->size));
		map->handle = (void *)drm_alloc_pages(drm_order(map->size)
						      - PAGE_SHIFT,
						      DRM_MEM_SAREA);
		if (!map->handle) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
	default:
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}

	down(&dev->struct_sem);
	if (dev->maplist) {
		++dev->map_count;
		dev->maplist = drm_realloc(dev->maplist,
					   (dev->map_count-1)
					   * sizeof(*dev->maplist),
					   dev->map_count
					   * sizeof(*dev->maplist),
					   DRM_MEM_MAPS);
	} else {
		dev->map_count = 1;
		dev->maplist = drm_alloc(dev->map_count*sizeof(*dev->maplist),
					 DRM_MEM_MAPS);
	}
	dev->maplist[dev->map_count-1] = map;
	up(&dev->struct_sem);

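	/* For maps in physical address space, the handle reported back to
	 * user space below is overwritten with the offset, which user space
	 * then passes to mmap(2); only _DRM_SHM maps return the
	 * kernel-virtual handle. */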
	copy_to_user_ret((drm_map_t *)arg, map, sizeof(*map), -EFAULT);
	if (map->type != _DRM_SHM) {
		copy_to_user_ret(&((drm_map_t *)arg)->handle,
				 &map->offset,
				 sizeof(map->offset),
				 -EFAULT);
	}
	return 0;
}

int drm_addbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_desc_t	 request;
	int		 count;
	int		 order;
	int		 size;
	int		 total;
	int		 page_order;
	drm_buf_entry_t	 *entry;
	unsigned long	 page;
	drm_buf_t	 *buf;
	int		 alignment;
	unsigned long	 offset;
	int		 i;
	int		 byte_count;
	int		 page_count;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);

	count = request.count;
	order = drm_order(request.size);
	size  = 1 << order;

	DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
		  request.count, request.size, size, order, dev->queue_count);
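
	/* Worked example: request.size == 12288 gives order == 14, since
	 * drm_order rounds up to the next power of two, so size == 16384.
	 * With 4K pages, page_order == 14 - PAGE_SHIFT == 2, i.e. each
	 * segment allocated below is 4 contiguous pages holding one
	 * buffer. */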
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	if (dev->queue_count) return -EBUSY; /* Not while in use */

	alignment  = (request.flags & DRM_PAGE_ALIGN) ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total	   = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);
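
	/* dev->buf_alloc is now non-zero, which makes drm_infobufs and
	 * drm_mapbufs return -EBUSY instead of setting dev->buf_use while
	 * this allocation is still in progress. */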
	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM; /* May only call once for each order */
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
				   DRM_MEM_SEGS);
	if (!entry->seglist) {
		drm_free(entry->buflist,
			 count * sizeof(*entry->buflist),
			 DRM_MEM_BUFS);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->seglist, 0, count * sizeof(*entry->seglist));

	dma->pagelist = drm_realloc(dma->pagelist,
				    dma->page_count * sizeof(*dma->pagelist),
				    (dma->page_count + (count << page_order))
				    * sizeof(*dma->pagelist),
				    DRM_MEM_PAGES);
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size	  = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
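
	/* Each pass of the loop below allocates one segment of
	 * (1 << page_order) contiguous pages and carves it into buffers at
	 * 'alignment'-byte strides until the segment is exhausted or
	 * 'count' buffers exist. */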
	while (entry->buf_count < count) {
		if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
		entry->seglist[entry->seg_count++] = page;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  page + PAGE_SIZE * i);
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf	     = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next    = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head(&buf->dma_wait);
			buf->pid     = 0;
#if DRM_DMA_HISTOGRAM
			buf->time_queued     = 0;
			buf->time_dispatched = 0;
			buf->time_completed  = 0;
			buf->time_freed	     = 0;
#endif
			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	dma->buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist),
				   DRM_MEM_BUFS);
	for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
		dma->buflist[i] = &entry->buflist[i - dma->buf_count];

	dma->buf_count	+= entry->buf_count;
	dma->seg_count	+= entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	drm_freelist_create(&entry->freelist, entry->buf_count);
	for (i = 0; i < entry->buf_count; i++) {
		drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
	}

	up(&dev->struct_sem);
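
	/* The new buffers now sit on the per-order freelist, from which the
	 * freelist code hands them out and to which drm_free_buffer returns
	 * them. */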
	request.count = entry->buf_count;
	request.size  = size;

	copy_to_user_ret((drm_buf_desc_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	atomic_dec(&dev->buf_alloc);
	return 0;
}

int drm_infobufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_info_t	 request;
	int		 i;
	int		 count;

	if (!dma) return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_info_t *)arg,
			   sizeof(request),
			   -EFAULT);

	for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
		if (dma->bufs[i].buf_count) ++count;
	}

	DRM_DEBUG("count = %d\n", count);
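
	/* Typical usage (not enforced here): call once with request.count
	 * == 0 to learn how many entries are needed, then again with a
	 * list large enough to receive them. */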
	if (request.count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
			if (dma->bufs[i].buf_count) {
				copy_to_user_ret(&request.list[count].count,
						 &dma->bufs[i].buf_count,
						 sizeof(dma->bufs[0]
							.buf_count),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].size,
						 &dma->bufs[i].buf_size,
						 sizeof(dma->bufs[0].buf_size),
						 -EFAULT);
				copy_to_user_ret(&request.list[count].low_mark,
						 &dma->bufs[i]
						 .freelist.low_mark,
						 sizeof(dma->bufs[0]
							.freelist.low_mark),
						 -EFAULT);
				copy_to_user_ret(&request.list[count]
						 .high_mark,
						 &dma->bufs[i]
						 .freelist.high_mark,
						 sizeof(dma->bufs[0]
							.freelist.high_mark),
						 -EFAULT);
				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}

	request.count = count;

	copy_to_user_ret((drm_buf_info_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	return 0;
}

int drm_markbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_desc_t	 request;
	int		 order;
	drm_buf_entry_t	 *entry;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_desc_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d, %d, %d\n",
		  request.size, request.low_mark, request.high_mark);
	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL;
	entry = &dma->bufs[order];
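
	/* The marks themselves are only validated here; the freelist code
	 * uses low_mark and high_mark as thresholds for deciding when the
	 * pool of free buffers is running low and when it has recovered. */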
	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
		return -EINVAL;
	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark  = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

int drm_freebufs(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	drm_buf_free_t	 request;
	int		 i;
	int		 idx;
	drm_buf_t	 *buf;

	if (!dma) return -EINVAL;

	copy_from_user_ret(&request,
			   (drm_buf_free_t *)arg,
			   sizeof(request),
			   -EFAULT);

	DRM_DEBUG("%d\n", request.count);
	for (i = 0; i < request.count; i++) {
		copy_from_user_ret(&idx,
				   &request.list[i],
				   sizeof(idx),
				   -EFAULT);
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d freeing buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}

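/* drm_mapbufs maps the entire DMA buffer pool into the caller's address
 * space with a single mmap of dma->byte_count bytes and then reports each
 * buffer's index, size, and user-space address within that mapping. */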
int drm_mapbufs(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	drm_device_dma_t *dma	 = dev->dma;
	int		 retcode = 0;
	const int	 zero	 = 0;
	unsigned long	 virtual;
	unsigned long	 address;
	drm_buf_map_t	 request;
	int		 i;

	if (!dma) return -EINVAL;

	DRM_DEBUG("\n");

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	copy_from_user_ret(&request,
			   (drm_buf_map_t *)arg,
			   sizeof(request),
			   -EFAULT);
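
	/* do_mmap returns either the mapped address or a negative errno
	 * cast to an unsigned long; the comparison with -1024UL below
	 * catches return values that are really small negative error
	 * codes. */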
	if (request.count >= dma->buf_count) {
		virtual = do_mmap(filp, 0, dma->byte_count,
				  PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request.list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request.list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request.list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request.list[i].used,
					 &zero,
					 sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;
			if (copy_to_user(&request.list[i].address,
					 &address,
					 sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
done:
	request.count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

	copy_to_user_ret((drm_buf_map_t *)arg,
			 &request,
			 sizeof(request),
			 -EFAULT);

	return retcode;
}