/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 * Revised: Fri Aug 20 13:06:51 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $PI: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/generic/dma.c,v 1.6 1999/08/20 20:00:53 faith Exp $
 * $XFree86$
 *
 */

#define __NO_VERSION__
#include "drmP.h"

#include <linux/interrupt.h>    /* For task queue support */
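
/* Allocate and zero the per-device DMA bookkeeping structure (dev->dma),
   including each per-order buffer-list entry. */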
void drm_dma_setup(drm_device_t *dev)
{
        int i;

        dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
        memset(dev->dma, 0, sizeof(*dev->dma));
        for (i = 0; i <= DRM_MAX_ORDER; i++)
                memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}
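
/* Tear down dev->dma: free the DMA pages behind every segment of each
   buffer order, the per-order buffer and segment lists and their
   freelists, the device-wide buffer and page lists, and finally the
   drm_device_dma_t itself. */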
void drm_dma_takedown(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        int              i, j;

        if (!dma) return;

                                /* Clear dma buffers */
        for (i = 0; i <= DRM_MAX_ORDER; i++) {
                if (dma->bufs[i].seg_count) {
                        DRM_DEBUG("order %d: buf_count = %d,"
                                  " seg_count = %d\n",
                                  i,
                                  dma->bufs[i].buf_count,
                                  dma->bufs[i].seg_count);
                        for (j = 0; j < dma->bufs[i].seg_count; j++) {
                                drm_free_pages(dma->bufs[i].seglist[j],
                                               dma->bufs[i].page_order,
                                               DRM_MEM_DMA);
                        }
                        drm_free(dma->bufs[i].buflist,
                                 dma->buf_count
                                 * sizeof(*dma->bufs[0].buflist),
                                 DRM_MEM_BUFS);
                        drm_free(dma->bufs[i].seglist,
                                 dma->buf_count
                                 * sizeof(*dma->bufs[0].seglist),
                                 DRM_MEM_SEGS);
                        drm_freelist_destroy(&dma->bufs[i].freelist);
                }
        }

        if (dma->buflist) {
                drm_free(dma->buflist,
                         dma->buf_count * sizeof(*dma->buflist),
                         DRM_MEM_BUFS);
        }

        if (dma->pagelist) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
        dev->dma = NULL;
}
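
/* Optional DMA latency histogram support: drm_histogram_slot() maps a
   cycle count onto a histogram bucket, and drm_histogram_compute()
   records a buffer's queued->dispatched->completed->freed intervals in
   dev->histo before clearing the buffer's timestamps. */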
#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int drm_histogram_slot(unsigned long count)
{
        int value = DRM_DMA_HISTOGRAM_INITIAL;
        int slot;

        for (slot = 0;
             slot < DRM_DMA_HISTOGRAM_SLOTS;
             ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
                if (count < value) return slot;
        }
        return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
        cycles_t queued_to_dispatched;
        cycles_t dispatched_to_completed;
        cycles_t completed_to_freed;
        int      q2d, d2c, c2f, q2c, q2f;

        if (buf->time_queued) {
                queued_to_dispatched    = (buf->time_dispatched
                                           - buf->time_queued);
                dispatched_to_completed = (buf->time_completed
                                           - buf->time_dispatched);
                completed_to_freed      = (buf->time_freed
                                           - buf->time_completed);

                q2d = drm_histogram_slot(queued_to_dispatched);
                d2c = drm_histogram_slot(dispatched_to_completed);
                c2f = drm_histogram_slot(completed_to_freed);

                q2c = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed);
                q2f = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed
                                         + completed_to_freed);

                atomic_inc(&dev->histo.total);
                atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
                atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
                atomic_inc(&dev->histo.completed_to_freed[c2f]);

                atomic_inc(&dev->histo.queued_to_completed[q2c]);
                atomic_inc(&dev->histo.queued_to_freed[q2f]);
        }

        buf->time_queued     = 0;
        buf->time_dispatched = 0;
        buf->time_completed  = 0;
        buf->time_freed      = 0;
}
#endif
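
/* Release a buffer: clear its ownership and state flags, record the
   completion time for the histogram, then wake a waiter on the buffer's
   wait queue or, if nobody is waiting, return it to its order's
   freelist directly. */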
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
        drm_device_dma_t *dma = dev->dma;

        if (!buf) return;

        buf->waiting = 0;
        buf->pending = 0;
        buf->pid     = 0;
        buf->used    = 0;
#if DRM_DMA_HISTOGRAM
        buf->time_completed = get_cycles();
#endif
        if (waitqueue_active(&buf->dma_wait)) {
                wake_up_interruptible(&buf->dma_wait);
        } else {
                                /* If processes are waiting, the last one
                                   to wake will put the buffer on the free
                                   list.  If no processes are waiting, we
                                   put the buffer on the freelist here. */
                drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
        }
}
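
/* Reclaim all buffers owned by a process: idle buffers are freed
   immediately, buffers on a wait list are marked for reclaim, and
   buffers already submitted to hardware are left alone. */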
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
        drm_device_dma_t *dma = dev->dma;
        int              i;

        for (i = 0; i < dma->buf_count; i++) {
                if (dma->buflist[i]->pid == pid) {
                        switch (dma->buflist[i]->list) {
                        case DRM_LIST_NONE:
                                drm_free_buffer(dev, dma->buflist[i]);
                                break;
                        case DRM_LIST_WAIT:
                                dma->buflist[i]->list = DRM_LIST_RECLAIM;
                                break;
                        default:
                                /* Buffer already on hardware. */
                                break;
                        }
                }
        }
}
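
/* Start a context switch from context `old' to `new'.  Returns -EBUSY on
   reentry, -EINVAL for an invalid or unused context, and 0 otherwise.
   With DRM_FLAG_NOCTX the switch is completed in the kernel; otherwise a
   "C old new" request is written out via drm_write_string() so that user
   space can perform the switch. */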
int drm_context_switch(drm_device_t *dev, int old, int new)
{
        char        buf[64];
        drm_queue_t *q;

        atomic_inc(&dev->total_ctx);

        if (test_and_set_bit(0, &dev->context_flag)) {
                DRM_ERROR("Reentering -- FIXME\n");
                return -EBUSY;
        }

#if DRM_DMA_HISTOGRAM
        dev->ctx_start = get_cycles();
#endif

        DRM_DEBUG("Context switch from %d to %d\n", old, new);

        if (new >= dev->queue_count) {
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (new == dev->last_context) {
                clear_bit(0, &dev->context_flag);
                return 0;
        }

        q = dev->queuelist[new];
        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                atomic_dec(&q->use_count);
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (drm_flags & DRM_FLAG_NOCTX) {
                drm_context_switch_complete(dev, new);
        } else {
                sprintf(buf, "C %d %d\n", old, new);
                drm_write_string(dev, buf);
        }

        atomic_dec(&q->use_count);

        return 0;
}
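
/* Finish a context switch: record the new context and switch time, drop
   the hardware lock unless the next buffer must run while locked, update
   the context-switch histogram, and wake anyone waiting on the context. */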
int drm_context_switch_complete(drm_device_t *dev, int new)
{
        drm_device_dma_t *dma = dev->dma;

        dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
        dev->last_switch  = jiffies;

        if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
        }

        if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("Cannot free lock\n");
                }
        }

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
                                                      - dev->ctx_start)]);

#endif
        clear_bit(0, &dev->context_flag);
        wake_up_interruptible(&dev->context_wait);

        return 0;
}
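
/* Drop the references to the pending "next" buffer and queue, waking any
   flush waiters once the queue's wait list has drained. */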
void drm_clear_next_buffer(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;

        dma->next_buffer = NULL;
        if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
                wake_up_interruptible(&dma->next_queue->flush_queue);
        }
        dma->next_queue = NULL;
}
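
/* Pick the next queue to service: "while locked" DMA on the kernel
   context wins, then the current context keeps running while its time
   slice lasts, then the remaining queues are scanned round-robin.
   Returns the chosen queue index, or -1 when the switch must wait; in
   that case `wrapper' is armed as a timer to retry at slice expiry. */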
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
        int i;
        int candidate = -1;
        int j         = jiffies;

        if (!dev) {
                DRM_ERROR("No device\n");
                return -1;
        }
        if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
                                /* This only happens between the time the
                                   interrupt is initialized and the time
                                   the queues are initialized. */
                return -1;
        }

                                /* Doing "while locked" DMA? */
        if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
                return DRM_KERNEL_CONTEXT;
        }

                                /* If there are buffers on the last_context
                                   queue, and we have not been executing
                                   this context very long, continue to
                                   execute this context. */
        if (dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j
            && DRM_WAITCOUNT(dev, dev->last_context)) {
                return dev->last_context;
        }

                                /* Otherwise, find a candidate */
        for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
                if (DRM_WAITCOUNT(dev, i)) {
                        candidate = dev->last_checked = i;
                        break;
                }
        }

        if (candidate < 0) {
                for (i = 0; i < dev->queue_count; i++) {
                        if (DRM_WAITCOUNT(dev, i)) {
                                candidate = dev->last_checked = i;
                                break;
                        }
                }
        }

        if (wrapper
            && candidate >= 0
            && candidate != dev->last_context
            && dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j) {
                if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
                        del_timer(&dev->timer);
                        dev->timer.function = wrapper;
                        dev->timer.data     = (unsigned long)dev;
                        dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
                        add_timer(&dev->timer);
                }
                return -1;
        }

        return candidate;
}
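
/* Queue the buffers described by a drm_dma_t for DMA on the requested
   context (or on the kernel context for "while locked" requests):
   validate lock ownership, block while the queue is write-blocked, check
   each buffer's index, owner, and state, then put the buffers on the
   queue's wait list (or free them if the queue is being finalized). */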
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
        int               i;
        drm_queue_t       *q;
        drm_buf_t         *buf;
        int               idx;
        int               while_locked = 0;
        drm_device_dma_t  *dma = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

        DRM_DEBUG("%d\n", d->send_count);

        if (d->flags & _DRM_DMA_WHILE_LOCKED) {
                int context = dev->lock.hw_lock->lock;

                if (!_DRM_LOCK_IS_HELD(context)) {
                        DRM_ERROR("No lock held during \"while locked\""
                                  " request\n");
                        return -EINVAL;
                }
                if (d->context != _DRM_LOCKING_CONTEXT(context)
                    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
                        DRM_ERROR("Lock held by %d while %d makes"
                                  " \"while locked\" request\n",
                                  _DRM_LOCKING_CONTEXT(context),
                                  d->context);
                        return -EINVAL;
                }
                q = dev->queuelist[DRM_KERNEL_CONTEXT];
                while_locked = 1;
        } else {
                q = dev->queuelist[d->context];
        }

        atomic_inc(&q->use_count);
        if (atomic_read(&q->block_write)) {
                current->state = TASK_INTERRUPTIBLE;
                add_wait_queue(&q->write_queue, &entry);
                atomic_inc(&q->block_count);
                for (;;) {
                        if (!atomic_read(&q->block_write)) break;
                        schedule();
                        if (signal_pending(current)) {
                                atomic_dec(&q->use_count);
                                return -EINTR;
                        }
                }
                atomic_dec(&q->block_count);
                current->state = TASK_RUNNING;
                remove_wait_queue(&q->write_queue, &entry);
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[ idx ];
                if (buf->pid != current->pid) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                if (buf->list != DRM_LIST_NONE) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer %d on list %d\n",
                                  current->pid, buf->idx, buf->list);
                }
                buf->used         = d->send_sizes[i];
                buf->while_locked = while_locked;
                buf->context      = d->context;
                if (!buf->used) {
                        DRM_ERROR("Queueing 0 length buffer\n");
                }
                if (buf->pending) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                if (buf->waiting) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                buf->waiting = 1;
                if (atomic_read(&q->use_count) == 1
                    || atomic_read(&q->finalization)) {
                        drm_free_buffer(dev, buf);
                } else {
                        drm_waitlist_put(&q->waitlist, buf);
                        atomic_inc(&q->total_queued);
                }
        }
        atomic_dec(&q->use_count);

        return 0;
}
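
/* Grab up to the requested number of free buffers of a single size order
   from that order's freelist and copy their indices and sizes back to the
   caller's user-space arrays. */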
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
                                        int order)
{
        int               i;
        drm_buf_t         *buf;
        drm_device_dma_t  *dma = dev->dma;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = drm_freelist_get(&dma->bufs[order].freelist,
                                       d->flags & _DRM_DMA_WAIT);
                if (!buf) break;
                if (buf->pending || buf->waiting) {
                        DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
                                  buf->idx,
                                  buf->pid,
                                  buf->waiting,
                                  buf->pending);
                }
                buf->pid = current->pid;
                copy_to_user_ret(&d->request_indices[i],
                                 &buf->idx,
                                 sizeof(buf->idx),
                                 -EFAULT);
                copy_to_user_ret(&d->request_sizes[i],
                                 &buf->total,
                                 sizeof(buf->total),
                                 -EFAULT);
                ++d->granted_count;
        }
        return 0;
}
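
/* Satisfy a buffer request: try the order matching the requested size
   first, then fall back to smaller and/or larger orders when the caller
   allows it (_DRM_DMA_SMALLER_OK / _DRM_DMA_LARGER_OK). */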
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
        int order;
        int retcode = 0;
        int tmp_order;

        order = drm_order(dma->request_size);

        dma->granted_count = 0;
        retcode            = drm_dma_get_buffers_of_order(dev, dma, order);

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_SMALLER_OK)) {
                for (tmp_order = order - 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order >= DRM_MIN_ORDER;
                     --tmp_order) {

                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_LARGER_OK)) {
                for (tmp_order = order + 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order <= DRM_MAX_ORDER;
                     ++tmp_order) {

                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }
        return 0;
}