/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *          Jeff Hartmann <jhartmann@valinux.com>
 *          Keith Whitwell <keithw@valinux.com>
 */

#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"

#include <linux/interrupt.h>    /* For task queue support */

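/* Register access helpers: MGA_REG() names the maplist slot that holds
 * the memory-mapped control aperture (slot 2 here), MGA_BASE() is its
 * kernel virtual mapping, and MGA_READ()/MGA_WRITE() perform volatile
 * 32-bit accesses at a register offset within it.  PDEA_pagpxfer_enable
 * is the AGP transfer enable bit that gets OR'ed into MGAREG_PRIMEND
 * when a primary buffer is fired.
 */
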
#define MGA_REG(reg)            2
#define MGA_BASE(reg)           ((unsigned long) \
                                ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
#define MGA_ADDR(reg)           (MGA_BASE(reg) + reg)
#define MGA_DEREF(reg)          *(__volatile__ int *)MGA_ADDR(reg)
#define MGA_READ(reg)           MGA_DEREF(reg)
#define MGA_WRITE(reg,val)      do { MGA_DEREF(reg) = val; } while (0)

#define PDEA_pagpxfer_enable    0x2

static int mga_flush_queue(drm_device_t *dev);

static unsigned long mga_alloc_page(drm_device_t *dev)
{
        unsigned long address;

        address = __get_free_page(GFP_KERNEL);
        if(address == 0UL) {
                return 0;
        }
        atomic_inc(&virt_to_page(address)->count);
        set_bit(PG_reserved, &virt_to_page(address)->flags);

        return address;
}

static void mga_free_page(drm_device_t *dev, unsigned long page)
{
        if(!page) return;
        atomic_dec(&virt_to_page(page)->count);
        clear_bit(PG_reserved, &virt_to_page(page)->flags);
        free_page(page);
        return;
}

static void mga_delay(void)
{
        return;
}

/* These are two age tags that will never be sent to
 * the hardware */
#define MGA_BUF_USED    0xffffffff
#define MGA_BUF_FREE    0

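/* The secondary buffer freelist is a doubly linked list with a
 * permanently allocated sentinel at dev_priv->head (pinned to
 * MGA_BUF_USED) and the least recently freed buffer at dev_priv->tail.
 * A buffer's age is the prim_age of the primary buffer that consumed
 * it; the buffer may be reused once that age is older than
 * last_prim_age, i.e. once the interrupt handler has retired the
 * primary buffer in question.
 */
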
static int mga_freelist_init(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_mga_buf_priv_t *buf_priv;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_freelist_t *item;
        int i;

        dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
        if(dev_priv->head == NULL) return -ENOMEM;
        memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
        dev_priv->head->age = MGA_BUF_USED;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[ i ];
                buf_priv = buf->dev_private;
                item = drm_alloc(sizeof(drm_mga_freelist_t),
                                 DRM_MEM_DRIVER);
                if(item == NULL) return -ENOMEM;
                memset(item, 0, sizeof(drm_mga_freelist_t));
                item->age = MGA_BUF_FREE;
                item->prev = dev_priv->head;
                item->next = dev_priv->head->next;
                if(dev_priv->head->next != NULL)
                        dev_priv->head->next->prev = item;
                if(item->next == NULL) dev_priv->tail = item;
                item->buf = buf;
                buf_priv->my_freelist = item;
                buf_priv->discard = 0;
                buf_priv->dispatched = 0;
                dev_priv->head->next = item;
        }

        return 0;
}

static void mga_freelist_cleanup(drm_device_t *dev)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_freelist_t *item;
        drm_mga_freelist_t *prev;

        item = dev_priv->head;
        while(item) {
                prev = item;
                item = item->next;
                drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
        }

        dev_priv->head = dev_priv->tail = NULL;
}

/* Frees dispatch lock */
static inline void mga_dma_quiescent(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned long end;
        int i;

        DRM_DEBUG("dispatch_status = 0x%02lx\n", dev_priv->dispatch_status);
        end = jiffies + (HZ*3);
        while(1) {
                if(!test_and_set_bit(MGA_IN_DISPATCH,
                                     &dev_priv->dispatch_status)) {
                        break;
                }
                if((signed)(end - jiffies) <= 0) {
                        DRM_ERROR("irqs: %d wanted %d\n",
                                  atomic_read(&dev->total_irq),
                                  atomic_read(&dma->total_lost));
                        DRM_ERROR("lockup: dispatch_status = 0x%02lx,"
                                  " jiffies = %lu, end = %lu\n",
                                  dev_priv->dispatch_status, jiffies, end);
                        return;
                }
                for (i = 0 ; i < 2000 ; i++) mga_delay();
        }
        end = jiffies + (HZ*3);
        DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS));
        while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
                if((signed)(end - jiffies) <= 0) {
                        DRM_ERROR("irqs: %d wanted %d\n",
                                  atomic_read(&dev->total_irq),
                                  atomic_read(&dma->total_lost));
                        DRM_ERROR("lockup\n");
                        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
                        return;
                }
                for (i = 0 ; i < 2000 ; i++) mga_delay();
        }
        sarea_priv->dirty |= MGA_DMA_FLUSH;

        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
        DRM_DEBUG("exit, dispatch_status = 0x%02lx\n",
                  dev_priv->dispatch_status);
}

static void mga_reset_freelist(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_mga_buf_priv_t *buf_priv;
        int i;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[ i ];
                buf_priv = buf->dev_private;
                buf_priv->my_freelist->age = MGA_BUF_FREE;
        }
}

/* Least recently used :
 * These operations are not atomic b/c they are protected by the
 * hardware lock */
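/* mga_freelist_get() hands out the tail buffer once it has aged out.
 * The sleeping path is only entered after many consecutive failures
 * (the static `failed' counter), which keeps the common case cheap.
 */
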
drm_buf_t *mga_freelist_get(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv =
                (drm_mga_private_t *) dev->dev_private;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *next;
        static int failed = 0;
        int return_null = 0;

        if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) {
                DRM_DEBUG("Waiting on freelist,"
                          " tail->age = %d, last_prim_age= %d\n",
                          dev_priv->tail->age,
                          dev_priv->last_prim_age);
                add_wait_queue(&dev_priv->buf_queue, &entry);
                set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        mga_dma_schedule(dev, 0);
                        if(dev_priv->tail->age < dev_priv->last_prim_age)
                                break;
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ++return_null;
                                break;
                        }
                }
                clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->buf_queue, &entry);
                if (return_null) return NULL;
        }

        if(dev_priv->tail->age < dev_priv->last_prim_age) {
                prev = dev_priv->tail->prev;
                next = dev_priv->tail;
                prev->next = NULL;
                next->prev = next->next = NULL;
                dev_priv->tail = prev;
                next->age = MGA_BUF_USED;
                failed = 0;
                return next->buf;
        }

        failed++;
        return NULL;
}

int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
        drm_mga_private_t *dev_priv =
                (drm_mga_private_t *) dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *head;
        drm_mga_freelist_t *next;

        if(buf_priv->my_freelist->age == MGA_BUF_USED) {
                /* Discarded buffer, put it on the tail */
                next = buf_priv->my_freelist;
                next->age = MGA_BUF_FREE;
                prev = dev_priv->tail;
                prev->next = next;
                next->prev = prev;
                next->next = NULL;
                dev_priv->tail = next;
        } else {
                /* Normally aged buffer, put it on the head + 1,
                 * as the real head is a sentinel element
                 */
                next = buf_priv->my_freelist;
                head = dev_priv->head;
                prev = head->next;
                head->next = next;
                prev->prev = next;
                next->prev = head;
                next->next = prev;
        }

        return 0;
}

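/* Primary (command) DMA buffers: init->primary_size bytes of the
 * reserved AGP map are carved into a ring of MGA_NUM_PRIM_BUFS
 * buffers.  Each one keeps 5 dwords free at its end for the SOFTRAP
 * tail appended by mga_fire_primary(), and carries a monotonically
 * increasing prim_age that is used to retire secondary buffers.
 */
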
static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_prim_buf_t *prim_buffer;
        int i, temp, size_of_buf;
        int offset = init->reserved_map_agpstart;

        dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
                                  PAGE_SIZE) * PAGE_SIZE;
        size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS;
        dev_priv->warp_ucode_size = init->warp_ucode_size;
        dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) *
                                        (MGA_NUM_PRIM_BUFS + 1),
                                        DRM_MEM_DRIVER);
        if(dev_priv->prim_bufs == NULL) {
                DRM_ERROR("Unable to allocate memory for prim_buf\n");
                return -ENOMEM;
        }
        memset(dev_priv->prim_bufs,
               0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1));

        temp = init->warp_ucode_size + dev_priv->primary_size;
        temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;

        dev_priv->ioremap = drm_ioremap(dev->agp->base + offset,
                                        temp);
        if(dev_priv->ioremap == NULL) {
                DRM_ERROR("Ioremap failed\n");
                return -ENOMEM;
        }
        init_waitqueue_head(&dev_priv->wait_queue);

        for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
                prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t),
                                        DRM_MEM_DRIVER);
                if(prim_buffer == NULL) return -ENOMEM;
                memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t));
                prim_buffer->phys_head = offset + dev->agp->base;
                prim_buffer->current_dma_ptr =
                        prim_buffer->head =
                        (u32 *) (dev_priv->ioremap +
                                 offset -
                                 init->reserved_map_agpstart);
                prim_buffer->num_dwords = 0;
                prim_buffer->max_dwords = size_of_buf / sizeof(u32);
                prim_buffer->max_dwords -= 5; /* Leave room for the softrap */
                prim_buffer->sec_used = 0;
                prim_buffer->idx = i;
                prim_buffer->prim_age = i + 1;
                offset = offset + size_of_buf;
                dev_priv->prim_bufs[i] = prim_buffer;
        }
        dev_priv->current_prim_idx = 0;
        dev_priv->next_prim =
                dev_priv->last_prim =
                dev_priv->current_prim =
                dev_priv->prim_bufs[0];
        dev_priv->next_prim_age = 2;
        dev_priv->last_prim_age = 1;
        set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status);
        return 0;
}

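/* Pad the primary buffer out to a full command group, terminate it with
 * a SOFTRAP, poll MGAREG_STATUS until the engine is ready, then hand
 * the buffer's physical range to the hardware through
 * PRIMADDRESS/PRIMEND.  The SOFTRAP raises the interrupt that
 * eventually retires the buffer.
 */
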
void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int use_agp = PDEA_pagpxfer_enable;
        unsigned long end;
        int i;
        int next_idx;
        PRIMLOCALS;

        dev_priv->last_prim = prim;

        /* We never check for overflow, b/c there is always room */
        PRIMPTR(prim);
        if(num_dwords <= 0) {
                DRM_ERROR("num_dwords == 0 when dispatched\n");
                goto out_prim_wait;
        }
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_SOFTRAP, 0);
        PRIMFINISH(prim);

        end = jiffies + (HZ*3);
        if(sarea_priv->dirty & MGA_DMA_FLUSH) {
                while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
                        if((signed)(end - jiffies) <= 0) {
                                DRM_ERROR("irqs: %d wanted %d\n",
                                          atomic_read(&dev->total_irq),
                                          atomic_read(&dma->total_lost));
                                DRM_ERROR("lockup (flush)\n");
                                goto out_prim_wait;
                        }

                        for (i = 0 ; i < 4096 ; i++) mga_delay();
                }
                sarea_priv->dirty &= ~(MGA_DMA_FLUSH);
        } else {
                while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) {
                        if((signed)(end - jiffies) <= 0) {
                                DRM_ERROR("irqs: %d wanted %d\n",
                                          atomic_read(&dev->total_irq),
                                          atomic_read(&dma->total_lost));
                                DRM_ERROR("lockup (wait)\n");
                                goto out_prim_wait;
                        }

                        for (i = 0 ; i < 4096 ; i++) mga_delay();
                }
        }

        mga_flush_write_combine();
        atomic_inc(&dev_priv->pending_bufs);
        MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
        prim->num_dwords = 0;
        sarea_priv->last_enqueue = prim->prim_age;

        next_idx = prim->idx + 1;
        if(next_idx >= MGA_NUM_PRIM_BUFS)
                next_idx = 0;

        dev_priv->next_prim = dev_priv->prim_bufs[next_idx];
        return;

 out_prim_wait:
        prim->num_dwords = 0;
        prim->sec_used = 0;
        clear_bit(MGA_BUF_IN_USE, &prim->buffer_status);
        wake_up_interruptible(&dev_priv->wait_queue);
        clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status);
        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
}

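/* Rotate to the next primary buffer in the ring, sleeping until the
 * interrupt handler releases it if it is still in flight.  When the
 * prim_age counter is about to hit one of the reserved age tags
 * (MGA_BUF_FREE or MGA_BUF_USED), the pipeline is drained and the
 * freelist reset so that stale ages cannot alias.
 */
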
int mga_advance_primary(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_prim_buf_t *prim_buffer;
        drm_device_dma_t *dma = dev->dma;
        int next_prim_idx;
        int ret = 0;

        /* This needs to reset the primary buffer if available,
         * we should collect stats on how many times it bites
         * its tail */

        next_prim_idx = dev_priv->current_prim_idx + 1;
        if(next_prim_idx >= MGA_NUM_PRIM_BUFS)
                next_prim_idx = 0;
        prim_buffer = dev_priv->prim_bufs[next_prim_idx];
        set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);

        /* In use is cleared in interrupt handler */

        if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) {
                add_wait_queue(&dev_priv->wait_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        mga_dma_schedule(dev, 0);
                        if(!test_and_set_bit(MGA_BUF_IN_USE,
                                             &prim_buffer->buffer_status))
                                break;
                        atomic_inc(&dev->total_sleeps);
                        atomic_inc(&dma->total_missed_sched);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->wait_queue, &entry);
                if(ret) return ret;
        }
        clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);

        /* This primary buffer is now free to use */
        prim_buffer->current_dma_ptr = prim_buffer->head;
        prim_buffer->num_dwords = 0;
        prim_buffer->sec_used = 0;
        prim_buffer->prim_age = dev_priv->next_prim_age++;
        if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) {
                mga_flush_queue(dev);
                mga_dma_quiescent(dev);
                mga_reset_freelist(dev);
                prim_buffer->prim_age = (dev_priv->next_prim_age += 2);
        }

        /* Reset all buffer status stuff */
        clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status);
        clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status);
        clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status);

        dev_priv->current_prim = prim_buffer;
        dev_priv->current_prim_idx = next_prim_idx;
        return 0;
}

/* More dynamic performance decisions */
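/* Fire next_prim early when it is forced, when a flush or a getbuf
 * sleeper depends on it, or when enough secondary buffers have been
 * queued into it relative to the number of primaries already pending.
 */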
static inline int mga_decide_to_fire(drm_device_t *dev)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) {
                return 1;
        }

        if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
            dev_priv->next_prim->num_dwords) {
                return 1;
        }

        if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
            dev_priv->next_prim->num_dwords) {
                return 1;
        }

        if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) {
                if(test_bit(MGA_BUF_SWAP_PENDING,
                            &dev_priv->next_prim->buffer_status)) {
                        return 1;
                }
        }

        if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS / 2) {
                if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 8) {
                        return 1;
                }
        }

        if(atomic_read(&dev_priv->pending_bufs) >= MGA_NUM_PRIM_BUFS / 2) {
                if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 4) {
                        return 1;
                }
        }

        return 0;
}

int mga_dma_schedule(drm_device_t *dev, int locked)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        int retval = 0;

        if (!dev_priv) return -EBUSY;

        if (test_and_set_bit(0, &dev->dma_flag)) {
                retval = -EBUSY;
                goto sch_out_wakeup;
        }

        if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
           test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
           test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
                locked = 1;
        }

        if (!locked &&
            !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
                clear_bit(0, &dev->dma_flag);
                retval = -EBUSY;
                goto sch_out_wakeup;
        }

        if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
                /* Fire dma buffer */
                if(mga_decide_to_fire(dev)) {
                        clear_bit(MGA_BUF_FORCE_FIRE,
                                  &dev_priv->next_prim->buffer_status);
                        if(dev_priv->current_prim == dev_priv->next_prim) {
                                /* Schedule overflow for a later time */
                                set_bit(MGA_BUF_NEEDS_OVERFLOW,
                                        &dev_priv->next_prim->buffer_status);
                        }
                        mga_fire_primary(dev, dev_priv->next_prim);
                } else {
                        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
                }
        }

        if (!locked) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }

        clear_bit(0, &dev->dma_flag);

 sch_out_wakeup:
        if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
           atomic_read(&dev_priv->pending_bufs) == 0) {
                /* Everything has been processed by the hardware */
                clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
                wake_up_interruptible(&dev_priv->flush_queue);
        }

        if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)
           && dev_priv->tail->age < dev_priv->last_prim_age)
                wake_up_interruptible(&dev_priv->buf_queue);

        return retval;
}

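/* SOFTRAP interrupt handler: acknowledge the interrupt, retire the
 * primary buffer that just completed (publishing its prim_age as
 * last_prim_age and sarea_priv->last_dispatch), then kick the
 * scheduler again from the immediate task queue.
 */
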
static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t *dev = (drm_device_t *)device;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_prim_buf_t *last_prim_buffer;

        atomic_inc(&dev->total_irq);
        if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return;
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        last_prim_buffer = dev_priv->last_prim;
        last_prim_buffer->num_dwords = 0;
        last_prim_buffer->sec_used = 0;
        dev_priv->sarea_priv->last_dispatch =
                dev_priv->last_prim_age = last_prim_buffer->prim_age;
        clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
        clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
        atomic_dec(&dev_priv->pending_bufs);
        queue_task(&dev->tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
        wake_up_interruptible(&dev_priv->wait_queue);
}

static void mga_dma_task_queue(void *device)
{
        mga_dma_schedule((drm_device_t *)device, 0);
}

int mga_dma_cleanup(drm_device_t *dev)
{
        if(dev->dev_private) {
                drm_mga_private_t *dev_priv =
                        (drm_mga_private_t *) dev->dev_private;

                if (dev->irq) mga_flush_queue(dev);
                mga_dma_quiescent(dev);

                if(dev_priv->ioremap) {
                        int temp = (dev_priv->warp_ucode_size +
                                    dev_priv->primary_size +
                                    PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;

                        drm_ioremapfree((void *) dev_priv->ioremap, temp);
                }
                if(dev_priv->status_page != NULL) {
                        iounmap(dev_priv->status_page);
                }
                if(dev_priv->real_status_page != 0UL) {
                        mga_free_page(dev, dev_priv->real_status_page);
                }
                if(dev_priv->prim_bufs != NULL) {
                        int i;
                        for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
                                if(dev_priv->prim_bufs[i] != NULL) {
                                        drm_free(dev_priv->prim_bufs[i],
                                                 sizeof(drm_mga_prim_buf_t),
                                                 DRM_MEM_DRIVER);
                                }
                        }
                        drm_free(dev_priv->prim_bufs, sizeof(void *) *
                                 (MGA_NUM_PRIM_BUFS + 1),
                                 DRM_MEM_DRIVER);
                }
                if(dev_priv->head != NULL) {
                        mga_freelist_cleanup(dev);
                }

                drm_free(dev->dev_private, sizeof(drm_mga_private_t),
                         DRM_MEM_DRIVER);
                dev->dev_private = NULL;
        }

        return 0;
}

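/* One-time DMA setup, driven by the MGA_INIT_DMA ioctl: validate the
 * map indices passed in from user space, locate the private sarea,
 * build the primary buffer ring, allocate and map the status page the
 * hardware writes back to, prime the engine with an initial
 * DWGSYNC/SOFTRAP sequence, and set up the secondary buffer freelist.
 */
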
static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
        drm_mga_private_t *dev_priv;
        drm_map_t *sarea_map = NULL;

        dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
        if(dev_priv == NULL) return -ENOMEM;
        dev->dev_private = (void *) dev_priv;

        memset(dev_priv, 0, sizeof(drm_mga_private_t));

        if((init->reserved_map_idx >= dev->map_count) ||
           (init->buffer_map_idx >= dev->map_count)) {
                mga_dma_cleanup(dev);
                return -EINVAL;
        }

        dev_priv->reserved_map_idx = init->reserved_map_idx;
        dev_priv->buffer_map_idx = init->buffer_map_idx;
        sarea_map = dev->maplist[0];
        dev_priv->sarea_priv = (drm_mga_sarea_t *)
                ((u8 *)sarea_map->handle +
                 init->sarea_priv_offset);

        /* Scale primary size to the next page */
        dev_priv->chipset = init->chipset;
        dev_priv->frontOffset = init->frontOffset;
        dev_priv->backOffset = init->backOffset;
        dev_priv->depthOffset = init->depthOffset;
        dev_priv->textureOffset = init->textureOffset;
        dev_priv->textureSize = init->textureSize;
        dev_priv->cpp = init->cpp;
        dev_priv->sgram = init->sgram;
        dev_priv->stride = init->stride;

        dev_priv->mAccess = init->mAccess;
        init_waitqueue_head(&dev_priv->flush_queue);
        init_waitqueue_head(&dev_priv->buf_queue);
        dev_priv->WarpPipe = 0xff000000;
        dev_priv->vertexsize = 0;

        DRM_DEBUG("chipset=%d ucode_size=%d backOffset=%x depthOffset=%x\n",
                  dev_priv->chipset, dev_priv->warp_ucode_size,
                  dev_priv->backOffset, dev_priv->depthOffset);
        DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n",
                  dev_priv->cpp, dev_priv->sgram, dev_priv->stride,
                  dev_priv->mAccess);

        memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
               sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES);

        if(mga_init_primary_bufs(dev, init) != 0) {
                DRM_ERROR("Can not initialize primary buffers\n");
                mga_dma_cleanup(dev);
                return -ENOMEM;
        }
        dev_priv->real_status_page = mga_alloc_page(dev);
        if(dev_priv->real_status_page == 0UL) {
                mga_dma_cleanup(dev);
                DRM_ERROR("Can not allocate status page\n");
                return -ENOMEM;
        }

        dev_priv->status_page =
                ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
                                PAGE_SIZE);

        if(dev_priv->status_page == NULL) {
                mga_dma_cleanup(dev);
                DRM_ERROR("Can not remap status page\n");
                return -ENOMEM;
        }

        /* Write status page when SECEND or SOFTRAP occurs */
        MGA_WRITE(MGAREG_PRIMPTR,
                  virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003);
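        /* The two low bits OR'ed into PRIMPTR appear to be the
         * writeback enables for the SECEND and SOFTRAP events named in
         * the comment above. */
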
        /* Private is now filled in, initialize the hardware */
        {
                PRIMLOCALS;
                PRIMGETPTR( dev_priv );

                PRIMOUTREG(MGAREG_DMAPAD, 0);
                PRIMOUTREG(MGAREG_DMAPAD, 0);
                PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
                PRIMOUTREG(MGAREG_SOFTRAP, 0);
                /* Poll for the first buffer to ensure that
                 * the status register will be correct
                 */
                mga_flush_write_combine();
                MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);

                MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
                                           PDEA_pagpxfer_enable));

                while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ;
        }

        if(mga_freelist_init(dev) != 0) {
                DRM_ERROR("Could not initialize freelist\n");
                mga_dma_cleanup(dev);
                return -ENOMEM;
        }

        return 0;
}

int mga_dma_init(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_mga_init_t init;

        if (copy_from_user(&init, (drm_mga_init_t *)arg, sizeof(init)))
                return -EFAULT;

        switch(init.func) {
        case MGA_INIT_DMA:
                return mga_dma_initialize(dev, &init);
        case MGA_CLEANUP_DMA:
                return mga_dma_cleanup(dev);
        }

        return -EINVAL;
}

int mga_irq_install(drm_device_t *dev, int irq)
{
        int retcode;

        if (!irq) return -EINVAL;

        down(&dev->struct_sem);
        if (dev->irq) {
                up(&dev->struct_sem);
                return -EBUSY;
        }
        dev->irq = irq;
        up(&dev->struct_sem);

        DRM_DEBUG("install irq handler %d\n", irq);

        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->dma->next_buffer = NULL;
        dev->dma->next_queue = NULL;
        dev->dma->this_buffer = NULL;
        INIT_LIST_HEAD(&dev->tq.list);
        dev->tq.sync = 0;
        dev->tq.routine = mga_dma_task_queue;
        dev->tq.data = dev;

        /* Before installing handler */
        MGA_WRITE(MGAREG_IEN, 0);

        /* Install handler */
        if ((retcode = request_irq(dev->irq,
                                   mga_dma_service,
                                   SA_SHIRQ,
                                   dev->devname,
                                   dev))) {
                down(&dev->struct_sem);
                dev->irq = 0;
                up(&dev->struct_sem);
                return retcode;
        }

        /* After installing handler */
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        MGA_WRITE(MGAREG_IEN, 0x00000001);
        return 0;
}

int mga_irq_uninstall(drm_device_t *dev)
{
        int irq;

        down(&dev->struct_sem);
        irq = dev->irq;
        dev->irq = 0;
        up(&dev->struct_sem);

        if (!irq) return -EINVAL;

        DRM_DEBUG("remove irq handler %d\n", irq);
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        MGA_WRITE(MGAREG_IEN, 0);
        free_irq(irq, dev);
        return 0;
}

int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_control_t ctl;

        if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
                return -EFAULT;

        switch (ctl.func) {
        case DRM_INST_HANDLER:
                return mga_irq_install(dev, ctl.irq);
        case DRM_UNINST_HANDLER:
                return mga_irq_uninstall(dev);
        default:
                return -EINVAL;
        }
}

static int mga_flush_queue(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        int ret = 0;

        if(!dev_priv) return 0;

        if(dev_priv->next_prim->num_dwords != 0) {
                add_wait_queue(&dev_priv->flush_queue, &entry);
                if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status))
                        DRM_ERROR("Incorrect mga_flush_queue logic\n");
                set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
                mga_dma_schedule(dev, 0);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!test_bit(MGA_IN_FLUSH,
                                      &dev_priv->dispatch_status))
                                break;
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -EINTR; /* Can't restart */
                                clear_bit(MGA_IN_FLUSH,
                                          &dev_priv->dispatch_status);
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->flush_queue, &entry);
        }

        return ret;
}

/* Must be called with the lock held */
void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
        drm_device_dma_t *dma = dev->dma;
        int i;

        if (!dma) return;
        if(dev->dev_private == NULL) return;
        if(dma->buflist == NULL) return;

        DRM_DEBUG("buf_count=%d\n", dma->buf_count);

        mga_flush_queue(dev);

        for (i = 0; i < dma->buf_count; i++) {
                drm_buf_t *buf = dma->buflist[ i ];
                drm_mga_buf_priv_t *buf_priv = buf->dev_private;

                /* Only buffers that need to get reclaimed ever
                 * get set to free
                 */
                if (buf->pid == pid && buf_priv) {
                        if(buf_priv->my_freelist->age == MGA_BUF_USED)
                                buf_priv->my_freelist->age = MGA_BUF_FREE;
                }
        }
}

int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
             unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int ret = 0;
        drm_lock_t lock;

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        if (lock.context < 0) return -EINVAL;

        /* Only one queue:
         */

        if (!ret) {
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                        /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

        if (!ret) {
                sigemptyset(&dev->sigmask);
                sigaddset(&dev->sigmask, SIGSTOP);
                sigaddset(&dev->sigmask, SIGTSTP);
                sigaddset(&dev->sigmask, SIGTTIN);
                sigaddset(&dev->sigmask, SIGTTOU);
                dev->sigdata.context = lock.context;
                dev->sigdata.lock = dev->lock.hw_lock;
                block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                        DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
                        mga_flush_queue(dev);
                        mga_dma_quiescent(dev);
                }
        }

        if (ret) DRM_DEBUG("%d %s\n", lock.context,
                           ret ? "interrupted" : "has lock");
        return ret;
}

int mga_flush_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_lock_t lock;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                return -EFAULT;

        if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("lock not held\n");
                return -EINVAL;
        }

        if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
                drm_mga_prim_buf_t *temp_buf;

                temp_buf = dev_priv->current_prim;

                if(temp_buf && temp_buf->num_dwords) {
                        set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
                        mga_advance_primary(dev);
                }
                mga_dma_schedule(dev, 1);
        }
        if(lock.flags & _DRM_LOCK_QUIESCENT) {
                mga_flush_queue(dev);
                mga_dma_quiescent(dev);
        }

        return 0;
}