/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
 * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
 *
 * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

#define R128_FIFO_DEBUG		0

/* CCE microcode (from ATI) */
static u32 r128_cce_microcode[] = {
        0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
        1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
        599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
        11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
        262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
        1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
        30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
        1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
        15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
        12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
        46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
        459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
        18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
        15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
        268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
        15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
        1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
        3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
        1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
        15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
        180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
        114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
        33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
        1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
        14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
        1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
        198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
        114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
        1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
        1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
        16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
        174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
        33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
        33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
        409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
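
/* The table above is consumed by r128_cce_load_microcode() below as 256
 * (high dword, low dword) pairs, written through the PM4_MICROCODE_DATAH
 * and PM4_MICROCODE_DATAL registers.
 */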

static int R128_READ_PLL(drm_device_t *dev, int addr)
{
        drm_r128_private_t *dev_priv = dev->dev_private;

        R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
        return R128_READ(R128_CLOCK_CNTL_DATA);
}

#if R128_FIFO_DEBUG
static void r128_status( drm_r128_private_t *dev_priv )
{
        printk( "GUI_STAT           = 0x%08x\n",
                (unsigned int)R128_READ( R128_GUI_STAT ) );
        printk( "PM4_STAT           = 0x%08x\n",
                (unsigned int)R128_READ( R128_PM4_STAT ) );
        printk( "PM4_BUFFER_DL_WPTR = 0x%08x\n",
                (unsigned int)R128_READ( R128_PM4_BUFFER_DL_WPTR ) );
        printk( "PM4_BUFFER_DL_RPTR = 0x%08x\n",
                (unsigned int)R128_READ( R128_PM4_BUFFER_DL_RPTR ) );
        printk( "PM4_MICRO_CNTL     = 0x%08x\n",
                (unsigned int)R128_READ( R128_PM4_MICRO_CNTL ) );
        printk( "PM4_BUFFER_CNTL    = 0x%08x\n",
                (unsigned int)R128_READ( R128_PM4_BUFFER_CNTL ) );
}
#endif

/* ================================================================
 * Engine, FIFO control
 */
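
/* Flush the pixel cache and wait for the flush to complete, returning
 * DRM_ERR(EBUSY) if it has not finished within dev_priv->usec_timeout
 * microseconds.
 */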
static int r128_do_pixcache_flush( drm_r128_private_t *dev_priv )
{
        u32 tmp;
        int i;

        tmp = R128_READ( R128_PC_NGUI_CTLSTAT ) | R128_PC_FLUSH_ALL;
        R128_WRITE( R128_PC_NGUI_CTLSTAT, tmp );

        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                if ( !(R128_READ( R128_PC_NGUI_CTLSTAT ) & R128_PC_BUSY) ) {
                        return 0;
                }
                DRM_UDELAY( 1 );
        }

#if R128_FIFO_DEBUG
        DRM_ERROR( "failed!\n" );
#endif
        return DRM_ERR(EBUSY);
}
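
/* Poll until at least "entries" free slots are reported in the GUI FIFO,
 * returning DRM_ERR(EBUSY) after dev_priv->usec_timeout microseconds.
 */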
static int r128_do_wait_for_fifo( drm_r128_private_t *dev_priv, int entries )
{
        int i;

        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                int slots = R128_READ( R128_GUI_STAT ) & R128_GUI_FIFOCNT_MASK;
                if ( slots >= entries ) return 0;
                DRM_UDELAY( 1 );
        }

#if R128_FIFO_DEBUG
        DRM_ERROR( "failed!\n" );
#endif
        return DRM_ERR(EBUSY);
}
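
/* Wait for 64 free FIFO slots (presumably the whole FIFO) and for the GUI
 * engine to go idle, then flush the pixel cache.
 */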
static int r128_do_wait_for_idle( drm_r128_private_t *dev_priv )
{
        int i, ret;

        ret = r128_do_wait_for_fifo( dev_priv, 64 );
        if ( ret ) return ret;

        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                if ( !(R128_READ( R128_GUI_STAT ) & R128_GUI_ACTIVE) ) {
                        r128_do_pixcache_flush( dev_priv );
                        return 0;
                }
                DRM_UDELAY( 1 );
        }

#if R128_FIFO_DEBUG
        DRM_ERROR( "failed!\n" );
#endif
        return DRM_ERR(EBUSY);
}

/* ================================================================
 * CCE control, initialization
 */

/* Load the microcode for the CCE */
static void r128_cce_load_microcode( drm_r128_private_t *dev_priv )
{
        int i;

        DRM_DEBUG( "\n" );

        r128_do_wait_for_idle( dev_priv );

        R128_WRITE( R128_PM4_MICROCODE_ADDR, 0 );
        for ( i = 0 ; i < 256 ; i++ ) {
                R128_WRITE( R128_PM4_MICROCODE_DATAH,
                            r128_cce_microcode[i * 2] );
                R128_WRITE( R128_PM4_MICROCODE_DATAL,
                            r128_cce_microcode[i * 2 + 1] );
        }
}

/* Flush any pending commands to the CCE.  This should only be used just
 * prior to a wait for idle, as it informs the engine that the command
 * stream is ending.
 */
static void r128_do_cce_flush( drm_r128_private_t *dev_priv )
{
        u32 tmp;

        tmp = R128_READ( R128_PM4_BUFFER_DL_WPTR ) | R128_PM4_BUFFER_DL_DONE;
        R128_WRITE( R128_PM4_BUFFER_DL_WPTR, tmp );
}

/* Wait for the CCE to go idle.
 */
int r128_do_cce_idle( drm_r128_private_t *dev_priv )
{
        int i;

        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                if ( GET_RING_HEAD( dev_priv ) == dev_priv->ring.tail ) {
                        int pm4stat = R128_READ( R128_PM4_STAT );
                        if ( ( (pm4stat & R128_PM4_FIFOCNT_MASK) >=
                               dev_priv->cce_fifo_size ) &&
                             !(pm4stat & (R128_PM4_BUSY |
                                          R128_PM4_GUI_ACTIVE)) ) {
                                return r128_do_pixcache_flush( dev_priv );
                        }
                }
                DRM_UDELAY( 1 );
        }

#if R128_FIFO_DEBUG
        DRM_ERROR( "failed!\n" );
        r128_status( dev_priv );
#endif
        return DRM_ERR(EBUSY);
}

/* Start the Concurrent Command Engine.
 */
static void r128_do_cce_start( drm_r128_private_t *dev_priv )
{
        r128_do_wait_for_idle( dev_priv );

        R128_WRITE( R128_PM4_BUFFER_CNTL,
                    dev_priv->cce_mode | dev_priv->ring.size_l2qw
                    | R128_PM4_BUFFER_CNTL_NOUPDATE );
        R128_READ( R128_PM4_BUFFER_ADDR ); /* as per the sample code */
        R128_WRITE( R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN );

        dev_priv->cce_running = 1;
}

/* Reset the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must wait for the CCE command stream to complete
 * before calling this routine.
 */
static void r128_do_cce_reset( drm_r128_private_t *dev_priv )
{
        R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
        R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );
        dev_priv->ring.tail = 0;
}

/* Stop the Concurrent Command Engine.  This will not flush any pending
 * commands, so you must flush the command stream and wait for the CCE
 * to go idle before calling this routine.
 */
static void r128_do_cce_stop( drm_r128_private_t *dev_priv )
{
        R128_WRITE( R128_PM4_MICRO_CNTL, 0 );
        R128_WRITE( R128_PM4_BUFFER_CNTL,
                    R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE );

        dev_priv->cce_running = 0;
}

/* Reset the engine.  This will stop the CCE if it is running.
 */
static int r128_do_engine_reset( drm_device_t *dev )
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;

        r128_do_pixcache_flush( dev_priv );

        clock_cntl_index = R128_READ( R128_CLOCK_CNTL_INDEX );
        mclk_cntl = R128_READ_PLL( dev, R128_MCLK_CNTL );

        R128_WRITE_PLL( R128_MCLK_CNTL,
                        mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP );

        gen_reset_cntl = R128_READ( R128_GEN_RESET_CNTL );

        /* Taken from the sample code - do not change */
        R128_WRITE( R128_GEN_RESET_CNTL,
                    gen_reset_cntl | R128_SOFT_RESET_GUI );
        R128_READ( R128_GEN_RESET_CNTL );
        R128_WRITE( R128_GEN_RESET_CNTL,
                    gen_reset_cntl & ~R128_SOFT_RESET_GUI );
        R128_READ( R128_GEN_RESET_CNTL );

        R128_WRITE_PLL( R128_MCLK_CNTL, mclk_cntl );
        R128_WRITE( R128_CLOCK_CNTL_INDEX, clock_cntl_index );
        R128_WRITE( R128_GEN_RESET_CNTL, gen_reset_cntl );

        /* Reset the CCE ring */
        r128_do_cce_reset( dev_priv );

        /* The CCE is no longer running after an engine reset */
        dev_priv->cce_running = 0;

        /* Reset any pending vertex, indirect buffers */
        r128_freelist_reset( dev );

        return 0;
}
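
/* Program the ring buffer registers: base offset, read/write pointers,
 * FIFO watermarks, and bus mastering.
 */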
static void r128_cce_init_ring_buffer( drm_device_t *dev,
                                       drm_r128_private_t *dev_priv )
{
        u32 ring_start;
        u32 tmp;

        DRM_DEBUG( "\n" );

        /* The manual (p. 2) says this address is in "VM space".  This
         * means it's an offset from the start of AGP space.
         */
#if __OS_HAS_AGP
        if ( !dev_priv->is_pci )
                ring_start = dev_priv->cce_ring->offset - dev->agp->base;
        else
#endif
                ring_start = dev_priv->cce_ring->offset - dev->sg->handle;

        R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );

        R128_WRITE( R128_PM4_BUFFER_DL_WPTR, 0 );
        R128_WRITE( R128_PM4_BUFFER_DL_RPTR, 0 );

        /* Set watermark control */
        R128_WRITE( R128_PM4_BUFFER_WM_CNTL,
                    ((R128_WATERMARK_L/4) << R128_WMA_SHIFT)
                    | ((R128_WATERMARK_M/4) << R128_WMB_SHIFT)
                    | ((R128_WATERMARK_N/4) << R128_WMC_SHIFT)
                    | ((R128_WATERMARK_K/64) << R128_WB_WM_SHIFT) );

        /* Force read.  Why?  Because it's in the examples... */
        R128_READ( R128_PM4_BUFFER_ADDR );

        /* Turn on bus mastering */
        tmp = R128_READ( R128_BUS_CNTL ) & ~R128_BUS_MASTER_DIS;
        R128_WRITE( R128_BUS_CNTL, tmp );
}
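
/* One-time CCE initialization, driven by the R128_INIT_CCE ioctl: allocate
 * and fill the driver-private structure, validate the parameters passed in
 * from userspace, look up the SAREA, register, ring and buffer mappings,
 * set up the PCI GART when not running on AGP, and finally load the
 * microcode and reset the engine.
 */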
static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
{
        drm_r128_private_t *dev_priv;

        DRM_DEBUG( "\n" );

        dev_priv = drm_alloc( sizeof(drm_r128_private_t), DRM_MEM_DRIVER );
        if ( dev_priv == NULL )
                return DRM_ERR(ENOMEM);

        memset( dev_priv, 0, sizeof(drm_r128_private_t) );

        dev_priv->is_pci = init->is_pci;

        if ( dev_priv->is_pci && !dev->sg ) {
                DRM_ERROR( "PCI GART memory not allocated!\n" );
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }

        dev_priv->usec_timeout = init->usec_timeout;
        if ( dev_priv->usec_timeout < 1 ||
             dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT ) {
                DRM_DEBUG( "TIMEOUT problem!\n" );
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }

        dev_priv->cce_mode = init->cce_mode;

        /* GH: Simple idle check.
         */
        atomic_set( &dev_priv->idle_count, 0 );

        /* We don't support anything other than bus-mastering ring mode,
         * but the ring can be in either AGP or PCI space for the ring
         * read pointer.
         */
        if ( ( init->cce_mode != R128_PM4_192BM ) &&
             ( init->cce_mode != R128_PM4_128BM_64INDBM ) &&
             ( init->cce_mode != R128_PM4_64BM_128INDBM ) &&
             ( init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM ) ) {
                DRM_DEBUG( "Bad cce_mode!\n" );
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }

        switch ( init->cce_mode ) {
        case R128_PM4_NONPM4:
                dev_priv->cce_fifo_size = 0;
                break;
        case R128_PM4_192PIO:
        case R128_PM4_192BM:
                dev_priv->cce_fifo_size = 192;
                break;
        case R128_PM4_128PIO_64INDBM:
        case R128_PM4_128BM_64INDBM:
                dev_priv->cce_fifo_size = 128;
                break;
        case R128_PM4_64PIO_128INDBM:
        case R128_PM4_64BM_128INDBM:
        case R128_PM4_64PIO_64VCBM_64INDBM:
        case R128_PM4_64BM_64VCBM_64INDBM:
        case R128_PM4_64PIO_64VCPIO_64INDPIO:
                dev_priv->cce_fifo_size = 64;
                break;
        }

        switch ( init->fb_bpp ) {
        case 16:
                dev_priv->color_fmt = R128_DATATYPE_RGB565;
                break;
        case 32:
        default:
                dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
                break;
        }
        dev_priv->front_offset = init->front_offset;
        dev_priv->front_pitch = init->front_pitch;
        dev_priv->back_offset = init->back_offset;
        dev_priv->back_pitch = init->back_pitch;

        switch ( init->depth_bpp ) {
        case 16:
                dev_priv->depth_fmt = R128_DATATYPE_RGB565;
                break;
        case 24:
        case 32:
        default:
                dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
                break;
        }
        dev_priv->depth_offset = init->depth_offset;
        dev_priv->depth_pitch = init->depth_pitch;
        dev_priv->span_offset = init->span_offset;
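
        /* Pack each (pitch, offset) pair into a single dword: the offset in
         * 32-byte units in the low bits and the pitch divided by 8 starting
         * at bit 21 -- presumably matching the hardware's *_PITCH_OFFSET
         * register layout used when emitting blits and clears.
         */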
        dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch/8) << 21) |
                                          (dev_priv->front_offset >> 5));
        dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch/8) << 21) |
                                         (dev_priv->back_offset >> 5));
        dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) |
                                          (dev_priv->depth_offset >> 5) |
                                          R128_DST_TILE);
        dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch/8) << 21) |
                                         (dev_priv->span_offset >> 5));

        DRM_GETSAREA();

        if(!dev_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }

        dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
        if(!dev_priv->mmio) {
                DRM_ERROR("could not find mmio region!\n");
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }
        dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
        if(!dev_priv->cce_ring) {
                DRM_ERROR("could not find cce ring region!\n");
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }
        dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
        if(!dev_priv->ring_rptr) {
                DRM_ERROR("could not find ring read pointer!\n");
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }
        dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
        if(!dev->agp_buffer_map) {
                DRM_ERROR("could not find dma buffer region!\n");
                dev->dev_private = (void *)dev_priv;
                r128_do_cleanup_cce( dev );
                return DRM_ERR(EINVAL);
        }

        if ( !dev_priv->is_pci ) {
                dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset);
                if(!dev_priv->agp_textures) {
                        DRM_ERROR("could not find agp texture region!\n");
                        dev->dev_private = (void *)dev_priv;
                        r128_do_cleanup_cce( dev );
                        return DRM_ERR(EINVAL);
                }
        }

        dev_priv->sarea_priv =
                (drm_r128_sarea_t *)((u8 *)dev_priv->sarea->handle +
                                     init->sarea_priv_offset);

#if __OS_HAS_AGP
        if ( !dev_priv->is_pci ) {
                drm_core_ioremap( dev_priv->cce_ring, dev );
                drm_core_ioremap( dev_priv->ring_rptr, dev );
                drm_core_ioremap( dev->agp_buffer_map, dev );
                if(!dev_priv->cce_ring->handle ||
                   !dev_priv->ring_rptr->handle ||
                   !dev->agp_buffer_map->handle) {
                        DRM_ERROR("Could not ioremap agp regions!\n");
                        dev->dev_private = (void *)dev_priv;
                        r128_do_cleanup_cce( dev );
                        return DRM_ERR(ENOMEM);
                }
        } else
#endif
        {
                dev_priv->cce_ring->handle =
                        (void *)dev_priv->cce_ring->offset;
                dev_priv->ring_rptr->handle =
                        (void *)dev_priv->ring_rptr->offset;
                dev->agp_buffer_map->handle = (void *)dev->agp_buffer_map->offset;
        }

#if __OS_HAS_AGP
        if ( !dev_priv->is_pci )
                dev_priv->cce_buffers_offset = dev->agp->base;
        else
#endif
                dev_priv->cce_buffers_offset = dev->sg->handle;
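
        /* size_l2qw is log2 of the ring size in quad-words (8-byte units);
         * it is OR'ed into PM4_BUFFER_CNTL when the CCE is started.
         * tail_mask wraps dword indices within the ring.
         */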
        dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
        dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
                              + init->ring_size / sizeof(u32));
        dev_priv->ring.size = init->ring_size;
        dev_priv->ring.size_l2qw = drm_order( init->ring_size / 8 );

        dev_priv->ring.tail_mask =
                (dev_priv->ring.size / sizeof(u32)) - 1;

        dev_priv->ring.high_mark = 128;

        dev_priv->sarea_priv->last_frame = 0;
        R128_WRITE( R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame );

        dev_priv->sarea_priv->last_dispatch = 0;
        R128_WRITE( R128_LAST_DISPATCH_REG,
                    dev_priv->sarea_priv->last_dispatch );

#if __OS_HAS_AGP
        if ( dev_priv->is_pci ) {
#endif
                if (!drm_ati_pcigart_init( dev, &dev_priv->phys_pci_gart,
                                           &dev_priv->bus_pci_gart) ) {
                        DRM_ERROR( "failed to init PCI GART!\n" );
                        dev->dev_private = (void *)dev_priv;
                        r128_do_cleanup_cce( dev );
                        return DRM_ERR(ENOMEM);
                }
                R128_WRITE( R128_PCI_GART_PAGE, dev_priv->bus_pci_gart );
#if __OS_HAS_AGP
        }
#endif

        r128_cce_init_ring_buffer( dev, dev_priv );
        r128_cce_load_microcode( dev_priv );

        dev->dev_private = (void *)dev_priv;

        r128_do_engine_reset( dev );

        return 0;
}
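
/* Undo everything r128_do_init_cce() set up: disable the interrupt handler,
 * unmap the AGP regions or tear down the PCI GART, and free the driver
 * private structure.
 */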
int r128_do_cleanup_cce( drm_device_t *dev )
{

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if ( dev->irq_enabled ) drm_irq_uninstall(dev);

        if ( dev->dev_private ) {
                drm_r128_private_t *dev_priv = dev->dev_private;

#if __OS_HAS_AGP
                if ( !dev_priv->is_pci ) {
                        if ( dev_priv->cce_ring != NULL )
                                drm_core_ioremapfree( dev_priv->cce_ring, dev );
                        if ( dev_priv->ring_rptr != NULL )
                                drm_core_ioremapfree( dev_priv->ring_rptr, dev );
                        if ( dev->agp_buffer_map != NULL )
                                drm_core_ioremapfree( dev->agp_buffer_map, dev );
                } else
#endif
                {
                        if (!drm_ati_pcigart_cleanup( dev,
                                                dev_priv->phys_pci_gart,
                                                dev_priv->bus_pci_gart ))
                                DRM_ERROR( "failed to cleanup PCI GART!\n" );
                }

                drm_free( dev->dev_private, sizeof(drm_r128_private_t),
                          DRM_MEM_DRIVER );
                dev->dev_private = NULL;
        }

        return 0;
}
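
/* Ioctl handler: initialize or tear down the CCE, depending on init.func. */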
int r128_cce_init( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_r128_init_t init;

        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        DRM_COPY_FROM_USER_IOCTL( init, (drm_r128_init_t __user *)data, sizeof(init) );

        switch ( init.func ) {
        case R128_INIT_CCE:
                return r128_do_init_cce( dev, &init );
        case R128_CLEANUP_CCE:
                return r128_do_cleanup_cce( dev );
        }

        return DRM_ERR(EINVAL);
}

int r128_cce_start( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        if ( dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4 ) {
                DRM_DEBUG( "%s while CCE running\n", __FUNCTION__ );
                return 0;
        }

        r128_do_cce_start( dev_priv );

        return 0;
}

/* Stop the CCE.  The engine must have been idled before calling this
 * routine.
 */
int r128_cce_stop( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_cce_stop_t stop;
        int ret;
        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *)data, sizeof(stop) );

        /* Flush any pending CCE commands.  This ensures any outstanding
         * commands are executed by the engine before we turn it off.
         */
        if ( stop.flush ) {
                r128_do_cce_flush( dev_priv );
        }

        /* If we fail to make the engine go idle, we return an error
         * code so that the DRM ioctl wrapper can try again.
         */
        if ( stop.idle ) {
                ret = r128_do_cce_idle( dev_priv );
                if ( ret ) return ret;
        }

        /* Finally, we can turn off the CCE.  If the engine isn't idle,
         * we will get some dropped triangles as they won't be fully
         * rendered before the CCE is shut down.
         */
        r128_do_cce_stop( dev_priv );

        /* Reset the engine */
        r128_do_engine_reset( dev );

        return 0;
}

/* Just reset the CCE ring.  Called as part of an X Server engine reset.
 */
int r128_cce_reset( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        if ( !dev_priv ) {
                DRM_DEBUG( "%s called before init done\n", __FUNCTION__ );
                return DRM_ERR(EINVAL);
        }

        r128_do_cce_reset( dev_priv );

        /* The CCE is no longer running after an engine reset */
        dev_priv->cce_running = 0;

        return 0;
}

int r128_cce_idle( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        if ( dev_priv->cce_running ) {
                r128_do_cce_flush( dev_priv );
        }

        return r128_do_cce_idle( dev_priv );
}

int r128_engine_reset( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        DRM_DEBUG( "\n" );

        LOCK_TEST_WITH_RETURN( dev, filp );

        return r128_do_engine_reset( dev );
}

int r128_fullscreen( DRM_IOCTL_ARGS )
{
        return DRM_ERR(EINVAL);
}

/* ================================================================
 * Freelist management
 */
#define R128_BUFFER_USED	0xffffffff
#define R128_BUFFER_FREE	0

#if 0
static int r128_freelist_init( drm_device_t *dev )
{
        drm_device_dma_t *dma = dev->dma;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_buf_t *buf;
        drm_r128_buf_priv_t *buf_priv;
        drm_r128_freelist_t *entry;
        int i;

        dev_priv->head = drm_alloc( sizeof(drm_r128_freelist_t),
                                    DRM_MEM_DRIVER );
        if ( dev_priv->head == NULL )
                return DRM_ERR(ENOMEM);

        memset( dev_priv->head, 0, sizeof(drm_r128_freelist_t) );
        dev_priv->head->age = R128_BUFFER_USED;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;

                entry = drm_alloc( sizeof(drm_r128_freelist_t),
                                   DRM_MEM_DRIVER );
                if ( !entry ) return DRM_ERR(ENOMEM);

                entry->age = R128_BUFFER_FREE;
                entry->buf = buf;
                entry->prev = dev_priv->head;
                entry->next = dev_priv->head->next;
                if ( !entry->next )
                        dev_priv->tail = entry;

                buf_priv->discard = 0;
                buf_priv->dispatched = 0;
                buf_priv->list_entry = entry;

                dev_priv->head->next = entry;

                if ( dev_priv->head->next )
                        dev_priv->head->next->prev = entry;
        }

        return 0;
}
#endif
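
/* Hand out an idle DMA buffer: first look for one that is not owned by any
 * file, then poll LAST_DISPATCH_REG for a pending buffer whose age has been
 * reached, giving up after dev_priv->usec_timeout microseconds.
 */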
static drm_buf_t *r128_freelist_get( drm_device_t *dev )
{
        drm_device_dma_t *dma = dev->dma;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_buf_priv_t *buf_priv;
        drm_buf_t *buf;
        int i, t;

        /* FIXME: Optimize -- use freelist code */

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;
                if ( buf->filp == 0 )
                        return buf;
        }

        for ( t = 0 ; t < dev_priv->usec_timeout ; t++ ) {
                u32 done_age = R128_READ( R128_LAST_DISPATCH_REG );

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        buf = dma->buflist[i];
                        buf_priv = buf->dev_private;
                        if ( buf->pending && buf_priv->age <= done_age ) {
                                /* The buffer has been processed, so it
                                 * can now be used.
                                 */
                                buf->pending = 0;
                                return buf;
                        }
                }
                DRM_UDELAY( 1 );
        }

        DRM_DEBUG( "returning NULL!\n" );
        return NULL;
}

void r128_freelist_reset( drm_device_t *dev )
{
        drm_device_dma_t *dma = dev->dma;
        int i;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                drm_buf_t *buf = dma->buflist[i];
                drm_r128_buf_priv_t *buf_priv = buf->dev_private;
                buf_priv->age = 0;
        }
}

/* ================================================================
 * CCE command submission
 */
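
/* Spin until r128_update_ring_snapshot() reports at least n units of free
 * ring space, or give up after dev_priv->usec_timeout microseconds and
 * return DRM_ERR(EBUSY).
 */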
int r128_wait_ring( drm_r128_private_t *dev_priv, int n )
{
        drm_r128_ring_buffer_t *ring = &dev_priv->ring;
        int i;

        for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
                r128_update_ring_snapshot( dev_priv );
                if ( ring->space >= n )
                        return 0;
                DRM_UDELAY( 1 );
        }

        /* FIXME: This is being ignored... */
        DRM_ERROR( "failed!\n" );
        return DRM_ERR(EBUSY);
}
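
/* Grab free buffers from the freelist and copy their indices and sizes back
 * to userspace, until the request is satisfied or the freelist runs dry.
 */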
static int r128_cce_get_buffers( DRMFILE filp, drm_device_t *dev, drm_dma_t *d )
{
        int i;
        drm_buf_t *buf;

        for ( i = d->granted_count ; i < d->request_count ; i++ ) {
                buf = r128_freelist_get( dev );
                if ( !buf ) return DRM_ERR(EAGAIN);

                buf->filp = filp;

                if ( DRM_COPY_TO_USER( &d->request_indices[i], &buf->idx,
                                       sizeof(buf->idx) ) )
                        return DRM_ERR(EFAULT);
                if ( DRM_COPY_TO_USER( &d->request_sizes[i], &buf->total,
                                       sizeof(buf->total) ) )
                        return DRM_ERR(EFAULT);

                d->granted_count++;
        }
        return 0;
}
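
/* Ioctl handler for drmDMA: validate the request counts and hand out DMA
 * buffers via r128_cce_get_buffers(); sending buffers this way is not
 * supported.
 */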
int r128_cce_buffers( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int ret = 0;
        drm_dma_t __user *argp = (void __user *)data;
        drm_dma_t d;

        LOCK_TEST_WITH_RETURN( dev, filp );

        DRM_COPY_FROM_USER_IOCTL( d, argp, sizeof(d) );

        /* Please don't send us buffers.
         */
        if ( d.send_count != 0 ) {
                DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
                           DRM_CURRENTPID, d.send_count );
                return DRM_ERR(EINVAL);
        }

        /* We'll send you buffers.
         */
        if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
                DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
                           DRM_CURRENTPID, d.request_count, dma->buf_count );
                return DRM_ERR(EINVAL);
        }

        d.granted_count = 0;

        if ( d.request_count ) {
                ret = r128_cce_get_buffers( filp, dev, &d );
        }

        DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d) );

        return ret;
}