/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "r128.h"
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

/* ================================================================
 * CCE hardware state programming functions
 */
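
/* Emit up to three user cliprects as hardware AUX scissor rectangles.
 * The scissors are enabled in OR mode (R128_AUX*_SC_MODE_OR), presumably
 * so that a pixel passes if it falls inside any of the active rectangles.
 */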

static void r128_emit_clip_rects( drm_r128_private_t *dev_priv,
				  drm_clip_rect_t *boxes, int count )
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( (count < 3? count: 3) * 5 + 2 );

	if ( count >= 1 ) {
		OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) );
		OUT_RING( boxes[0].x1 );
		OUT_RING( boxes[0].x2 - 1 );
		OUT_RING( boxes[0].y1 );
		OUT_RING( boxes[0].y2 - 1 );

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if ( count >= 2 ) {
		OUT_RING( CCE_PACKET0( R128_AUX2_SC_LEFT, 3 ) );
		OUT_RING( boxes[1].x1 );
		OUT_RING( boxes[1].x2 - 1 );
		OUT_RING( boxes[1].y1 );
		OUT_RING( boxes[1].y2 - 1 );

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if ( count >= 3 ) {
		OUT_RING( CCE_PACKET0( R128_AUX3_SC_LEFT, 3 ) );
		OUT_RING( boxes[2].x1 );
		OUT_RING( boxes[2].x2 - 1 );
		OUT_RING( boxes[2].y1 );
		OUT_RING( boxes[2].y2 - 1 );

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING( CCE_PACKET0( R128_AUX_SC_CNTL, 0 ) );
	OUT_RING( aux_sc_cntl );

	ADVANCE_RING();
}
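
/* The r128_emit_*() helpers below each upload one block of 3D state
 * registers from the shared SAREA context into the CCE ring.  They are
 * called from r128_emit_state(), which checks the corresponding dirty
 * flag before emitting each block.
 */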

static __inline__ void r128_emit_core( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_SCALE_3D_CNTL, 0 ) );
	OUT_RING( ctx->scale_3d_cntl );

	ADVANCE_RING();
}

static __inline__ void r128_emit_context( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 13 );

	OUT_RING( CCE_PACKET0( R128_DST_PITCH_OFFSET_C, 11 ) );
	OUT_RING( ctx->dst_pitch_offset_c );
	OUT_RING( ctx->dp_gui_master_cntl_c );
	OUT_RING( ctx->sc_top_left_c );
	OUT_RING( ctx->sc_bottom_right_c );
	OUT_RING( ctx->z_offset_c );
	OUT_RING( ctx->z_pitch_c );
	OUT_RING( ctx->z_sten_cntl_c );
	OUT_RING( ctx->tex_cntl_c );
	OUT_RING( ctx->misc_3d_state_cntl_reg );
	OUT_RING( ctx->texture_clr_cmp_clr_c );
	OUT_RING( ctx->texture_clr_cmp_msk_c );
	OUT_RING( ctx->fog_color_c );

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 3 );

	OUT_RING( CCE_PACKET1( R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP ) );
	OUT_RING( ctx->setup_cntl );
	OUT_RING( ctx->pm4_vc_fpu_setup );

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 5 );

	OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
	OUT_RING( ctx->dp_write_mask );

	OUT_RING( CCE_PACKET0( R128_STEN_REF_MASK_C, 1 ) );
	OUT_RING( ctx->sten_ref_mask_c );
	OUT_RING( ctx->plane_3d_mask_c );

	ADVANCE_RING();
}

static __inline__ void r128_emit_window( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_WINDOW_XY_OFFSET, 0 ) );
	OUT_RING( ctx->window_xy_offset );

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 7 + R128_MAX_TEXTURE_LEVELS );

	OUT_RING( CCE_PACKET0( R128_PRIM_TEX_CNTL_C,
			       2 + R128_MAX_TEXTURE_LEVELS ) );
	OUT_RING( tex->tex_cntl );
	OUT_RING( tex->tex_combine_cntl );
	OUT_RING( ctx->tex_size_pitch_c );
	for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) {
		OUT_RING( tex->tex_offset[i] );
	}

	OUT_RING( CCE_PACKET0( R128_CONSTANT_COLOR_C, 1 ) );
	OUT_RING( ctx->constant_color_c );
	OUT_RING( tex->tex_border_color );

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 5 + R128_MAX_TEXTURE_LEVELS );

	OUT_RING( CCE_PACKET0( R128_SEC_TEX_CNTL_C,
			       1 + R128_MAX_TEXTURE_LEVELS ) );
	OUT_RING( tex->tex_cntl );
	OUT_RING( tex->tex_combine_cntl );
	for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) {
		OUT_RING( tex->tex_offset[i] );
	}

	OUT_RING( CCE_PACKET0( R128_SEC_TEXTURE_BORDER_COLOR_C, 0 ) );
	OUT_RING( tex->tex_border_color );

	ADVANCE_RING();
}
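
/* Upload every state block whose dirty bit is set in the SAREA, clearing
 * each bit as the corresponding registers are emitted.
 */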

static __inline__ void r128_emit_state( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty );

	if ( dirty & R128_UPLOAD_CORE ) {
		r128_emit_core( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if ( dirty & R128_UPLOAD_CONTEXT ) {
		r128_emit_context( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if ( dirty & R128_UPLOAD_SETUP ) {
		r128_emit_setup( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if ( dirty & R128_UPLOAD_MASKS ) {
		r128_emit_masks( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if ( dirty & R128_UPLOAD_WINDOW ) {
		r128_emit_window( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if ( dirty & R128_UPLOAD_TEX0 ) {
		r128_emit_tex0( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if ( dirty & R128_UPLOAD_TEX1 ) {
		r128_emit_tex1( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */
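
/* Paint a small solid-colored box into the back buffer; used below to
 * give a rough visual indication of whether the engine has been idle.
 */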

static void r128_clear_box( drm_r128_private_t *dev_priv,
			    int x, int y, int w, int h,
			    int r, int g, int b )
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch ( dev_priv->fb_bpp ) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING( 6 );

	OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
	OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
		  R128_GMC_BRUSH_SOLID_COLOR |
		  fb_bpp |
		  R128_GMC_SRC_DATATYPE_COLOR |
		  R128_ROP3_P |
		  R128_GMC_CLR_CMP_CNTL_DIS |
		  R128_GMC_AUX_CLIP_DIS );

	OUT_RING( (pitch << 21) | (offset >> 5) );
	OUT_RING( color );

	OUT_RING( (x << 16) | y );
	OUT_RING( (w << 16) | h );

	ADVANCE_RING();
}

static void r128_cce_performance_boxes( drm_r128_private_t *dev_priv )
{
	if ( atomic_read( &dev_priv->idle_count ) == 0 ) {
		r128_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
	} else {
		atomic_set( &dev_priv->idle_count, 0 );
	}
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty( const char *msg, unsigned int flags )
{
	DRM_INFO( "%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		  msg,
		  flags,
		  (flags & R128_UPLOAD_CORE) ? "core, " : "",
		  (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		  (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		  (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		  (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		  (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		  (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		  (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		  (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
}
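
/* Clear the selected buffers (front, back and/or depth) for every cliprect
 * currently in the SAREA.  When page flipping has exchanged the buffers,
 * the front/back flags are swapped so the clear hits the correct surface.
 */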

static void r128_cce_dispatch_clear( drm_device_t *dev,
				     drm_r128_clear_t *clear )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if ( tmp & R128_FRONT ) flags |= R128_BACK;
		if ( tmp & R128_BACK )  flags |= R128_FRONT;
	}

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
			   pbox[i].x1, pbox[i].y1, pbox[i].x2,
			   pbox[i].y2, flags );

		if ( flags & (R128_FRONT | R128_BACK) ) {
			BEGIN_RING( 2 );

			OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
			OUT_RING( clear->color_mask );

			ADVANCE_RING();
		}

		if ( flags & R128_FRONT ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->color_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS );

			OUT_RING( dev_priv->front_pitch_offset_c );
			OUT_RING( clear->clear_color );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}

		if ( flags & R128_BACK ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->color_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS );

			OUT_RING( dev_priv->back_pitch_offset_c );
			OUT_RING( clear->clear_color );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}

		if ( flags & R128_DEPTH ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( clear->clear_depth );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}
	}
}
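
/* Copy the back buffer to the front buffer for each cliprect and bump the
 * frame counter that clients use for frame-rate throttling.
 */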

static void r128_cce_dispatch_swap( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes( dev_priv );
#endif

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING( 7 );

		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
			  R128_GMC_DST_PITCH_OFFSET_CNTL |
			  R128_GMC_BRUSH_NONE |
			  (dev_priv->color_fmt << 8) |
			  R128_GMC_SRC_DATATYPE_COLOR |
			  R128_ROP3_S |
			  R128_DP_SRC_SOURCE_MEMORY |
			  R128_GMC_CLR_CMP_CNTL_DIS |
			  R128_GMC_AUX_CLIP_DIS |
			  R128_GMC_WR_MSK_DIS );

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING( dev_priv->back_pitch_offset_c );
			OUT_RING( dev_priv->front_pitch_offset_c );
		}
		else {
			OUT_RING( dev_priv->front_pitch_offset_c );
			OUT_RING( dev_priv->back_pitch_offset_c );
		}

		OUT_RING( (x << 16) | y );
		OUT_RING( (x << 16) | y );
		OUT_RING( (w << 16) | h );

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) );
	OUT_RING( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}
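
/* Flip the displayed page by reprogramming R128_CRTC_OFFSET, after waiting
 * for any previous flip to complete, and update the page bookkeeping shared
 * with the client through the SAREA.
 */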

static void r128_cce_dispatch_flip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes( dev_priv );
#endif

	BEGIN_RING( 4 );

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING( CCE_PACKET0( R128_CRTC_OFFSET, 0 ) );

	if ( dev_priv->current_page == 0 ) {
		OUT_RING( dev_priv->back_offset );
	} else {
		OUT_RING( dev_priv->front_offset );
	}

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
		1 - dev_priv->current_page;

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) );
	OUT_RING( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}
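
/* Kick off rendering of a client vertex buffer: upload any dirty state,
 * then emit the buffer once for each group of up to three cliprects.  If
 * the buffer is flagged for discard, also emit its age so it can be
 * reclaimed once the hardware has processed it.
 */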

static void r128_cce_dispatch_vertex( drm_device_t *dev,
				      drm_buf_t *buf )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG( "buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox );

	if ( 0 )
		r128_print_dirty( "dispatch_vertex", sarea_priv->dirty );

	if ( buf->used ) {
		buf_priv->dispatched = 1;

		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
			r128_emit_state( dev_priv );
		}

		do {
			/* Emit the next set of up to three cliprects */
			if ( i < sarea_priv->nbox ) {
				r128_emit_clip_rects( dev_priv,
						      &sarea_priv->boxes[i],
						      sarea_priv->nbox - i );
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING( 5 );

			OUT_RING( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, 3 ) );
			OUT_RING( offset );
			OUT_RING( size );
			OUT_RING( format );
			OUT_RING( prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				  (size << R128_CCE_VC_CNTL_NUM_SHIFT) );

			ADVANCE_RING();

			i += 3;
		} while ( i < sarea_priv->nbox );
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
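
/* Submit an arbitrary range of a client buffer as an indirect CCE buffer.
 * The range is padded to an even number of dwords with a Type-2 packet
 * before being handed to the PM4 indirect-buffer registers.
 */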

static void r128_cce_dispatch_indirect( drm_device_t *dev,
					drm_buf_t *buf,
					int start, int end )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
		   buf->idx, start, end );

	if ( start != end ) {
		int offset = buf->bus_address + start;
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if ( dwords & 1 ) {
			u32 *data = (u32 *)
				((char *)dev->agp_buffer_map->handle
				 + buf->offset + start);
			data[dwords++] = cpu_to_le32( R128_CCE_PACKET2 );
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING( 3 );

		OUT_RING( CCE_PACKET0( R128_PM4_IW_INDOFF, 1 ) );
		OUT_RING( offset );
		OUT_RING( dwords );

		ADVANCE_RING();
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}
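
/* Render indexed primitives from a client buffer.  The CCE command header
 * is patched directly into the buffer in front of the indices, and the
 * buffer is then resubmitted as an indirect buffer once per group of up
 * to three cliprects.
 */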

static void r128_cce_dispatch_indices( drm_device_t *dev,
				       drm_buf_t *buf,
				       int start, int end,
				       int count )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );

	if ( 0 )
		r128_print_dirty( "dispatch_indices", sarea_priv->dirty );

	if ( start != end ) {
		buf_priv->dispatched = 1;

		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
			r128_emit_state( dev_priv );
		}

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *)((char *)dev->agp_buffer_map->handle
			       + buf->offset + start);

		data[0] = cpu_to_le32( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM,
						    dwords-2 ) );

		data[1] = cpu_to_le32( offset );
		data[2] = cpu_to_le32( R128_MAX_VB_VERTS );
		data[3] = cpu_to_le32( format );
		data[4] = cpu_to_le32( (prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
					(count << 16)) );

		if ( count & 0x1 ) {
#ifdef __LITTLE_ENDIAN
			data[dwords-1] &= 0x0000ffff;
#else
			data[dwords-1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if ( i < sarea_priv->nbox ) {
				r128_emit_clip_rects( dev_priv,
						      &sarea_priv->boxes[i],
						      sarea_priv->nbox - i );
			}

			r128_cce_dispatch_indirect( dev, buf, start, end );

			i += 3;
		} while ( i < sarea_priv->nbox );
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}
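
/* Upload texture data from a client buffer with a HOSTDATA blit.  The
 * pixel cache is flushed and invalidated around the blit so the texture
 * data does not get mixed with stale framebuffer contents.
 */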

static int r128_cce_dispatch_blit( DRMFILE filp,
				   drm_device_t *dev,
				   drm_r128_blit_t *blit )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( blit->format ) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR( "invalid blit format %d\n", blit->format );
		return DRM_ERR(EINVAL);
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
	OUT_RING( R128_PC_RI_GUI | R128_PC_FLUSH_GUI );

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", blit->idx );
		return DRM_ERR(EINVAL);
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *)((char *)dev->agp_buffer_map->handle + buf->offset);

	data[0] = cpu_to_le32( CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 ) );
	data[1] = cpu_to_le32( (R128_GMC_DST_PITCH_OFFSET_CNTL |
				R128_GMC_BRUSH_NONE |
				(blit->format << 8) |
				R128_GMC_SRC_DATATYPE_COLOR |
				R128_ROP3_S |
				R128_DP_SRC_SOURCE_HOST_DATA |
				R128_GMC_CLR_CMP_CNTL_DIS |
				R128_GMC_AUX_CLIP_DIS |
				R128_GMC_WR_MSK_DIS) );

	data[2] = cpu_to_le32( (blit->pitch << 21) | (blit->offset >> 5) );
	data[3] = cpu_to_le32( 0xffffffff );
	data[4] = cpu_to_le32( 0xffffffff );
	data[5] = cpu_to_le32( (blit->y << 16) | blit->x );
	data[6] = cpu_to_le32( (blit->height << 16) | blit->width );
	data[7] = cpu_to_le32( dwords );

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect( dev, buf, 0, buf->used );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
	OUT_RING( R128_PC_FLUSH_GUI );

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */
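
/* The following four helpers implement software access to the depth buffer
 * for spans and individual pixels, using CCE paint and blit packets rather
 * than direct framebuffer access.
 */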

static int r128_cce_dispatch_write_span( drm_device_t *dev,
					 drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( DRM_COPY_FROM_USER( &x, depth->x, sizeof(x) ) ) {
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( &y, depth->y, sizeof(y) ) ) {
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = DRM_MALLOC( buffer_size );
	if ( buffer == NULL )
		return DRM_ERR(ENOMEM);
	if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
		DRM_FREE( buffer, buffer_size);
		return DRM_ERR(EFAULT);
	}

	mask_size = depth->n * sizeof(u8);
	if ( depth->mask ) {
		mask = DRM_MALLOC( mask_size );
		if ( mask == NULL ) {
			DRM_FREE( buffer, buffer_size );
			return DRM_ERR(ENOMEM);
		}
		if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
			DRM_FREE( buffer, buffer_size );
			DRM_FREE( mask, mask_size );
			return DRM_ERR(EFAULT);
		}

		for ( i = 0 ; i < count ; i++, x++ ) {
			if ( mask[i] ) {
				BEGIN_RING( 6 );

				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
					  R128_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->depth_fmt << 8) |
					  R128_GMC_SRC_DATATYPE_COLOR |
					  R128_ROP3_P |
					  R128_GMC_CLR_CMP_CNTL_DIS |
					  R128_GMC_WR_MSK_DIS );

				OUT_RING( dev_priv->depth_pitch_offset_c );
				OUT_RING( buffer[i] );

				OUT_RING( (x << 16) | y );
				OUT_RING( (1 << 16) | 1 );

				ADVANCE_RING();
			}
		}

		DRM_FREE( mask, mask_size );
	} else {
		for ( i = 0 ; i < count ; i++, x++ ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( buffer[i] );

			OUT_RING( (x << 16) | y );
			OUT_RING( (1 << 16) | 1 );

			ADVANCE_RING();
		}
	}

	DRM_FREE( buffer, buffer_size );

	return 0;
}

static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
					   drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = DRM_MALLOC( xbuf_size );
	if ( x == NULL ) {
		return DRM_ERR(ENOMEM);
	}
	y = DRM_MALLOC( ybuf_size );
	if ( y == NULL ) {
		DRM_FREE( x, xbuf_size );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( y, depth->y, ybuf_size ) ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = DRM_MALLOC( buffer_size );
	if ( buffer == NULL ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		DRM_FREE( buffer, buffer_size );
		return DRM_ERR(EFAULT);
	}

	if ( depth->mask ) {
		mask_size = depth->n * sizeof(u8);
		mask = DRM_MALLOC( mask_size );
		if ( mask == NULL ) {
			DRM_FREE( x, xbuf_size );
			DRM_FREE( y, ybuf_size );
			DRM_FREE( buffer, buffer_size );
			return DRM_ERR(ENOMEM);
		}
		if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
			DRM_FREE( x, xbuf_size );
			DRM_FREE( y, ybuf_size );
			DRM_FREE( buffer, buffer_size );
			DRM_FREE( mask, mask_size );
			return DRM_ERR(EFAULT);
		}

		for ( i = 0 ; i < count ; i++ ) {
			if ( mask[i] ) {
				BEGIN_RING( 6 );

				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
					  R128_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->depth_fmt << 8) |
					  R128_GMC_SRC_DATATYPE_COLOR |
					  R128_ROP3_P |
					  R128_GMC_CLR_CMP_CNTL_DIS |
					  R128_GMC_WR_MSK_DIS );

				OUT_RING( dev_priv->depth_pitch_offset_c );
				OUT_RING( buffer[i] );

				OUT_RING( (x[i] << 16) | y[i] );
				OUT_RING( (1 << 16) | 1 );

				ADVANCE_RING();
			}
		}

		DRM_FREE( mask, mask_size );
	} else {
		for ( i = 0 ; i < count ; i++ ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( buffer[i] );

			OUT_RING( (x[i] << 16) | y[i] );
			OUT_RING( (1 << 16) | 1 );

			ADVANCE_RING();
		}
	}

	DRM_FREE( x, xbuf_size );
	DRM_FREE( y, ybuf_size );
	DRM_FREE( buffer, buffer_size );

	return 0;
}

static int r128_cce_dispatch_read_span( drm_device_t *dev,
					drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( DRM_COPY_FROM_USER( &x, depth->x, sizeof(x) ) ) {
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( &y, depth->y, sizeof(y) ) ) {
		return DRM_ERR(EFAULT);
	}

	BEGIN_RING( 7 );

	OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
	OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
		  R128_GMC_DST_PITCH_OFFSET_CNTL |
		  R128_GMC_BRUSH_NONE |
		  (dev_priv->depth_fmt << 8) |
		  R128_GMC_SRC_DATATYPE_COLOR |
		  R128_ROP3_S |
		  R128_DP_SRC_SOURCE_MEMORY |
		  R128_GMC_CLR_CMP_CNTL_DIS |
		  R128_GMC_WR_MSK_DIS );

	OUT_RING( dev_priv->depth_pitch_offset_c );
	OUT_RING( dev_priv->span_pitch_offset_c );

	OUT_RING( (x << 16) | y );
	OUT_RING( (0 << 16) | 0 );
	OUT_RING( (count << 16) | 1 );

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
					  drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( count > dev_priv->depth_pitch ) {
		count = dev_priv->depth_pitch;
	}

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = DRM_MALLOC( xbuf_size );
	if ( x == NULL ) {
		return DRM_ERR(ENOMEM);
	}
	y = DRM_MALLOC( ybuf_size );
	if ( y == NULL ) {
		DRM_FREE( x, xbuf_size );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( y, depth->y, ybuf_size ) ) {
		DRM_FREE( x, xbuf_size );
		DRM_FREE( y, ybuf_size );
		return DRM_ERR(EFAULT);
	}

	for ( i = 0 ; i < count ; i++ ) {
		BEGIN_RING( 7 );

		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
			  R128_GMC_DST_PITCH_OFFSET_CNTL |
			  R128_GMC_BRUSH_NONE |
			  (dev_priv->depth_fmt << 8) |
			  R128_GMC_SRC_DATATYPE_COLOR |
			  R128_ROP3_S |
			  R128_DP_SRC_SOURCE_MEMORY |
			  R128_GMC_CLR_CMP_CNTL_DIS |
			  R128_GMC_WR_MSK_DIS );

		OUT_RING( dev_priv->depth_pitch_offset_c );
		OUT_RING( dev_priv->span_pitch_offset_c );

		OUT_RING( (x[i] << 16) | y[i] );
		OUT_RING( (i << 16) | 0 );
		OUT_RING( (1 << 16) | 1 );

		ADVANCE_RING();
	}

	DRM_FREE( x, xbuf_size );
	DRM_FREE( y, ybuf_size );

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	BEGIN_RING( 33 );

	OUT_RING( CCE_PACKET0( R128_BRUSH_DATA0, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */
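
/* Each ioctl below follows the same basic pattern: verify that the caller
 * holds the hardware lock, copy its arguments in from user space, make
 * sure there is ring space available, dispatch the request, and finally
 * commit the ring.
 */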

int r128_cce_clear( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_clear_t clear;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t __user *) data,
				  sizeof(clear) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear( dev, &clear );
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

static int r128_do_init_pageflip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	dev_priv->crtc_offset = R128_READ( R128_CRTC_OFFSET );
	dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL );

	R128_WRITE( R128_CRTC_OFFSET, dev_priv->front_offset );
	R128_WRITE( R128_CRTC_OFFSET_CNTL,
		    dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL );

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

int r128_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset );
	R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl );

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip( dev );
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
int r128_cce_flip( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip( dev );

	r128_cce_dispatch_flip( dev );

	COMMIT_RING();
	return 0;
}

int r128_cce_swap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap( dev );
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

int r128_cce_vertex( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t vertex;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t __user *) data,
				  sizeof(vertex) );

	DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
		   DRM_CURRENTPID,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return DRM_ERR(EINVAL);
	}

	buf->used = vertex.count;
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	r128_cce_dispatch_vertex( dev, buf );

	COMMIT_RING();
	return 0;
}

int r128_cce_indices( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t elts;
	int count;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t __user *) data,
				  sizeof(elts) );

	DRM_DEBUG( "pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		   elts.idx, elts.start, elts.end, elts.discard );

	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   elts.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( elts.prim < 0 ||
	     elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
		DRM_ERROR( "buffer prim %d\n", elts.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[elts.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= R128_INDEX_PRIM_OFFSET;

	if ( elts.start & 0x7 ) {
		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
		return DRM_ERR(EINVAL);
	}
	if ( elts.start < buf->used ) {
		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;
	buf_priv->prim = elts.prim;
	buf_priv->discard = elts.discard;

	r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count );

	COMMIT_RING();
	return 0;
}

int r128_cce_blit( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t blit;
	int ret;

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t __user *) data,
				  sizeof(blit) );

	DRM_DEBUG( "pid=%d index=%d\n", DRM_CURRENTPID, blit.idx );

	if ( blit.idx < 0 || blit.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   blit.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	ret = r128_cce_dispatch_blit( filp, dev, &blit );

	COMMIT_RING();
	return ret;
}

int r128_cce_depth( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t depth;
	int ret;

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t __user *) data,
				  sizeof(depth) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	ret = DRM_ERR(EINVAL);
	switch ( depth.func ) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span( dev, &depth );
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels( dev, &depth );
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span( dev, &depth );
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels( dev, &depth );
		break;
	}

	COMMIT_RING();
	return ret;
}

int r128_cce_stipple( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t __user *) data,
				  sizeof(stipple) );

	if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
				 32 * sizeof(u32) ) )
		return DRM_ERR( EFAULT );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	r128_cce_dispatch_stipple( dev, mask );

	COMMIT_RING();
	return 0;
}

int r128_cce_indirect( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t indirect;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t __user *) data,
				  sizeof(indirect) );

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return DRM_ERR(EINVAL);
	}

	if ( indirect.start < buf->used ) {
		DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
			   indirect.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING( 2 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end );

	COMMIT_RING();
	return 0;
}

int r128_getparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t param;
	int value;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t __user *)data,
				  sizeof(param) );

	DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

	switch( param.param ) {
	case R128_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}
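
/* Driver callbacks registered with the DRM core: tear down page flipping
 * when a client releases the device, and tear down the CCE engine when
 * the device itself is taken down.
 */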

static void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			r128_do_cleanup_pageflip( dev );
		}
	}
}

static void r128_driver_pretakedown(drm_device_t *dev)
{
	r128_do_cleanup_cce( dev );
}

void r128_driver_register_fns(drm_device_t *dev)
{
	dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL;
	dev->dev_priv_size = sizeof(drm_r128_buf_priv_t);
	dev->fn_tbl.prerelease = r128_driver_prerelease;
	dev->fn_tbl.pretakedown = r128_driver_pretakedown;
	dev->fn_tbl.vblank_wait = r128_driver_vblank_wait;
	dev->fn_tbl.irq_preinstall = r128_driver_irq_preinstall;
	dev->fn_tbl.irq_postinstall = r128_driver_irq_postinstall;
	dev->fn_tbl.irq_uninstall = r128_driver_irq_uninstall;
	dev->fn_tbl.irq_handler = r128_driver_irq_handler;
}