/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"

/* ================================================================
 * CCE hardware state programming functions
 */

static void r128_emit_clip_rects( drm_r128_private_t *dev_priv,
				  drm_clip_rect_t *boxes, int count )
{
	u32 aux_sc_cntl = 0x00000000;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );
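	/* Each AUXn_SC scissor packet below is 5 dwords (header plus the
	 * left/right/top/bottom edges); the trailing AUX_SC_CNTL write
	 * accounts for the final 2 dwords.
	 */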
	BEGIN_RING( (count < 3? count: 3) * 5 + 2 );

	if ( count >= 1 ) {
		OUT_RING( CCE_PACKET0( R128_AUX1_SC_LEFT, 3 ) );
		OUT_RING( boxes[0].x1 );
		OUT_RING( boxes[0].x2 - 1 );
		OUT_RING( boxes[0].y1 );
		OUT_RING( boxes[0].y2 - 1 );

		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
	}
	if ( count >= 2 ) {
		OUT_RING( CCE_PACKET0( R128_AUX2_SC_LEFT, 3 ) );
		OUT_RING( boxes[1].x1 );
		OUT_RING( boxes[1].x2 - 1 );
		OUT_RING( boxes[1].y1 );
		OUT_RING( boxes[1].y2 - 1 );

		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
	}
	if ( count >= 3 ) {
		OUT_RING( CCE_PACKET0( R128_AUX3_SC_LEFT, 3 ) );
		OUT_RING( boxes[2].x1 );
		OUT_RING( boxes[2].x2 - 1 );
		OUT_RING( boxes[2].y1 );
		OUT_RING( boxes[2].y2 - 1 );

		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
	}

	OUT_RING( CCE_PACKET0( R128_AUX_SC_CNTL, 0 ) );
	OUT_RING( aux_sc_cntl );

	ADVANCE_RING();
}

static __inline__ void r128_emit_core( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_SCALE_3D_CNTL, 0 ) );
	OUT_RING( ctx->scale_3d_cntl );

	ADVANCE_RING();
}

static __inline__ void r128_emit_context( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 13 );

	OUT_RING( CCE_PACKET0( R128_DST_PITCH_OFFSET_C, 11 ) );
	OUT_RING( ctx->dst_pitch_offset_c );
	OUT_RING( ctx->dp_gui_master_cntl_c );
	OUT_RING( ctx->sc_top_left_c );
	OUT_RING( ctx->sc_bottom_right_c );
	OUT_RING( ctx->z_offset_c );
	OUT_RING( ctx->z_pitch_c );
	OUT_RING( ctx->z_sten_cntl_c );
	OUT_RING( ctx->tex_cntl_c );
	OUT_RING( ctx->misc_3d_state_cntl_reg );
	OUT_RING( ctx->texture_clr_cmp_clr_c );
	OUT_RING( ctx->texture_clr_cmp_msk_c );
	OUT_RING( ctx->fog_color_c );

	ADVANCE_RING();
}

static __inline__ void r128_emit_setup( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 3 );

	OUT_RING( CCE_PACKET1( R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP ) );
	OUT_RING( ctx->setup_cntl );
	OUT_RING( ctx->pm4_vc_fpu_setup );

	ADVANCE_RING();
}

static __inline__ void r128_emit_masks( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 5 );

	OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
	OUT_RING( ctx->dp_write_mask );

	OUT_RING( CCE_PACKET0( R128_STEN_REF_MASK_C, 1 ) );
	OUT_RING( ctx->sten_ref_mask_c );
	OUT_RING( ctx->plane_3d_mask_c );

	ADVANCE_RING();
}

static __inline__ void r128_emit_window( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_WINDOW_XY_OFFSET, 0 ) );
	OUT_RING( ctx->window_xy_offset );

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex0( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
	int i;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 7 + R128_MAX_TEXTURE_LEVELS );

	OUT_RING( CCE_PACKET0( R128_PRIM_TEX_CNTL_C,
			       2 + R128_MAX_TEXTURE_LEVELS ) );
	OUT_RING( tex->tex_cntl );
	OUT_RING( tex->tex_combine_cntl );
	OUT_RING( ctx->tex_size_pitch_c );
	for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) {
		OUT_RING( tex->tex_offset[i] );
	}

	OUT_RING( CCE_PACKET0( R128_CONSTANT_COLOR_C, 1 ) );
	OUT_RING( ctx->constant_color_c );
	OUT_RING( tex->tex_border_color );

	ADVANCE_RING();
}

static __inline__ void r128_emit_tex1( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
	int i;
	RING_LOCALS;
	DRM_DEBUG( " %s\n", __FUNCTION__ );

	BEGIN_RING( 5 + R128_MAX_TEXTURE_LEVELS );

	OUT_RING( CCE_PACKET0( R128_SEC_TEX_CNTL_C,
			       1 + R128_MAX_TEXTURE_LEVELS ) );
	OUT_RING( tex->tex_cntl );
	OUT_RING( tex->tex_combine_cntl );
	for ( i = 0 ; i < R128_MAX_TEXTURE_LEVELS ; i++ ) {
		OUT_RING( tex->tex_offset[i] );
	}

	OUT_RING( CCE_PACKET0( R128_SEC_TEXTURE_BORDER_COLOR_C, 0 ) );
	OUT_RING( tex->tex_border_color );

	ADVANCE_RING();
}

static __inline__ void r128_emit_state( drm_r128_private_t *dev_priv )
{
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG( "%s: dirty=0x%08x\n", __FUNCTION__, dirty );
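	/* Upload each dirty state block and clear its dirty flag as it is
	 * emitted.
	 */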
	if ( dirty & R128_UPLOAD_CORE ) {
		r128_emit_core( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
	}

	if ( dirty & R128_UPLOAD_CONTEXT ) {
		r128_emit_context( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
	}

	if ( dirty & R128_UPLOAD_SETUP ) {
		r128_emit_setup( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
	}

	if ( dirty & R128_UPLOAD_MASKS ) {
		r128_emit_masks( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
	}

	if ( dirty & R128_UPLOAD_WINDOW ) {
		r128_emit_window( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
	}

	if ( dirty & R128_UPLOAD_TEX0 ) {
		r128_emit_tex0( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
	}

	if ( dirty & R128_UPLOAD_TEX1 ) {
		r128_emit_tex1( dev_priv );
		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
	}

	/* Turn off the texture cache flushing */
	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}

#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */

static void r128_clear_box( drm_r128_private_t *dev_priv,
			    int x, int y, int w, int h,
			    int r, int g, int b )
{
	u32 pitch, offset;
	u32 fb_bpp, color;
	RING_LOCALS;

	switch ( dev_priv->fb_bpp ) {
	case 16:
		fb_bpp = R128_GMC_DST_16BPP;
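		/* Pack the 8-bit r/g/b components into RGB565. */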
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));
		break;
	case 24:
		fb_bpp = R128_GMC_DST_24BPP;
		color = ((r << 16) | (g << 8) | b);
		break;
	case 32:
		fb_bpp = R128_GMC_DST_32BPP;
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
		break;
	default:
		return;
	}

	offset = dev_priv->back_offset;
	pitch = dev_priv->back_pitch >> 3;

	BEGIN_RING( 6 );

	OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
	OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
		  R128_GMC_BRUSH_SOLID_COLOR |
		  fb_bpp |
		  R128_GMC_SRC_DATATYPE_COLOR |
		  R128_ROP3_P |
		  R128_GMC_CLR_CMP_CNTL_DIS |
		  R128_GMC_AUX_CLIP_DIS );

	OUT_RING( (pitch << 21) | (offset >> 5) );
	OUT_RING( color );

	OUT_RING( (x << 16) | y );
	OUT_RING( (w << 16) | h );

	ADVANCE_RING();
}

static void r128_cce_performance_boxes( drm_r128_private_t *dev_priv )
{
	if ( atomic_read( &dev_priv->idle_count ) == 0 ) {
		r128_clear_box( dev_priv, 64, 4, 8, 8, 0, 255, 0 );
	} else {
		atomic_set( &dev_priv->idle_count, 0 );
	}
}

#endif

/* ================================================================
 * CCE command dispatch functions
 */

static void r128_print_dirty( const char *msg, unsigned int flags )
{
	DRM_INFO( "%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
		  msg,
		  flags,
		  (flags & R128_UPLOAD_CORE) ? "core, " : "",
		  (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
		  (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
		  (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
		  (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
		  (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
		  (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
		  (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
		  (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "" );
}

static void r128_cce_dispatch_clear( drm_device_t *dev,
				     drm_r128_clear_t *clear )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	unsigned int flags = clear->flags;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );
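	/* If page flipping is active and the back buffer is currently on
	 * screen, swap the FRONT and BACK flags so the clears hit the
	 * surfaces the client actually meant.
	 */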
	if ( dev_priv->page_flipping && dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(R128_FRONT | R128_BACK);
		if ( tmp & R128_FRONT ) flags |= R128_BACK;
		if ( tmp & R128_BACK )  flags |= R128_FRONT;
	}

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG( "dispatch clear %d,%d-%d,%d flags 0x%x\n",
			   pbox[i].x1, pbox[i].y1, pbox[i].x2,
			   pbox[i].y2, flags );

		if ( flags & (R128_FRONT | R128_BACK) ) {
			BEGIN_RING( 2 );

			OUT_RING( CCE_PACKET0( R128_DP_WRITE_MASK, 0 ) );
			OUT_RING( clear->color_mask );

			ADVANCE_RING();
		}

		if ( flags & R128_FRONT ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->color_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS );

			OUT_RING( dev_priv->front_pitch_offset_c );
			OUT_RING( clear->clear_color );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}

		if ( flags & R128_BACK ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->color_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS );

			OUT_RING( dev_priv->back_pitch_offset_c );
			OUT_RING( clear->clear_color );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}

		if ( flags & R128_DEPTH ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_AUX_CLIP_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( clear->clear_depth );

			OUT_RING( (x << 16) | y );
			OUT_RING( (w << 16) | h );

			ADVANCE_RING();
		}
	}
}

static void r128_cce_dispatch_swap( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes( dev_priv );
#endif

	for ( i = 0 ; i < nbox ; i++ ) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		BEGIN_RING( 7 );

		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
			  R128_GMC_DST_PITCH_OFFSET_CNTL |
			  R128_GMC_BRUSH_NONE |
			  (dev_priv->color_fmt << 8) |
			  R128_GMC_SRC_DATATYPE_COLOR |
			  R128_ROP3_S |
			  R128_DP_SRC_SOURCE_MEMORY |
			  R128_GMC_CLR_CMP_CNTL_DIS |
			  R128_GMC_AUX_CLIP_DIS |
			  R128_GMC_WR_MSK_DIS );

		/* Make this work even if front & back are flipped:
		 */
		if (dev_priv->current_page == 0) {
			OUT_RING( dev_priv->back_pitch_offset_c );
			OUT_RING( dev_priv->front_pitch_offset_c );
		}
		else {
			OUT_RING( dev_priv->front_pitch_offset_c );
			OUT_RING( dev_priv->back_pitch_offset_c );
		}

		OUT_RING( (x << 16) | y );
		OUT_RING( (x << 16) | y );
		OUT_RING( (w << 16) | h );

		ADVANCE_RING();
	}

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) );
	OUT_RING( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}

static void r128_cce_dispatch_flip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
	/* Do some trivial performance monitoring...
	 */
	r128_cce_performance_boxes( dev_priv );
#endif

	BEGIN_RING( 4 );

	R128_WAIT_UNTIL_PAGE_FLIPPED();
	OUT_RING( CCE_PACKET0( R128_CRTC_OFFSET, 0 ) );

	if ( dev_priv->current_page == 0 ) {
		OUT_RING( dev_priv->back_offset );
	} else {
		OUT_RING( dev_priv->front_offset );
	}

	ADVANCE_RING();

	/* Increment the frame counter.  The client-side 3D driver must
	 * throttle the framerate by waiting for this value before
	 * performing the swapbuffer ioctl.
	 */
	dev_priv->sarea_priv->last_frame++;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
					      1 - dev_priv->current_page;

	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_LAST_FRAME_REG, 0 ) );
	OUT_RING( dev_priv->sarea_priv->last_frame );

	ADVANCE_RING();
}

static void r128_cce_dispatch_vertex( drm_device_t *dev,
				      drm_buf_t *buf )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = buf->bus_address;
	int size = buf->used;
	int prim = buf_priv->prim;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG( "buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox );

	if ( 0 )
		r128_print_dirty( "dispatch_vertex", sarea_priv->dirty );

	if ( buf->used ) {
		buf_priv->dispatched = 1;

		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
			r128_emit_state( dev_priv );
		}
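		/* The hardware provides only three auxiliary scissors, so
		 * the vertex buffer is re-dispatched once for every group
		 * of up to three cliprects.
		 */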
		do {
			/* Emit the next set of up to three cliprects */
			if ( i < sarea_priv->nbox ) {
				r128_emit_clip_rects( dev_priv,
						      &sarea_priv->boxes[i],
						      sarea_priv->nbox - i );
			}

			/* Emit the vertex buffer rendering commands */
			BEGIN_RING( 5 );

			OUT_RING( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM, 3 ) );
			OUT_RING( offset );
			OUT_RING( size );
			OUT_RING( format );
			OUT_RING( prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
				  (size << R128_CCE_VC_CNTL_NUM_SHIFT) );

			ADVANCE_RING();

			i += 3;
		} while ( i < sarea_priv->nbox );
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static void r128_cce_dispatch_indirect( drm_device_t *dev,
					drm_buf_t *buf,
					int start, int end )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	RING_LOCALS;
	DRM_DEBUG( "indirect: buf=%d s=0x%x e=0x%x\n",
		   buf->idx, start, end );

	if ( start != end ) {
		int offset = buf->bus_address + start;
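		/* Convert the byte range into whole dwords, rounding up. */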
		int dwords = (end - start + 3) / sizeof(u32);

		/* Indirect buffer data must be an even number of
		 * dwords, so if we've been given an odd number we must
		 * pad the data with a Type-2 CCE packet.
		 */
		if ( dwords & 1 ) {
			u32 *data = (u32 *)
				((char *)dev->agp_buffer_map->handle
				 + buf->offset + start);
			data[dwords++] = cpu_to_le32( R128_CCE_PACKET2 );
		}

		buf_priv->dispatched = 1;

		/* Fire off the indirect buffer */
		BEGIN_RING( 3 );

		OUT_RING( CCE_PACKET0( R128_PM4_IW_INDOFF, 1 ) );
		OUT_RING( offset );
		OUT_RING( dwords );

		ADVANCE_RING();
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the indirect buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		buf->used = 0;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;
}

static void r128_cce_dispatch_indices( drm_device_t *dev,
				       drm_buf_t *buf,
				       int start, int end,
				       int count )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int format = sarea_priv->vc_format;
	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
	int prim = buf_priv->prim;
	u32 *data;
	int dwords;
	int i = 0;
	RING_LOCALS;
	DRM_DEBUG( "indices: s=%d e=%d c=%d\n", start, end, count );

	if ( 0 )
		r128_print_dirty( "dispatch_indices", sarea_priv->dirty );

	if ( start != end ) {
		buf_priv->dispatched = 1;

		if ( sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS ) {
			r128_emit_state( dev_priv );
		}

		dwords = (end - start + 3) / sizeof(u32);

		data = (u32 *)((char *)dev->agp_buffer_map->handle
			       + buf->offset + start);

		data[0] = cpu_to_le32( CCE_PACKET3( R128_3D_RNDR_GEN_INDX_PRIM,
						    dwords-2 ) );

		data[1] = cpu_to_le32( offset );
		data[2] = cpu_to_le32( R128_MAX_VB_VERTS );
		data[3] = cpu_to_le32( format );
		data[4] = cpu_to_le32( (prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
					(count << 16)) );
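		/* With an odd index count the final dword holds only one
		 * 16-bit index; clear the unused half so stale data is not
		 * interpreted as an extra index.
		 */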
		if ( count & 0x1 ) {
#ifdef __LITTLE_ENDIAN
			data[dwords-1] &= 0x0000ffff;
#else
			data[dwords-1] &= 0xffff0000;
#endif
		}

		do {
			/* Emit the next set of up to three cliprects */
			if ( i < sarea_priv->nbox ) {
				r128_emit_clip_rects( dev_priv,
						      &sarea_priv->boxes[i],
						      sarea_priv->nbox - i );
			}

			r128_cce_dispatch_indirect( dev, buf, start, end );

			i += 3;
		} while ( i < sarea_priv->nbox );
	}

	if ( buf_priv->discard ) {
		buf_priv->age = dev_priv->sarea_priv->last_dispatch;

		/* Emit the vertex buffer age */
		BEGIN_RING( 2 );

		OUT_RING( CCE_PACKET0( R128_LAST_DISPATCH_REG, 0 ) );
		OUT_RING( buf_priv->age );

		ADVANCE_RING();

		buf->pending = 1;
		/* FIXME: Check dispatched field */
		buf_priv->dispatched = 0;
	}

	dev_priv->sarea_priv->last_dispatch++;

	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;
}

static int r128_cce_dispatch_blit( DRMFILE filp,
				   drm_device_t *dev,
				   drm_r128_blit_t *blit )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	u32 *data;
	int dword_shift, dwords;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch ( blit->format ) {
	case R128_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case R128_DATATYPE_ARGB1555:
	case R128_DATATYPE_RGB565:
	case R128_DATATYPE_ARGB4444:
	case R128_DATATYPE_YVYU422:
	case R128_DATATYPE_VYUY422:
		dword_shift = 1;
		break;
	case R128_DATATYPE_CI8:
	case R128_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR( "invalid blit format %d\n", blit->format );
		return DRM_ERR(EINVAL);
	}

	/* Flush the pixel cache, and mark the contents as Read Invalid.
	 * This ensures no pixel data gets mixed up with the texture
	 * data from the host data blit, otherwise part of the texture
	 * image may be corrupted.
	 */
	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
	OUT_RING( R128_PC_RI_GUI | R128_PC_FLUSH_GUI );

	ADVANCE_RING();

	/* Dispatch the indirect buffer.
	 */
	buf = dma->buflist[blit->idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", blit->idx );
		return DRM_ERR(EINVAL);
	}

	buf_priv->discard = 1;

	dwords = (blit->width * blit->height) >> dword_shift;

	data = (u32 *)((char *)dev->agp_buffer_map->handle + buf->offset);
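	/* Build the HOSTDATA_BLT packet in place in the indirect buffer:
	 * packet header, GUI control word, destination pitch/offset, two
	 * all-ones words, x/y, width/height, and the host-data dword
	 * count.  The pixel data itself follows in the same buffer.
	 */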
	data[0] = cpu_to_le32( CCE_PACKET3( R128_CNTL_HOSTDATA_BLT, dwords + 6 ) );
	data[1] = cpu_to_le32( (R128_GMC_DST_PITCH_OFFSET_CNTL |
				R128_GMC_BRUSH_NONE |
				(blit->format << 8) |
				R128_GMC_SRC_DATATYPE_COLOR |
				R128_ROP3_S |
				R128_DP_SRC_SOURCE_HOST_DATA |
				R128_GMC_CLR_CMP_CNTL_DIS |
				R128_GMC_AUX_CLIP_DIS |
				R128_GMC_WR_MSK_DIS) );

	data[2] = cpu_to_le32( (blit->pitch << 21) | (blit->offset >> 5) );
	data[3] = cpu_to_le32( 0xffffffff );
	data[4] = cpu_to_le32( 0xffffffff );
	data[5] = cpu_to_le32( (blit->y << 16) | blit->x );
	data[6] = cpu_to_le32( (blit->height << 16) | blit->width );
	data[7] = cpu_to_le32( dwords );

	buf->used = (dwords + 8) * sizeof(u32);

	r128_cce_dispatch_indirect( dev, buf, 0, buf->used );

	/* Flush the pixel cache after the blit completes.  This ensures
	 * the texture data is written out to memory before rendering
	 * continues.
	 */
	BEGIN_RING( 2 );

	OUT_RING( CCE_PACKET0( R128_PC_GUI_CTLSTAT, 0 ) );
	OUT_RING( R128_PC_FLUSH_GUI );

	ADVANCE_RING();

	return 0;
}

/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */

static int r128_cce_dispatch_write_span( drm_device_t *dev,
					 drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	u32 *buffer;
	u8 *mask;
	int i, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( DRM_COPY_FROM_USER( &x, depth->x, sizeof(x) ) ) {
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( &y, depth->y, sizeof(y) ) ) {
		return DRM_ERR(EFAULT);
	}

	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc( buffer_size, DRM_MEM_BUFS );
	if ( buffer == NULL )
		return DRM_ERR(ENOMEM);
	if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
		drm_free( buffer, buffer_size, DRM_MEM_BUFS);
		return DRM_ERR(EFAULT);
	}

	mask_size = depth->n * sizeof(u8);
	if ( depth->mask ) {
		mask = drm_alloc( mask_size, DRM_MEM_BUFS );
		if ( mask == NULL ) {
			drm_free( buffer, buffer_size, DRM_MEM_BUFS );
			return DRM_ERR(ENOMEM);
		}
		if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
			drm_free( buffer, buffer_size, DRM_MEM_BUFS );
			drm_free( mask, mask_size, DRM_MEM_BUFS );
			return DRM_ERR(EFAULT);
		}
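		/* Paint each depth value as a 1x1 solid fill at (x, y),
		 * skipping any pixel whose mask byte is zero.
		 */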
		for ( i = 0 ; i < count ; i++, x++ ) {
			if ( mask[i] ) {
				BEGIN_RING( 6 );

				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
					  R128_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->depth_fmt << 8) |
					  R128_GMC_SRC_DATATYPE_COLOR |
					  R128_ROP3_P |
					  R128_GMC_CLR_CMP_CNTL_DIS |
					  R128_GMC_WR_MSK_DIS );

				OUT_RING( dev_priv->depth_pitch_offset_c );
				OUT_RING( buffer[i] );

				OUT_RING( (x << 16) | y );
				OUT_RING( (1 << 16) | 1 );

				ADVANCE_RING();
			}
		}

		drm_free( mask, mask_size, DRM_MEM_BUFS );
	} else {
		for ( i = 0 ; i < count ; i++, x++ ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( buffer[i] );

			OUT_RING( (x << 16) | y );
			OUT_RING( (1 << 16) | 1 );

			ADVANCE_RING();
		}
	}

	drm_free( buffer, buffer_size, DRM_MEM_BUFS );

	return 0;
}

static int r128_cce_dispatch_write_pixels( drm_device_t *dev,
					   drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	u32 *buffer;
	u8 *mask;
	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc( xbuf_size, DRM_MEM_BUFS );
	if ( x == NULL ) {
		return DRM_ERR(ENOMEM);
	}
	y = drm_alloc( ybuf_size, DRM_MEM_BUFS );
	if ( y == NULL ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( y, depth->y, ybuf_size ) ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		return DRM_ERR(EFAULT);
	}
	buffer_size = depth->n * sizeof(u32);
	buffer = drm_alloc( buffer_size, DRM_MEM_BUFS );
	if ( buffer == NULL ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( buffer, depth->buffer, buffer_size ) ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		drm_free( buffer, buffer_size, DRM_MEM_BUFS );
		return DRM_ERR(EFAULT);
	}

	if ( depth->mask ) {
		mask_size = depth->n * sizeof(u8);
		mask = drm_alloc( mask_size, DRM_MEM_BUFS );
		if ( mask == NULL ) {
			drm_free( x, xbuf_size, DRM_MEM_BUFS );
			drm_free( y, ybuf_size, DRM_MEM_BUFS );
			drm_free( buffer, buffer_size, DRM_MEM_BUFS );
			return DRM_ERR(ENOMEM);
		}
		if ( DRM_COPY_FROM_USER( mask, depth->mask, mask_size ) ) {
			drm_free( x, xbuf_size, DRM_MEM_BUFS );
			drm_free( y, ybuf_size, DRM_MEM_BUFS );
			drm_free( buffer, buffer_size, DRM_MEM_BUFS );
			drm_free( mask, mask_size, DRM_MEM_BUFS );
			return DRM_ERR(EFAULT);
		}

		for ( i = 0 ; i < count ; i++ ) {
			if ( mask[i] ) {
				BEGIN_RING( 6 );

				OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
				OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
					  R128_GMC_BRUSH_SOLID_COLOR |
					  (dev_priv->depth_fmt << 8) |
					  R128_GMC_SRC_DATATYPE_COLOR |
					  R128_ROP3_P |
					  R128_GMC_CLR_CMP_CNTL_DIS |
					  R128_GMC_WR_MSK_DIS );

				OUT_RING( dev_priv->depth_pitch_offset_c );
				OUT_RING( buffer[i] );

				OUT_RING( (x[i] << 16) | y[i] );
				OUT_RING( (1 << 16) | 1 );

				ADVANCE_RING();
			}
		}

		drm_free( mask, mask_size, DRM_MEM_BUFS );
	} else {
		for ( i = 0 ; i < count ; i++ ) {
			BEGIN_RING( 6 );

			OUT_RING( CCE_PACKET3( R128_CNTL_PAINT_MULTI, 4 ) );
			OUT_RING( R128_GMC_DST_PITCH_OFFSET_CNTL |
				  R128_GMC_BRUSH_SOLID_COLOR |
				  (dev_priv->depth_fmt << 8) |
				  R128_GMC_SRC_DATATYPE_COLOR |
				  R128_ROP3_P |
				  R128_GMC_CLR_CMP_CNTL_DIS |
				  R128_GMC_WR_MSK_DIS );

			OUT_RING( dev_priv->depth_pitch_offset_c );
			OUT_RING( buffer[i] );

			OUT_RING( (x[i] << 16) | y[i] );
			OUT_RING( (1 << 16) | 1 );

			ADVANCE_RING();
		}
	}

	drm_free( x, xbuf_size, DRM_MEM_BUFS );
	drm_free( y, ybuf_size, DRM_MEM_BUFS );
	drm_free( buffer, buffer_size, DRM_MEM_BUFS );

	return 0;
}

static int r128_cce_dispatch_read_span( drm_device_t *dev,
					drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, x, y;
	RING_LOCALS;
	DRM_DEBUG( "\n" );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( DRM_COPY_FROM_USER( &x, depth->x, sizeof(x) ) ) {
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( &y, depth->y, sizeof(y) ) ) {
		return DRM_ERR(EFAULT);
	}
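	/* Blit the requested span of depth pixels starting at (x, y) into
	 * the span buffer region so the client can read them back.
	 */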
	BEGIN_RING( 7 );

	OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
	OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
		  R128_GMC_DST_PITCH_OFFSET_CNTL |
		  R128_GMC_BRUSH_NONE |
		  (dev_priv->depth_fmt << 8) |
		  R128_GMC_SRC_DATATYPE_COLOR |
		  R128_ROP3_S |
		  R128_DP_SRC_SOURCE_MEMORY |
		  R128_GMC_CLR_CMP_CNTL_DIS |
		  R128_GMC_WR_MSK_DIS );

	OUT_RING( dev_priv->depth_pitch_offset_c );
	OUT_RING( dev_priv->span_pitch_offset_c );

	OUT_RING( (x << 16) | y );
	OUT_RING( (0 << 16) | 0 );
	OUT_RING( (count << 16) | 1 );

	ADVANCE_RING();

	return 0;
}

static int r128_cce_dispatch_read_pixels( drm_device_t *dev,
					  drm_r128_depth_t *depth )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int count, *x, *y;
	int i, xbuf_size, ybuf_size;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	count = depth->n;
	if (count > 4096 || count <= 0)
		return DRM_ERR(EMSGSIZE);

	if ( count > dev_priv->depth_pitch ) {
		count = dev_priv->depth_pitch;
	}

	xbuf_size = count * sizeof(*x);
	ybuf_size = count * sizeof(*y);
	x = drm_alloc( xbuf_size, DRM_MEM_BUFS );
	if ( x == NULL ) {
		return DRM_ERR(ENOMEM);
	}
	y = drm_alloc( ybuf_size, DRM_MEM_BUFS );
	if ( y == NULL ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		return DRM_ERR(ENOMEM);
	}
	if ( DRM_COPY_FROM_USER( x, depth->x, xbuf_size ) ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		return DRM_ERR(EFAULT);
	}
	if ( DRM_COPY_FROM_USER( y, depth->y, ybuf_size ) ) {
		drm_free( x, xbuf_size, DRM_MEM_BUFS );
		drm_free( y, ybuf_size, DRM_MEM_BUFS );
		return DRM_ERR(EFAULT);
	}

	for ( i = 0 ; i < count ; i++ ) {
		BEGIN_RING( 7 );

		OUT_RING( CCE_PACKET3( R128_CNTL_BITBLT_MULTI, 5 ) );
		OUT_RING( R128_GMC_SRC_PITCH_OFFSET_CNTL |
			  R128_GMC_DST_PITCH_OFFSET_CNTL |
			  R128_GMC_BRUSH_NONE |
			  (dev_priv->depth_fmt << 8) |
			  R128_GMC_SRC_DATATYPE_COLOR |
			  R128_ROP3_S |
			  R128_DP_SRC_SOURCE_MEMORY |
			  R128_GMC_CLR_CMP_CNTL_DIS |
			  R128_GMC_WR_MSK_DIS );

		OUT_RING( dev_priv->depth_pitch_offset_c );
		OUT_RING( dev_priv->span_pitch_offset_c );

		OUT_RING( (x[i] << 16) | y[i] );
		OUT_RING( (i << 16) | 0 );
		OUT_RING( (1 << 16) | 1 );

		ADVANCE_RING();
	}

	drm_free( x, xbuf_size, DRM_MEM_BUFS );
	drm_free( y, ybuf_size, DRM_MEM_BUFS );

	return 0;
}

/* ================================================================
 * Polygon stipple
 */

static void r128_cce_dispatch_stipple( drm_device_t *dev, u32 *stipple )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	BEGIN_RING( 33 );
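	/* Load the 32x32 stipple pattern, one dword per row, into the
	 * brush data registers.
	 */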
	OUT_RING( CCE_PACKET0( R128_BRUSH_DATA0, 31 ) );
	for ( i = 0 ; i < 32 ; i++ ) {
		OUT_RING( stipple[i] );
	}

	ADVANCE_RING();
}

/* ================================================================
 * IOCTL functions
 */

static int r128_cce_clear( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_r128_clear_t clear;
	DRM_DEBUG( "\n" );

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( clear, (drm_r128_clear_t __user *) data,
				  sizeof(clear) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_clear( dev, &clear );
	COMMIT_RING();

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

	return 0;
}

static int r128_do_init_pageflip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );
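	/* Save the current CRTC offset registers so they can be restored
	 * when page flipping is cleaned up.
	 */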
	dev_priv->crtc_offset =      R128_READ( R128_CRTC_OFFSET );
	dev_priv->crtc_offset_cntl = R128_READ( R128_CRTC_OFFSET_CNTL );

	R128_WRITE( R128_CRTC_OFFSET, dev_priv->front_offset );
	R128_WRITE( R128_CRTC_OFFSET_CNTL,
		    dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL );

	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

	return 0;
}

int r128_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "\n" );

	R128_WRITE( R128_CRTC_OFFSET, dev_priv->crtc_offset );
	R128_WRITE( R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl );

	if (dev_priv->current_page != 0) {
		r128_cce_dispatch_flip( dev );
		COMMIT_RING();
	}

	dev_priv->page_flipping = 0;
	return 0;
}

/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */
static int r128_cce_flip( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if (!dev_priv->page_flipping)
		r128_do_init_pageflip( dev );

	r128_cce_dispatch_flip( dev );

	COMMIT_RING();
	return 0;
}

static int r128_cce_swap( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	LOCK_TEST_WITH_RETURN( dev, filp );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	if ( sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS )
		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

	r128_cce_dispatch_swap( dev );
	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
					R128_UPLOAD_MASKS);

	COMMIT_RING();
	return 0;
}

static int r128_cce_vertex( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_vertex_t vertex;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( vertex, (drm_r128_vertex_t __user *) data,
				  sizeof(vertex) );

	DRM_DEBUG( "pid=%d index=%d count=%d discard=%d\n",
		   DRM_CURRENTPID,
		   vertex.idx, vertex.count, vertex.discard );

	if ( vertex.idx < 0 || vertex.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   vertex.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( vertex.prim < 0 ||
	     vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
		DRM_ERROR( "buffer prim %d\n", vertex.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[vertex.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", vertex.idx );
		return DRM_ERR(EINVAL);
	}

	buf->used = vertex.count;
	buf_priv->prim = vertex.prim;
	buf_priv->discard = vertex.discard;

	r128_cce_dispatch_vertex( dev, buf );

	COMMIT_RING();
	return 0;
}

static int r128_cce_indices( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indices_t elts;
	int count;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( elts, (drm_r128_indices_t __user *) data,
				  sizeof(elts) );

	DRM_DEBUG( "pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
		   elts.idx, elts.start, elts.end, elts.discard );

	if ( elts.idx < 0 || elts.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   elts.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}
	if ( elts.prim < 0 ||
	     elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 ) {
		DRM_ERROR( "buffer prim %d\n", elts.prim );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf = dma->buflist[elts.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", elts.idx );
		return DRM_ERR(EINVAL);
	}

	count = (elts.end - elts.start) / sizeof(u16);
	elts.start -= R128_INDEX_PRIM_OFFSET;

	if ( elts.start & 0x7 ) {
		DRM_ERROR( "misaligned buffer 0x%x\n", elts.start );
		return DRM_ERR(EINVAL);
	}
	if ( elts.start < buf->used ) {
		DRM_ERROR( "no header 0x%x - 0x%x\n", elts.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	buf->used = elts.end;
	buf_priv->prim = elts.prim;
	buf_priv->discard = elts.discard;

	r128_cce_dispatch_indices( dev, buf, elts.start, elts.end, count );

	COMMIT_RING();
	return 0;
}

static int r128_cce_blit( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_blit_t blit;
	int ret;

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( blit, (drm_r128_blit_t __user *) data,
				  sizeof(blit) );

	DRM_DEBUG( "pid=%d index=%d\n", DRM_CURRENTPID, blit.idx );

	if ( blit.idx < 0 || blit.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   blit.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	ret = r128_cce_dispatch_blit( filp, dev, &blit );

	COMMIT_RING();
	return ret;
}

static int r128_cce_depth( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_depth_t depth;
	int ret;

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( depth, (drm_r128_depth_t __user *) data,
				  sizeof(depth) );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	ret = DRM_ERR(EINVAL);
	switch ( depth.func ) {
	case R128_WRITE_SPAN:
		ret = r128_cce_dispatch_write_span( dev, &depth );
		break;
	case R128_WRITE_PIXELS:
		ret = r128_cce_dispatch_write_pixels( dev, &depth );
		break;
	case R128_READ_SPAN:
		ret = r128_cce_dispatch_read_span( dev, &depth );
		break;
	case R128_READ_PIXELS:
		ret = r128_cce_dispatch_read_pixels( dev, &depth );
		break;
	}

	COMMIT_RING();
	return ret;
}

static int r128_cce_stipple( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_stipple_t stipple;
	u32 mask[32];

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL( stipple, (drm_r128_stipple_t __user *) data,
				  sizeof(stipple) );

	if ( DRM_COPY_FROM_USER( &mask, stipple.mask,
				 32 * sizeof(u32) ) )
		return DRM_ERR( EFAULT );

	RING_SPACE_TEST_WITH_RETURN( dev_priv );

	r128_cce_dispatch_stipple( dev, mask );

	COMMIT_RING();
	return 0;
}

static int r128_cce_indirect( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_r128_buf_priv_t *buf_priv;
	drm_r128_indirect_t indirect;
#if 0
	RING_LOCALS;
#endif

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( indirect, (drm_r128_indirect_t __user *) data,
				  sizeof(indirect) );

	DRM_DEBUG( "indirect: idx=%d s=%d e=%d d=%d\n",
		   indirect.idx, indirect.start,
		   indirect.end, indirect.discard );

	if ( indirect.idx < 0 || indirect.idx >= dma->buf_count ) {
		DRM_ERROR( "buffer index %d (of %d max)\n",
			   indirect.idx, dma->buf_count - 1 );
		return DRM_ERR(EINVAL);
	}

	buf = dma->buflist[indirect.idx];
	buf_priv = buf->dev_private;

	if ( buf->filp != filp ) {
		DRM_ERROR( "process %d using buffer owned by %p\n",
			   DRM_CURRENTPID, buf->filp );
		return DRM_ERR(EINVAL);
	}
	if ( buf->pending ) {
		DRM_ERROR( "sending pending buffer %d\n", indirect.idx );
		return DRM_ERR(EINVAL);
	}

	if ( indirect.start < buf->used ) {
		DRM_ERROR( "reusing indirect: start=0x%x actual=0x%x\n",
			   indirect.start, buf->used );
		return DRM_ERR(EINVAL);
	}

	RING_SPACE_TEST_WITH_RETURN( dev_priv );
	VB_AGE_TEST_WITH_RETURN( dev_priv );

	buf->used = indirect.end;
	buf_priv->discard = indirect.discard;

#if 0
	/* Wait for the 3D stream to idle before the indirect buffer
	 * containing 2D acceleration commands is processed.
	 */
	BEGIN_RING( 2 );
	RADEON_WAIT_UNTIL_3D_IDLE();
	ADVANCE_RING();
#endif

	/* Dispatch the indirect buffer full of commands from the
	 * X server.  This is insecure and is thus only available to
	 * privileged clients.
	 */
	r128_cce_dispatch_indirect( dev, buf, indirect.start, indirect.end );

	COMMIT_RING();
	return 0;
}

static int r128_getparam( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_r128_private_t *dev_priv = dev->dev_private;
	drm_r128_getparam_t param;
	int value;

	if ( !dev_priv ) {
		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL( param, (drm_r128_getparam_t __user *)data,
				  sizeof(param) );

	DRM_DEBUG( "pid=%d\n", DRM_CURRENTPID );

	switch( param.param ) {
	case R128_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	default:
		return DRM_ERR(EINVAL);
	}

	if ( DRM_COPY_TO_USER( param.value, &value, sizeof(int) ) ) {
		DRM_ERROR( "copy_to_user\n" );
		return DRM_ERR(EFAULT);
	}

	return 0;
}

void r128_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if ( dev->dev_private ) {
		drm_r128_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			r128_do_cleanup_pageflip( dev );
		}
	}
}

void r128_driver_pretakedown(drm_device_t *dev)
{
	r128_do_cleanup_cce( dev );
}

drm_ioctl_desc_t r128_ioctls[] = {
	[DRM_IOCTL_NR(DRM_R128_INIT)]       = { r128_cce_init,     1, 1 },
	[DRM_IOCTL_NR(DRM_R128_CCE_START)]  = { r128_cce_start,    1, 1 },
	[DRM_IOCTL_NR(DRM_R128_CCE_STOP)]   = { r128_cce_stop,     1, 1 },
	[DRM_IOCTL_NR(DRM_R128_CCE_RESET)]  = { r128_cce_reset,    1, 1 },
	[DRM_IOCTL_NR(DRM_R128_CCE_IDLE)]   = { r128_cce_idle,     1, 0 },
	[DRM_IOCTL_NR(DRM_R128_RESET)]      = { r128_engine_reset, 1, 0 },
	[DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = { r128_fullscreen,   1, 0 },
	[DRM_IOCTL_NR(DRM_R128_SWAP)]       = { r128_cce_swap,     1, 0 },
	[DRM_IOCTL_NR(DRM_R128_FLIP)]       = { r128_cce_flip,     1, 0 },
	[DRM_IOCTL_NR(DRM_R128_CLEAR)]      = { r128_cce_clear,    1, 0 },
	[DRM_IOCTL_NR(DRM_R128_VERTEX)]     = { r128_cce_vertex,   1, 0 },
	[DRM_IOCTL_NR(DRM_R128_INDICES)]    = { r128_cce_indices,  1, 0 },
	[DRM_IOCTL_NR(DRM_R128_BLIT)]       = { r128_cce_blit,     1, 0 },
	[DRM_IOCTL_NR(DRM_R128_DEPTH)]      = { r128_cce_depth,    1, 0 },
	[DRM_IOCTL_NR(DRM_R128_STIPPLE)]    = { r128_cce_stipple,  1, 0 },
	[DRM_IOCTL_NR(DRM_R128_INDIRECT)]   = { r128_cce_indirect, 1, 1 },
	[DRM_IOCTL_NR(DRM_R128_GETPARAM)]   = { r128_getparam,     1, 0 },
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);