/* r128_state.c -- State support for r128 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
 */
/* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 */
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
/* ================================================================
 * CCE hardware state programming functions
 */
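/* Program the three auxiliary scissor registers (AUX1..AUX3_SC_*) from a
 * list of cliprects.  At most three rectangles are emitted per call, so
 * callers walk longer box lists three at a time.
 */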
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
                                 drm_clip_rect_t * boxes, int count)
{
        u32 aux_sc_cntl = 0x00000000;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING((count < 3 ? count : 3) * 5 + 2);

        if (count >= 1) {
                OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
                OUT_RING(boxes[0].x1);
                OUT_RING(boxes[0].x2 - 1);
                OUT_RING(boxes[0].y1);
                OUT_RING(boxes[0].y2 - 1);

                aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
        }
        if (count >= 2) {
                OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
                OUT_RING(boxes[1].x1);
                OUT_RING(boxes[1].x2 - 1);
                OUT_RING(boxes[1].y1);
                OUT_RING(boxes[1].y2 - 1);

                aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
        }
        if (count >= 3) {
                OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
                OUT_RING(boxes[2].x1);
                OUT_RING(boxes[2].x2 - 1);
                OUT_RING(boxes[2].y1);
                OUT_RING(boxes[2].y2 - 1);

                aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
        }

        OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
        OUT_RING(aux_sc_cntl);

        ADVANCE_RING();
}
static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
        OUT_RING(ctx->scale_3d_cntl);

        ADVANCE_RING();
}
static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(13);

        OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
        OUT_RING(ctx->dst_pitch_offset_c);
        OUT_RING(ctx->dp_gui_master_cntl_c);
        OUT_RING(ctx->sc_top_left_c);
        OUT_RING(ctx->sc_bottom_right_c);
        OUT_RING(ctx->z_offset_c);
        OUT_RING(ctx->z_pitch_c);
        OUT_RING(ctx->z_sten_cntl_c);
        OUT_RING(ctx->tex_cntl_c);
        OUT_RING(ctx->misc_3d_state_cntl_reg);
        OUT_RING(ctx->texture_clr_cmp_clr_c);
        OUT_RING(ctx->texture_clr_cmp_msk_c);
        OUT_RING(ctx->fog_color_c);

        ADVANCE_RING();
}
static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(3);

        OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
        OUT_RING(ctx->setup_cntl);
        OUT_RING(ctx->pm4_vc_fpu_setup);

        ADVANCE_RING();
}
static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(5);

        OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
        OUT_RING(ctx->dp_write_mask);

        OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
        OUT_RING(ctx->sten_ref_mask_c);
        OUT_RING(ctx->plane_3d_mask_c);

        ADVANCE_RING();
}
static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
        OUT_RING(ctx->window_xy_offset);

        ADVANCE_RING();
}
static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
        drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
        int i;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);

        OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
                             2 + R128_MAX_TEXTURE_LEVELS));
        OUT_RING(tex->tex_cntl);
        OUT_RING(tex->tex_combine_cntl);
        OUT_RING(ctx->tex_size_pitch_c);
        for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
                OUT_RING(tex->tex_offset[i]);
        }

        OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
        OUT_RING(ctx->constant_color_c);
        OUT_RING(tex->tex_border_color);

        ADVANCE_RING();
}
static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
        int i;
        RING_LOCALS;
        DRM_DEBUG(" %s\n", __FUNCTION__);

        BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);

        OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
        OUT_RING(tex->tex_cntl);
        OUT_RING(tex->tex_combine_cntl);
        for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
                OUT_RING(tex->tex_offset[i]);
        }

        OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
        OUT_RING(tex->tex_border_color);

        ADVANCE_RING();
}
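/* Flush every state block flagged dirty in the SAREA to the hardware,
 * clearing each dirty bit as the corresponding r128_emit_*() helper runs.
 */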
static void r128_emit_state(drm_r128_private_t * dev_priv)
{
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned int dirty = sarea_priv->dirty;

        DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);

        if (dirty & R128_UPLOAD_CORE) {
                r128_emit_core(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_CORE;
        }

        if (dirty & R128_UPLOAD_CONTEXT) {
                r128_emit_context(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
        }

        if (dirty & R128_UPLOAD_SETUP) {
                r128_emit_setup(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
        }

        if (dirty & R128_UPLOAD_MASKS) {
                r128_emit_masks(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
        }

        if (dirty & R128_UPLOAD_WINDOW) {
                r128_emit_window(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
        }

        if (dirty & R128_UPLOAD_TEX0) {
                r128_emit_tex0(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
        }

        if (dirty & R128_UPLOAD_TEX1) {
                r128_emit_tex1(dev_priv);
                sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
        }

        /* Turn off the texture cache flushing */
        sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;

        sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
}
#if R128_PERFORMANCE_BOXES
/* ================================================================
 * Performance monitoring functions
 */
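/* Fill a small w x h rectangle of the back buffer with a solid RGB color
 * via a CNTL_PAINT_MULTI packet; used only for the performance boxes.
 */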
static void r128_clear_box(drm_r128_private_t * dev_priv,
                           int x, int y, int w, int h, int r, int g, int b)
{
        u32 pitch, offset;
        u32 fb_bpp, color;
        RING_LOCALS;

        switch (dev_priv->fb_bpp) {
        case 16:
                fb_bpp = R128_GMC_DST_16BPP;
                color = (((r & 0xf8) << 8) |
                         ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
                break;
        case 24:
                fb_bpp = R128_GMC_DST_24BPP;
                color = ((r << 16) | (g << 8) | b);
                break;
        case 32:
                fb_bpp = R128_GMC_DST_32BPP;
                color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
                break;
        default:
                return;
        }

        offset = dev_priv->back_offset;
        pitch = dev_priv->back_pitch >> 3;

        BEGIN_RING(6);

        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                 R128_GMC_BRUSH_SOLID_COLOR |
                 fb_bpp |
                 R128_GMC_SRC_DATATYPE_COLOR |
                 R128_ROP3_P |
                 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);

        OUT_RING((pitch << 21) | (offset >> 5));
        OUT_RING(color);

        OUT_RING((x << 16) | y);
        OUT_RING((w << 16) | h);

        ADVANCE_RING();
}
static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
{
        if (atomic_read(&dev_priv->idle_count) == 0) {
                r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
        } else {
                atomic_set(&dev_priv->idle_count, 0);
        }
}

#endif
/* ================================================================
 * CCE command dispatch functions
 */
static void r128_print_dirty(const char *msg, unsigned int flags)
{
        DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
                 msg,
                 flags,
                 (flags & R128_UPLOAD_CORE) ? "core, " : "",
                 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
                 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
                 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
                 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
                 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
                 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
                 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
                 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
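/* Clear the requested front/back/depth buffers for every cliprect in the
 * SAREA.  When page flipping has swapped the buffers, the R128_FRONT and
 * R128_BACK flags are exchanged first so the clears hit the right surfaces.
 */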
static void r128_cce_dispatch_clear(drm_device_t * dev,
                                    drm_r128_clear_t * clear)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int nbox = sarea_priv->nbox;
        drm_clip_rect_t *pbox = sarea_priv->boxes;
        unsigned int flags = clear->flags;
        int i;
        RING_LOCALS;
        DRM_DEBUG("%s\n", __FUNCTION__);

        if (dev_priv->page_flipping && dev_priv->current_page == 1) {
                unsigned int tmp = flags;

                flags &= ~(R128_FRONT | R128_BACK);
                if (tmp & R128_FRONT)
                        flags |= R128_BACK;
                if (tmp & R128_BACK)
                        flags |= R128_FRONT;
        }

        for (i = 0; i < nbox; i++) {
                int x = pbox[i].x1;
                int y = pbox[i].y1;
                int w = pbox[i].x2 - x;
                int h = pbox[i].y2 - y;

                DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
                          pbox[i].x1, pbox[i].y1, pbox[i].x2,
                          pbox[i].y2, flags);

                if (flags & (R128_FRONT | R128_BACK)) {
                        BEGIN_RING(2);

                        OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
                        OUT_RING(clear->color_mask);

                        ADVANCE_RING();
                }

                if (flags & R128_FRONT) {
                        BEGIN_RING(6);

                        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                 R128_GMC_BRUSH_SOLID_COLOR |
                                 (dev_priv->color_fmt << 8) |
                                 R128_GMC_SRC_DATATYPE_COLOR |
                                 R128_ROP3_P |
                                 R128_GMC_CLR_CMP_CNTL_DIS |
                                 R128_GMC_AUX_CLIP_DIS);

                        OUT_RING(dev_priv->front_pitch_offset_c);
                        OUT_RING(clear->clear_color);

                        OUT_RING((x << 16) | y);
                        OUT_RING((w << 16) | h);

                        ADVANCE_RING();
                }

                if (flags & R128_BACK) {
                        BEGIN_RING(6);

                        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                 R128_GMC_BRUSH_SOLID_COLOR |
                                 (dev_priv->color_fmt << 8) |
                                 R128_GMC_SRC_DATATYPE_COLOR |
                                 R128_ROP3_P |
                                 R128_GMC_CLR_CMP_CNTL_DIS |
                                 R128_GMC_AUX_CLIP_DIS);

                        OUT_RING(dev_priv->back_pitch_offset_c);
                        OUT_RING(clear->clear_color);

                        OUT_RING((x << 16) | y);
                        OUT_RING((w << 16) | h);

                        ADVANCE_RING();
                }

                if (flags & R128_DEPTH) {
                        BEGIN_RING(6);

                        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                 R128_GMC_BRUSH_SOLID_COLOR |
                                 (dev_priv->depth_fmt << 8) |
                                 R128_GMC_SRC_DATATYPE_COLOR |
                                 R128_ROP3_P |
                                 R128_GMC_CLR_CMP_CNTL_DIS |
                                 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

                        OUT_RING(dev_priv->depth_pitch_offset_c);
                        OUT_RING(clear->clear_depth);

                        OUT_RING((x << 16) | y);
                        OUT_RING((w << 16) | h);

                        ADVANCE_RING();
                }
        }
}
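/* Copy the back buffer to the front buffer for every cliprect, then bump
 * the last_frame counter that clients use to throttle their frame rate.
 */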
static void r128_cce_dispatch_swap(drm_device_t * dev)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int nbox = sarea_priv->nbox;
        drm_clip_rect_t *pbox = sarea_priv->boxes;
        int i;
        RING_LOCALS;
        DRM_DEBUG("%s\n", __FUNCTION__);

#if R128_PERFORMANCE_BOXES
        /* Do some trivial performance monitoring...
         */
        r128_cce_performance_boxes(dev_priv);
#endif

        for (i = 0; i < nbox; i++) {
                int x = pbox[i].x1;
                int y = pbox[i].y1;
                int w = pbox[i].x2 - x;
                int h = pbox[i].y2 - y;

                BEGIN_RING(7);

                OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
                OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
                         R128_GMC_DST_PITCH_OFFSET_CNTL |
                         R128_GMC_BRUSH_NONE |
                         (dev_priv->color_fmt << 8) |
                         R128_GMC_SRC_DATATYPE_COLOR |
                         R128_ROP3_S |
                         R128_DP_SRC_SOURCE_MEMORY |
                         R128_GMC_CLR_CMP_CNTL_DIS |
                         R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);

                /* Make this work even if front & back are flipped:
                 */
                if (dev_priv->current_page == 0) {
                        OUT_RING(dev_priv->back_pitch_offset_c);
                        OUT_RING(dev_priv->front_pitch_offset_c);
                } else {
                        OUT_RING(dev_priv->front_pitch_offset_c);
                        OUT_RING(dev_priv->back_pitch_offset_c);
                }

                OUT_RING((x << 16) | y);
                OUT_RING((x << 16) | y);
                OUT_RING((w << 16) | h);

                ADVANCE_RING();
        }

        /* Increment the frame counter.  The client-side 3D driver must
         * throttle the framerate by waiting for this value before
         * performing the swapbuffer ioctl.
         */
        dev_priv->sarea_priv->last_frame++;

        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
        OUT_RING(dev_priv->sarea_priv->last_frame);

        ADVANCE_RING();
}
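/* Flip the scanout between front and back buffers by reprogramming
 * R128_CRTC_OFFSET after waiting for the pending flip to complete, then
 * update the frame counter and the current-page bookkeeping.
 */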
static void r128_cce_dispatch_flip(drm_device_t * dev)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;
        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
                  __FUNCTION__,
                  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);

#if R128_PERFORMANCE_BOXES
        /* Do some trivial performance monitoring...
         */
        r128_cce_performance_boxes(dev_priv);
#endif

        BEGIN_RING(4);

        R128_WAIT_UNTIL_PAGE_FLIPPED();
        OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));

        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
        } else {
                OUT_RING(dev_priv->front_offset);
        }

        ADVANCE_RING();

        /* Increment the frame counter.  The client-side 3D driver must
         * throttle the framerate by waiting for this value before
         * performing the swapbuffer ioctl.
         */
        dev_priv->sarea_priv->last_frame++;
        dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
            1 - dev_priv->current_page;

        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
        OUT_RING(dev_priv->sarea_priv->last_frame);

        ADVANCE_RING();
}
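/* Emit a vertex buffer as a VC PRIM_WALK_LIST draw, re-emitting any dirty
 * state first and repeating the draw for each group of up to three
 * cliprects.  Discarded buffers are aged via R128_LAST_DISPATCH_REG.
 */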
static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_buf_priv_t *buf_priv = buf->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int format = sarea_priv->vc_format;
        int offset = buf->bus_address;
        int size = buf->used;
        int prim = buf_priv->prim;
        int i = 0;
        RING_LOCALS;
        DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);

        if (0)
                r128_print_dirty("dispatch_vertex", sarea_priv->dirty);

        if (buf->used) {
                buf_priv->dispatched = 1;

                if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
                        r128_emit_state(dev_priv);
                }

                do {
                        /* Emit the next set of up to three cliprects */
                        if (i < sarea_priv->nbox) {
                                r128_emit_clip_rects(dev_priv,
                                                     &sarea_priv->boxes[i],
                                                     sarea_priv->nbox - i);
                        }

                        /* Emit the vertex buffer rendering commands */
                        BEGIN_RING(5);

                        OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
                        OUT_RING(offset);
                        OUT_RING(size);
                        OUT_RING(format);
                        OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
                                 (size << R128_CCE_VC_CNTL_NUM_SHIFT));

                        ADVANCE_RING();

                        i += 3;
                } while (i < sarea_priv->nbox);
        }

        if (buf_priv->discard) {
                buf_priv->age = dev_priv->sarea_priv->last_dispatch;

                /* Emit the vertex buffer age */
                BEGIN_RING(2);

                OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
                OUT_RING(buf_priv->age);

                ADVANCE_RING();

                buf->pending = 1;
                buf->used = 0;
                /* FIXME: Check dispatched field */
                buf_priv->dispatched = 0;
        }

        dev_priv->sarea_priv->last_dispatch++;

        sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
        sarea_priv->nbox = 0;
}
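/* Point the CCE at an externally built command buffer via PM4_IW_INDOFF,
 * padding odd-length buffers with a Type-2 packet so the dword count stays
 * even, and age the buffer if the caller asked for it to be discarded.
 */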
static void r128_cce_dispatch_indirect(drm_device_t * dev,
                                       drm_buf_t * buf, int start, int end)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_buf_priv_t *buf_priv = buf->dev_private;
        RING_LOCALS;
        DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);

        if (start != end) {
                int offset = buf->bus_address + start;
                int dwords = (end - start + 3) / sizeof(u32);

                /* Indirect buffer data must be an even number of
                 * dwords, so if we've been given an odd number we must
                 * pad the data with a Type-2 CCE packet.
                 */
                if (dwords & 1) {
                        u32 *data = (u32 *)
                            ((char *)dev->agp_buffer_map->handle
                             + buf->offset + start);
                        data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
                }

                buf_priv->dispatched = 1;

                /* Fire off the indirect buffer */
                BEGIN_RING(3);

                OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
                OUT_RING(offset);
                OUT_RING(dwords);

                ADVANCE_RING();
        }

        if (buf_priv->discard) {
                buf_priv->age = dev_priv->sarea_priv->last_dispatch;

                /* Emit the indirect buffer age */
                BEGIN_RING(2);

                OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
                OUT_RING(buf_priv->age);

                ADVANCE_RING();

                buf->pending = 1;
                buf->used = 0;
                /* FIXME: Check dispatched field */
                buf_priv->dispatched = 0;
        }

        dev_priv->sarea_priv->last_dispatch++;
}
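/* Render an indexed primitive: a 3D_RNDR_GEN_INDX_PRIM header is written
 * directly into the client's buffer ahead of the index data, and the whole
 * region is then submitted once per group of up to three cliprects through
 * r128_cce_dispatch_indirect().
 */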
static void r128_cce_dispatch_indices(drm_device_t * dev,
                                      drm_buf_t * buf,
                                      int start, int end, int count)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_buf_priv_t *buf_priv = buf->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int format = sarea_priv->vc_format;
        int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
        int prim = buf_priv->prim;
        u32 *data;
        int dwords;
        int i = 0;
        RING_LOCALS;
        DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);

        if (0)
                r128_print_dirty("dispatch_indices", sarea_priv->dirty);

        if (start != end) {
                buf_priv->dispatched = 1;

                if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
                        r128_emit_state(dev_priv);
                }

                dwords = (end - start + 3) / sizeof(u32);

                data = (u32 *) ((char *)dev->agp_buffer_map->handle
                                + buf->offset + start);

                data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
                                                  dwords - 2));

                data[1] = cpu_to_le32(offset);
                data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
                data[3] = cpu_to_le32(format);
                data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
                                       (count << 16)));

                if (count & 0x1) {
#ifdef __LITTLE_ENDIAN
                        data[dwords - 1] &= 0x0000ffff;
#else
                        data[dwords - 1] &= 0xffff0000;
#endif
                }

                do {
                        /* Emit the next set of up to three cliprects */
                        if (i < sarea_priv->nbox) {
                                r128_emit_clip_rects(dev_priv,
                                                     &sarea_priv->boxes[i],
                                                     sarea_priv->nbox - i);
                        }

                        r128_cce_dispatch_indirect(dev, buf, start, end);

                        i += 3;
                } while (i < sarea_priv->nbox);
        }

        if (buf_priv->discard) {
                buf_priv->age = dev_priv->sarea_priv->last_dispatch;

                /* Emit the vertex buffer age */
                BEGIN_RING(2);

                OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
                OUT_RING(buf_priv->age);

                ADVANCE_RING();

                buf->pending = 1;
                /* FIXME: Check dispatched field */
                buf_priv->dispatched = 0;
        }

        dev_priv->sarea_priv->last_dispatch++;

        sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
        sarea_priv->nbox = 0;
}
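/* Upload client-supplied texture data as a HOSTDATA blit.  The pixel cache
 * is flushed and marked read-invalid before the blit and flushed again
 * afterwards so texture reads never see stale data.
 */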
static int r128_cce_dispatch_blit(DRMFILE filp,
                                  drm_device_t * dev, drm_r128_blit_t * blit)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_r128_buf_priv_t *buf_priv;
        u32 *data;
        int dword_shift, dwords;
        RING_LOCALS;
        DRM_DEBUG("\n");

        /* The compiler won't optimize away a division by a variable,
         * even if the only legal values are powers of two.  Thus, we'll
         * use a shift instead.
         */
        switch (blit->format) {
        case R128_DATATYPE_ARGB8888:
                dword_shift = 0;
                break;
        case R128_DATATYPE_ARGB1555:
        case R128_DATATYPE_RGB565:
        case R128_DATATYPE_ARGB4444:
        case R128_DATATYPE_YVYU422:
        case R128_DATATYPE_VYUY422:
                dword_shift = 1;
                break;
        case R128_DATATYPE_CI8:
        case R128_DATATYPE_RGB8:
                dword_shift = 2;
                break;
        default:
                DRM_ERROR("invalid blit format %d\n", blit->format);
                return DRM_ERR(EINVAL);
        }

        /* Flush the pixel cache, and mark the contents as Read Invalid.
         * This ensures no pixel data gets mixed up with the texture
         * data from the host data blit, otherwise part of the texture
         * image may be corrupted.
         */
        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
        OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);

        ADVANCE_RING();

        /* Dispatch the indirect buffer.
         */
        buf = dma->buflist[blit->idx];
        buf_priv = buf->dev_private;

        if (buf->filp != filp) {
                DRM_ERROR("process %d using buffer owned by %p\n",
                          DRM_CURRENTPID, buf->filp);
                return DRM_ERR(EINVAL);
        }
        if (buf->pending) {
                DRM_ERROR("sending pending buffer %d\n", blit->idx);
                return DRM_ERR(EINVAL);
        }

        buf_priv->discard = 1;

        dwords = (blit->width * blit->height) >> dword_shift;

        data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);

        data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
        data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
                               R128_GMC_BRUSH_NONE |
                               (blit->format << 8) |
                               R128_GMC_SRC_DATATYPE_COLOR |
                               R128_ROP3_S |
                               R128_DP_SRC_SOURCE_HOST_DATA |
                               R128_GMC_CLR_CMP_CNTL_DIS |
                               R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));

        data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
        data[3] = cpu_to_le32(0xffffffff);
        data[4] = cpu_to_le32(0xffffffff);
        data[5] = cpu_to_le32((blit->y << 16) | blit->x);
        data[6] = cpu_to_le32((blit->height << 16) | blit->width);
        data[7] = cpu_to_le32(dwords);

        buf->used = (dwords + 8) * sizeof(u32);

        r128_cce_dispatch_indirect(dev, buf, 0, buf->used);

        /* Flush the pixel cache after the blit completes.  This ensures
         * the texture data is written out to memory before rendering
         * continues.
         */
        BEGIN_RING(2);

        OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
        OUT_RING(R128_PC_FLUSH_GUI);

        ADVANCE_RING();

        return 0;
}
/* ================================================================
 * Tiled depth buffer management
 *
 * FIXME: These should all set the destination write mask for when we
 * have hardware stencil support.
 */
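/* Write a horizontal span of depth values starting at (x, y), one 1x1
 * paint per pixel, honouring an optional per-pixel write mask copied in
 * from user space.
 */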
static int r128_cce_dispatch_write_span(drm_device_t * dev,
                                        drm_r128_depth_t * depth)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        int count, x, y;
        u32 *buffer;
        u8 *mask;
        int i, buffer_size, mask_size;
        RING_LOCALS;
        DRM_DEBUG("\n");

        count = depth->n;
        if (count > 4096 || count <= 0)
                return DRM_ERR(EMSGSIZE);

        if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
                return DRM_ERR(EFAULT);
        }
        if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
                return DRM_ERR(EFAULT);
        }

        buffer_size = depth->n * sizeof(u32);
        buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
        if (buffer == NULL)
                return DRM_ERR(ENOMEM);
        if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
                drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }

        mask_size = depth->n * sizeof(u8);
        if (depth->mask) {
                mask = drm_alloc(mask_size, DRM_MEM_BUFS);
                if (mask == NULL) {
                        drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                        return DRM_ERR(ENOMEM);
                }
                if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
                        drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                        drm_free(mask, mask_size, DRM_MEM_BUFS);
                        return DRM_ERR(EFAULT);
                }

                for (i = 0; i < count; i++, x++) {
                        if (mask[i]) {
                                BEGIN_RING(6);

                                OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                                OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                         R128_GMC_BRUSH_SOLID_COLOR |
                                         (dev_priv->depth_fmt << 8) |
                                         R128_GMC_SRC_DATATYPE_COLOR |
                                         R128_ROP3_P |
                                         R128_GMC_CLR_CMP_CNTL_DIS |
                                         R128_GMC_WR_MSK_DIS);

                                OUT_RING(dev_priv->depth_pitch_offset_c);
                                OUT_RING(buffer[i]);

                                OUT_RING((x << 16) | y);
                                OUT_RING((1 << 16) | 1);

                                ADVANCE_RING();
                        }
                }

                drm_free(mask, mask_size, DRM_MEM_BUFS);
        } else {
                for (i = 0; i < count; i++, x++) {
                        BEGIN_RING(6);

                        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                 R128_GMC_BRUSH_SOLID_COLOR |
                                 (dev_priv->depth_fmt << 8) |
                                 R128_GMC_SRC_DATATYPE_COLOR |
                                 R128_ROP3_P |
                                 R128_GMC_CLR_CMP_CNTL_DIS |
                                 R128_GMC_WR_MSK_DIS);

                        OUT_RING(dev_priv->depth_pitch_offset_c);
                        OUT_RING(buffer[i]);

                        OUT_RING((x << 16) | y);
                        OUT_RING((1 << 16) | 1);

                        ADVANCE_RING();
                }
        }

        drm_free(buffer, buffer_size, DRM_MEM_BUFS);

        return 0;
}
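/* Write an arbitrary set of depth values: per-pixel x/y coordinate arrays,
 * the depth values and an optional write mask are all copied in from user
 * space, then each selected pixel is painted individually.
 */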
static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
                                          drm_r128_depth_t * depth)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        int count, *x, *y;
        u32 *buffer;
        u8 *mask;
        int i, xbuf_size, ybuf_size, buffer_size, mask_size;
        RING_LOCALS;
        DRM_DEBUG("\n");

        count = depth->n;
        if (count > 4096 || count <= 0)
                return DRM_ERR(EMSGSIZE);

        xbuf_size = count * sizeof(*x);
        ybuf_size = count * sizeof(*y);
        x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
        if (x == NULL) {
                return DRM_ERR(ENOMEM);
        }
        y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
        if (y == NULL) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }
        if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }

        buffer_size = depth->n * sizeof(u32);
        buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
        if (buffer == NULL) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }

        if (depth->mask) {
                mask_size = depth->n * sizeof(u8);
                mask = drm_alloc(mask_size, DRM_MEM_BUFS);
                if (mask == NULL) {
                        drm_free(x, xbuf_size, DRM_MEM_BUFS);
                        drm_free(y, ybuf_size, DRM_MEM_BUFS);
                        drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                        return DRM_ERR(ENOMEM);
                }
                if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
                        drm_free(x, xbuf_size, DRM_MEM_BUFS);
                        drm_free(y, ybuf_size, DRM_MEM_BUFS);
                        drm_free(buffer, buffer_size, DRM_MEM_BUFS);
                        drm_free(mask, mask_size, DRM_MEM_BUFS);
                        return DRM_ERR(EFAULT);
                }

                for (i = 0; i < count; i++) {
                        if (mask[i]) {
                                BEGIN_RING(6);

                                OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                                OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                         R128_GMC_BRUSH_SOLID_COLOR |
                                         (dev_priv->depth_fmt << 8) |
                                         R128_GMC_SRC_DATATYPE_COLOR |
                                         R128_ROP3_P |
                                         R128_GMC_CLR_CMP_CNTL_DIS |
                                         R128_GMC_WR_MSK_DIS);

                                OUT_RING(dev_priv->depth_pitch_offset_c);
                                OUT_RING(buffer[i]);

                                OUT_RING((x[i] << 16) | y[i]);
                                OUT_RING((1 << 16) | 1);

                                ADVANCE_RING();
                        }
                }

                drm_free(mask, mask_size, DRM_MEM_BUFS);
        } else {
                for (i = 0; i < count; i++) {
                        BEGIN_RING(6);

                        OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
                        OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
                                 R128_GMC_BRUSH_SOLID_COLOR |
                                 (dev_priv->depth_fmt << 8) |
                                 R128_GMC_SRC_DATATYPE_COLOR |
                                 R128_ROP3_P |
                                 R128_GMC_CLR_CMP_CNTL_DIS |
                                 R128_GMC_WR_MSK_DIS);

                        OUT_RING(dev_priv->depth_pitch_offset_c);
                        OUT_RING(buffer[i]);

                        OUT_RING((x[i] << 16) | y[i]);
                        OUT_RING((1 << 16) | 1);

                        ADVANCE_RING();
                }
        }

        drm_free(x, xbuf_size, DRM_MEM_BUFS);
        drm_free(y, ybuf_size, DRM_MEM_BUFS);
        drm_free(buffer, buffer_size, DRM_MEM_BUFS);

        return 0;
}
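/* Read back a span of depth values by blitting a count-by-1 strip from the
 * depth buffer into the driver's span region (span_pitch_offset_c) at (0, 0).
 */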
static int r128_cce_dispatch_read_span(drm_device_t * dev,
                                       drm_r128_depth_t * depth)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        int count, x, y;
        RING_LOCALS;
        DRM_DEBUG("\n");

        count = depth->n;
        if (count > 4096 || count <= 0)
                return DRM_ERR(EMSGSIZE);

        if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
                return DRM_ERR(EFAULT);
        }
        if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
                return DRM_ERR(EFAULT);
        }

        BEGIN_RING(7);

        OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
        OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
                 R128_GMC_DST_PITCH_OFFSET_CNTL |
                 R128_GMC_BRUSH_NONE |
                 (dev_priv->depth_fmt << 8) |
                 R128_GMC_SRC_DATATYPE_COLOR |
                 R128_ROP3_S |
                 R128_DP_SRC_SOURCE_MEMORY |
                 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

        OUT_RING(dev_priv->depth_pitch_offset_c);
        OUT_RING(dev_priv->span_pitch_offset_c);

        OUT_RING((x << 16) | y);
        OUT_RING((0 << 16) | 0);
        OUT_RING((count << 16) | 1);

        ADVANCE_RING();

        return 0;
}
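/* Read back individually addressed depth values: each (x[i], y[i]) pixel
 * is blitted from the depth buffer into column i of the span region.
 */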
static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
                                         drm_r128_depth_t * depth)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        int count, *x, *y;
        int i, xbuf_size, ybuf_size;
        RING_LOCALS;
        DRM_DEBUG("%s\n", __FUNCTION__);

        count = depth->n;
        if (count > 4096 || count <= 0)
                return DRM_ERR(EMSGSIZE);

        if (count > dev_priv->depth_pitch) {
                count = dev_priv->depth_pitch;
        }

        xbuf_size = count * sizeof(*x);
        ybuf_size = count * sizeof(*y);
        x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
        if (x == NULL) {
                return DRM_ERR(ENOMEM);
        }
        y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
        if (y == NULL) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                return DRM_ERR(ENOMEM);
        }
        if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }
        if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
                drm_free(x, xbuf_size, DRM_MEM_BUFS);
                drm_free(y, ybuf_size, DRM_MEM_BUFS);
                return DRM_ERR(EFAULT);
        }

        for (i = 0; i < count; i++) {
                BEGIN_RING(7);

                OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
                OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
                         R128_GMC_DST_PITCH_OFFSET_CNTL |
                         R128_GMC_BRUSH_NONE |
                         (dev_priv->depth_fmt << 8) |
                         R128_GMC_SRC_DATATYPE_COLOR |
                         R128_ROP3_S |
                         R128_DP_SRC_SOURCE_MEMORY |
                         R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);

                OUT_RING(dev_priv->depth_pitch_offset_c);
                OUT_RING(dev_priv->span_pitch_offset_c);

                OUT_RING((x[i] << 16) | y[i]);
                OUT_RING((i << 16) | 0);
                OUT_RING((1 << 16) | 1);

                ADVANCE_RING();
        }

        drm_free(x, xbuf_size, DRM_MEM_BUFS);
        drm_free(y, ybuf_size, DRM_MEM_BUFS);

        return 0;
}
/* ================================================================
 * Polygon stipple
 */
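/* Load a 32-entry stipple pattern into the R128_BRUSH_DATA registers. */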
static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        int i;
        RING_LOCALS;
        DRM_DEBUG("%s\n", __FUNCTION__);

        BEGIN_RING(33);

        OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
        for (i = 0; i < 32; i++) {
                OUT_RING(stipple[i]);
        }

        ADVANCE_RING();
}
/* ================================================================
 * IOCTL functions
 */
static int r128_cce_clear(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        drm_r128_clear_t clear;
        DRM_DEBUG("\n");

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
                                 sizeof(clear));

        RING_SPACE_TEST_WITH_RETURN(dev_priv);

        if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
                sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

        r128_cce_dispatch_clear(dev, &clear);
        COMMIT_RING();

        /* Make sure we restore the 3D state next time.
         */
        dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;

        return 0;
}
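/* Save the current CRTC offset registers and switch the CRTC to the front
 * buffer with flip control enabled; r128_do_cleanup_pageflip() restores
 * the saved values and, if the back page is still current, queues one
 * final flip.
 */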
static int r128_do_init_pageflip(drm_device_t * dev)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG("\n");

        dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
        dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);

        R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
        R128_WRITE(R128_CRTC_OFFSET_CNTL,
                   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);

        dev_priv->page_flipping = 1;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;

        return 0;
}
static int r128_do_cleanup_pageflip(drm_device_t * dev)
{
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG("\n");

        R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
        R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);

        if (dev_priv->current_page != 0) {
                r128_cce_dispatch_flip(dev);
                COMMIT_RING();
        }

        dev_priv->page_flipping = 0;
        return 0;
}
/* Swapping and flipping are different operations, need different ioctls.
 * They can & should be intermixed to support multiple 3d windows.
 */

static int r128_cce_flip(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        DRM_DEBUG("%s\n", __FUNCTION__);

        LOCK_TEST_WITH_RETURN(dev, filp);

        RING_SPACE_TEST_WITH_RETURN(dev_priv);

        if (!dev_priv->page_flipping)
                r128_do_init_pageflip(dev);

        r128_cce_dispatch_flip(dev);

        COMMIT_RING();
        return 0;
}
static int r128_cce_swap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
        DRM_DEBUG("%s\n", __FUNCTION__);

        LOCK_TEST_WITH_RETURN(dev, filp);

        RING_SPACE_TEST_WITH_RETURN(dev_priv);

        if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
                sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;

        r128_cce_dispatch_swap(dev);
        dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
                                        R128_UPLOAD_MASKS);

        COMMIT_RING();
        return 0;
}
static int r128_cce_vertex(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_r128_buf_priv_t *buf_priv;
        drm_r128_vertex_t vertex;

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
                                 sizeof(vertex));

        DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
                  DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);

        if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
                DRM_ERROR("buffer index %d (of %d max)\n",
                          vertex.idx, dma->buf_count - 1);
                return DRM_ERR(EINVAL);
        }
        if (vertex.prim < 0 ||
            vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
                DRM_ERROR("buffer prim %d\n", vertex.prim);
                return DRM_ERR(EINVAL);
        }

        RING_SPACE_TEST_WITH_RETURN(dev_priv);
        VB_AGE_TEST_WITH_RETURN(dev_priv);

        buf = dma->buflist[vertex.idx];
        buf_priv = buf->dev_private;

        if (buf->filp != filp) {
                DRM_ERROR("process %d using buffer owned by %p\n",
                          DRM_CURRENTPID, buf->filp);
                return DRM_ERR(EINVAL);
        }
        if (buf->pending) {
                DRM_ERROR("sending pending buffer %d\n", vertex.idx);
                return DRM_ERR(EINVAL);
        }

        buf->used = vertex.count;
        buf_priv->prim = vertex.prim;
        buf_priv->discard = vertex.discard;

        r128_cce_dispatch_vertex(dev, buf);

        COMMIT_RING();
        return 0;
}
static int r128_cce_indices(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_r128_buf_priv_t *buf_priv;
        drm_r128_indices_t elts;
        int count;

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
                                 sizeof(elts));

        DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
                  elts.idx, elts.start, elts.end, elts.discard);

        if (elts.idx < 0 || elts.idx >= dma->buf_count) {
                DRM_ERROR("buffer index %d (of %d max)\n",
                          elts.idx, dma->buf_count - 1);
                return DRM_ERR(EINVAL);
        }
        if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
                DRM_ERROR("buffer prim %d\n", elts.prim);
                return DRM_ERR(EINVAL);
        }

        RING_SPACE_TEST_WITH_RETURN(dev_priv);
        VB_AGE_TEST_WITH_RETURN(dev_priv);

        buf = dma->buflist[elts.idx];
        buf_priv = buf->dev_private;

        if (buf->filp != filp) {
                DRM_ERROR("process %d using buffer owned by %p\n",
                          DRM_CURRENTPID, buf->filp);
                return DRM_ERR(EINVAL);
        }
        if (buf->pending) {
                DRM_ERROR("sending pending buffer %d\n", elts.idx);
                return DRM_ERR(EINVAL);
        }

        count = (elts.end - elts.start) / sizeof(u16);
        elts.start -= R128_INDEX_PRIM_OFFSET;

        if (elts.start & 0x7) {
                DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
                return DRM_ERR(EINVAL);
        }
        if (elts.start < buf->used) {
                DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
                return DRM_ERR(EINVAL);
        }

        buf->used = elts.end;
        buf_priv->prim = elts.prim;
        buf_priv->discard = elts.discard;

        r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);

        COMMIT_RING();
        return 0;
}
static int r128_cce_blit(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_blit_t blit;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
                                 sizeof(blit));

        DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);

        if (blit.idx < 0 || blit.idx >= dma->buf_count) {
                DRM_ERROR("buffer index %d (of %d max)\n",
                          blit.idx, dma->buf_count - 1);
                return DRM_ERR(EINVAL);
        }

        RING_SPACE_TEST_WITH_RETURN(dev_priv);
        VB_AGE_TEST_WITH_RETURN(dev_priv);

        ret = r128_cce_dispatch_blit(filp, dev, &blit);

        COMMIT_RING();
        return ret;
}
static int r128_cce_depth(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_depth_t depth;
        int ret;

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
                                 sizeof(depth));

        RING_SPACE_TEST_WITH_RETURN(dev_priv);

        ret = DRM_ERR(EINVAL);
        switch (depth.func) {
        case R128_WRITE_SPAN:
                ret = r128_cce_dispatch_write_span(dev, &depth);
                break;
        case R128_WRITE_PIXELS:
                ret = r128_cce_dispatch_write_pixels(dev, &depth);
                break;
        case R128_READ_SPAN:
                ret = r128_cce_dispatch_read_span(dev, &depth);
                break;
        case R128_READ_PIXELS:
                ret = r128_cce_dispatch_read_pixels(dev, &depth);
                break;
        }

        COMMIT_RING();
        return ret;
}
static int r128_cce_stipple(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_stipple_t stipple;
        u32 mask[32];

        LOCK_TEST_WITH_RETURN(dev, filp);

        DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
                                 sizeof(stipple));

        if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
                return DRM_ERR(EFAULT);

        RING_SPACE_TEST_WITH_RETURN(dev_priv);

        r128_cce_dispatch_stipple(dev, mask);

        COMMIT_RING();
        return 0;
}
static int r128_cce_indirect(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_r128_buf_priv_t *buf_priv;
        drm_r128_indirect_t indirect;
#if 0
        RING_LOCALS;
#endif

        LOCK_TEST_WITH_RETURN(dev, filp);

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
                                 sizeof(indirect));

        DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
                  indirect.idx, indirect.start, indirect.end, indirect.discard);

        if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
                DRM_ERROR("buffer index %d (of %d max)\n",
                          indirect.idx, dma->buf_count - 1);
                return DRM_ERR(EINVAL);
        }

        buf = dma->buflist[indirect.idx];
        buf_priv = buf->dev_private;

        if (buf->filp != filp) {
                DRM_ERROR("process %d using buffer owned by %p\n",
                          DRM_CURRENTPID, buf->filp);
                return DRM_ERR(EINVAL);
        }
        if (buf->pending) {
                DRM_ERROR("sending pending buffer %d\n", indirect.idx);
                return DRM_ERR(EINVAL);
        }

        if (indirect.start < buf->used) {
                DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
                          indirect.start, buf->used);
                return DRM_ERR(EINVAL);
        }

        RING_SPACE_TEST_WITH_RETURN(dev_priv);
        VB_AGE_TEST_WITH_RETURN(dev_priv);

        buf->used = indirect.end;
        buf_priv->discard = indirect.discard;

#if 0
        /* Wait for the 3D stream to idle before the indirect buffer
         * containing 2D acceleration commands is processed.
         */
        BEGIN_RING(2);
        RADEON_WAIT_UNTIL_3D_IDLE();
        ADVANCE_RING();
#endif

        /* Dispatch the indirect buffer full of commands from the
         * X server.  This is insecure and is thus only available to
         * privileged clients.
         */
        r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);

        COMMIT_RING();
        return 0;
}
static int r128_getparam(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_r128_private_t *dev_priv = dev->dev_private;
        drm_r128_getparam_t param;
        int value;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
                                 sizeof(param));

        DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

        switch (param.param) {
        case R128_PARAM_IRQ_NR:
                value = dev->irq;
                break;
        default:
                return DRM_ERR(EINVAL);
        }

        if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return DRM_ERR(EFAULT);
        }

        return 0;
}
void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
{
        if (dev->dev_private) {
                drm_r128_private_t *dev_priv = dev->dev_private;
                if (dev_priv->page_flipping) {
                        r128_do_cleanup_pageflip(dev);
                }
        }
}

void r128_driver_lastclose(drm_device_t * dev)
{
        r128_do_cleanup_cce(dev);
}
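/* Ioctl dispatch table.  Entries marked DRM_MASTER|DRM_ROOT_ONLY (engine
 * init, CCE start/stop/reset and indirect buffer submission) are reserved
 * for privileged clients; the rest only require DRM authentication.
 */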
drm_ioctl_desc_t r128_ioctls[] = {
        [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
        [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
        [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
};

int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);