/* mach64_state.c -- State support for mach64 (Rage Pro) driver -*- linux-c -*-
 * Created: Sun Dec 03 19:20:26 2000 by gareth@valinux.com
 */
/*
 * Copyright 2000 Gareth Hughes
 * Copyright 2002-2003 Leif Delgass
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Leif Delgass <ldelgass@retinalburn.net>
 *    José Fonseca <j_r_fonseca@yahoo.co.uk>
 *
 * $DragonFly: src/sys/dev/drm/mach64_state.c,v 1.1 2008/04/05 18:12:29 hasso Exp $
 */

#include "drmP.h"
#include "drm.h"
#include "mach64_drm.h"
#include "mach64_drv.h"

/* Interface history:
 *
 * 1.0 - Initial mach64 DRM
 */

struct drm_ioctl_desc mach64_ioctls[] = {
	DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
};

int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);

/* ================================================================
 * DMA hardware state programming functions
 */

static void mach64_print_dirty(const char *msg, unsigned int flags)
{
	DRM_DEBUG("%s: (0x%x) %s%s%s%s%s%s%s%s%s%s%s%s\n",
		  msg,
		  flags,
		  (flags & MACH64_UPLOAD_DST_OFF_PITCH) ? "dst_off_pitch, " : "",
		  (flags & MACH64_UPLOAD_Z_ALPHA_CNTL) ? "z_alpha_cntl, " : "",
		  (flags & MACH64_UPLOAD_SCALE_3D_CNTL) ? "scale_3d_cntl, " : "",
		  (flags & MACH64_UPLOAD_DP_FOG_CLR) ? "dp_fog_clr, " : "",
		  (flags & MACH64_UPLOAD_DP_WRITE_MASK) ? "dp_write_mask, " : "",
		  (flags & MACH64_UPLOAD_DP_PIX_WIDTH) ? "dp_pix_width, " : "",
		  (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "",
		  (flags & MACH64_UPLOAD_MISC) ? "misc, " : "",
		  (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "",
		  (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "",
		  (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "",
		  (flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : "");
}

/* Mach64 doesn't have hardware cliprects, just one hardware scissor,
 * so the GL scissor is intersected with each cliprect here
 */
/* This function returns 0 on success, 1 for no intersection, and
 * negative for an error
 */
static int mach64_emit_cliprect(struct drm_file *file_priv,
				drm_mach64_private_t *dev_priv,
				struct drm_clip_rect *box)
{
	u32 sc_left_right, sc_top_bottom;
	struct drm_clip_rect scissor;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
	DMALOCALS;

	DRM_DEBUG("box=%p\n", box);

	/* Get GL scissor */
	/* FIXME: store scissor in SAREA as a cliprect instead of in
	 * hardware format, or do intersection client-side
	 */
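	/* The scissor registers pack two 16-bit edges per dword: SC_LEFT_RIGHT
	 * holds left in bits 0-15 and right in bits 16-31, and SC_TOP_BOTTOM
	 * holds top/bottom the same way, i.e. (right << 16) | left.
	 */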
	scissor.x1 = regs->sc_left_right & 0xffff;
	scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16;
	scissor.y1 = regs->sc_top_bottom & 0xffff;
	scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16;

	/* Intersect GL scissor with cliprect */
	if (box->x1 > scissor.x1)
		scissor.x1 = box->x1;
	if (box->y1 > scissor.y1)
		scissor.y1 = box->y1;
	if (box->x2 < scissor.x2)
		scissor.x2 = box->x2;
	if (box->y2 < scissor.y2)
		scissor.y2 = box->y2;
	/* positive return means skip */
	if (scissor.x1 >= scissor.x2)
		return 1;
	if (scissor.y1 >= scissor.y2)
		return 1;

	DMAGETPTR(file_priv, dev_priv, 2);	/* returns on failure to get buffer */

	sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
	sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));

	DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right);
	DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom);

	DMAADVANCE(dev_priv, 1);

	return 0;
}

static __inline__ int mach64_emit_state(struct drm_file *file_priv,
					drm_mach64_private_t *dev_priv)
{
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
	unsigned int dirty = sarea_priv->dirty;
	u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2);
	DMALOCALS;

	if (MACH64_VERBOSE) {
		mach64_print_dirty(__FUNCTION__, dirty);
	} else {
		DRM_DEBUG("dirty=0x%08x\n", dirty);
	}

	DMAGETPTR(file_priv, dev_priv, 17);	/* returns on failure to get buffer */

	if (dirty & MACH64_UPLOAD_MISC) {
		DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
		DMAOUTREG(MACH64_DP_SRC, regs->dp_src);
		DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl);
		DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl);
		sarea_priv->dirty &= ~MACH64_UPLOAD_MISC;
	}

	if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) {
		DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch);
		sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH;
	}
	if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) {
		DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch);
		sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH;
	}
	if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) {
		DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl);
		DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl);
		sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL;
	}
	if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) {
		DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl);
		sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL;
	}
	if (dirty & MACH64_UPLOAD_DP_FOG_CLR) {
		DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr);
		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR;
	}
	if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) {
		DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask);
		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK;
	}
	if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) {
		DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width);
		sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH;
	}
	if (dirty & MACH64_UPLOAD_SETUP_CNTL) {
		DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl);
		sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL;
	}

	if (dirty & MACH64_UPLOAD_TEXTURE) {
		DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch);
		DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl);
		DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off);
		DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset);
		sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE;
	}

	DMAADVANCE(dev_priv, 1);
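
	/* Everything handled above was cleared from ->dirty as it was
	 * emitted; only MACH64_UPLOAD_CLIPRECTS is kept, since cliprects are
	 * emitted per box by mach64_emit_cliprect() during vertex dispatch.
	 */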
	sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS;

	return 0;
}

/* ================================================================
 * DMA command dispatch functions
 */

static int mach64_dma_dispatch_clear(struct drm_device *dev,
				     struct drm_file *file_priv,
				     unsigned int flags,
				     int cx, int cy, int cw, int ch,
				     unsigned int clear_color,
				     unsigned int clear_depth)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	u32 fb_bpp, depth_bpp;
	int i;
	DMALOCALS;

	DRM_DEBUG("\n");

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = MACH64_DATATYPE_RGB565;
		break;
	case 32:
		fb_bpp = MACH64_DATATYPE_ARGB8888;
		break;
	default:
		return -EINVAL;
	}
	switch (dev_priv->depth_bpp) {
	case 16:
		depth_bpp = MACH64_DATATYPE_RGB565;
		break;
	case 24:
	case 32:
		depth_bpp = MACH64_DATATYPE_ARGB8888;
		break;
	default:
		return -EINVAL;
	}

	if (!nbox)
		return 0;

	DMAGETPTR(file_priv, dev_priv, nbox * 31);	/* returns on failure to get buffer */

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
			  pbox[i].x1, pbox[i].y1,
			  pbox[i].x2, pbox[i].y2, flags);

		if (flags & (MACH64_FRONT | MACH64_BACK)) {
			/* Setup for color buffer clears
			 */
			DMAOUTREG(MACH64_Z_CNTL, 0);
			DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);

			DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
			DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);

			DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
			DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
				  (MACH64_DST_X_LEFT_TO_RIGHT |
				   MACH64_DST_Y_TOP_TO_BOTTOM));
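
			/* As in the blit path below, the framebuffer datatype
			 * is replicated into the dst, composite, src, host
			 * data and scaler/3D pixel-width fields of
			 * DP_PIX_WIDTH.
			 */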
			DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
							(fb_bpp << 4) |
							(fb_bpp << 8) |
							(fb_bpp << 16) |
							(fb_bpp << 28)));

			DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color);
			DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask);
			DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
						  MACH64_FRGD_MIX_S));
			DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
						  MACH64_FRGD_SRC_FRGD_CLR |
						  MACH64_MONO_SRC_ONE));
		}

		if (flags & MACH64_FRONT) {
			DMAOUTREG(MACH64_DST_OFF_PITCH,
				  dev_priv->front_offset_pitch);
			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
		}

		if (flags & MACH64_BACK) {
			DMAOUTREG(MACH64_DST_OFF_PITCH,
				  dev_priv->back_offset_pitch);
			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
		}

		if (flags & MACH64_DEPTH) {
			/* Setup for depth buffer clear
			 */
			DMAOUTREG(MACH64_Z_CNTL, 0);
			DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);

			DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right);
			DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom);

			DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
			DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
				  (MACH64_DST_X_LEFT_TO_RIGHT |
				   MACH64_DST_Y_TOP_TO_BOTTOM));

			DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) |
							(depth_bpp << 4) |
							(depth_bpp << 8) |
							(depth_bpp << 16) |
							(depth_bpp << 28)));

			DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth);
			DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
			DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D |
						  MACH64_FRGD_MIX_S));
			DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR |
						  MACH64_FRGD_SRC_FRGD_CLR |
						  MACH64_MONO_SRC_ONE));

			DMAOUTREG(MACH64_DST_OFF_PITCH,
				  dev_priv->depth_offset_pitch);
			DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x);
			DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
		}
	}

	DMAADVANCE(dev_priv, 1);

	return 0;
}

static int mach64_dma_dispatch_swap(struct drm_device *dev,
				    struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	u32 fb_bpp;
	int i;
	DMALOCALS;

	DRM_DEBUG("\n");

	switch (dev_priv->fb_bpp) {
	case 16:
		fb_bpp = MACH64_DATATYPE_RGB565;
		break;
	case 32:
	default:
		fb_bpp = MACH64_DATATYPE_ARGB8888;
		break;
	}

	if (!nbox)
		return 0;

	DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4);	/* returns on failure to get buffer */

	DMAOUTREG(MACH64_Z_CNTL, 0);
	DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);

	DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));	/* no scissor */
	DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));

	DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);
	DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT |
					 MACH64_DST_Y_TOP_TO_BOTTOM));

	DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) |
					(fb_bpp << 4) |
					(fb_bpp << 8) |
					(fb_bpp << 16) | (fb_bpp << 28)));

	DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);
	DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S));
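
	/* The swap is a screen-to-screen blit (FRGD_SRC_BLIT): SRC_OFF_PITCH
	 * is pointed at the back buffer and DST_OFF_PITCH at the front
	 * buffer below, so each cliprect is copied back to front.
	 */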
	DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR |
				  MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE));

	DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch);
	DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch);

	for (i = 0; i < nbox; i++) {
		int x = pbox[i].x1;
		int y = pbox[i].y1;
		int w = pbox[i].x2 - x;
		int h = pbox[i].y2 - y;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d\n",
			  pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);

		DMAOUTREG(MACH64_SRC_WIDTH1, w);
		DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y);
		DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y);
		DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w);
	}

	DMAADVANCE(dev_priv, 1);
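
	/* In asynchronous DMA mode, record the ring offset of this swap so
	 * mach64_do_get_frames_queued() can later tell which of the queued
	 * frames the hardware has already processed.
	 */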
	if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) {
		for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) {
			dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1];
		}
		dev_priv->frame_ofs[i] = GETRINGOFFSET();
		dev_priv->sarea_priv->frames_queued++;
	}

	return 0;
}

static int mach64_do_get_frames_queued(drm_mach64_private_t *dev_priv)
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int i, start;
	u32 head, tail, ofs;

	DRM_DEBUG("\n");

	if (sarea_priv->frames_queued == 0)
		return 0;

	tail = ring->tail;
	mach64_ring_tick(dev_priv, ring);
	head = ring->head;

	start = (MACH64_MAX_QUEUED_FRAMES -
		 DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued));

	if (head == tail) {
		sarea_priv->frames_queued = 0;
		for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
			dev_priv->frame_ofs[i] = ~0;
		}
		return 0;
	}
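
	/* A saved frame offset is considered retired once it lies outside the
	 * busy region of the ring between head and tail; the two comparisons
	 * below handle the wrapped and non-wrapped cases.
	 */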
	for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) {
		ofs = dev_priv->frame_ofs[i];
		DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs);
		if (ofs == ~0 ||
		    (head < tail && (ofs < head || ofs >= tail)) ||
		    (head > tail && (ofs < head && ofs >= tail))) {
			sarea_priv->frames_queued =
			    (MACH64_MAX_QUEUED_FRAMES - 1) - i;
			dev_priv->frame_ofs[i] = ~0;
		}
	}

	return sarea_priv->frames_queued;
}

/* Copy and verify a client submitted buffer.
 * FIXME: Make an assembly optimized version
 */
static __inline__ int copy_from_user_vertex(u32 *to,
					    const u32 __user *ufrom,
					    unsigned long bytes)
{
	unsigned long n = bytes;	/* dwords remaining in buffer */
	u32 *from, *orig_from;

	from = drm_alloc(bytes, DRM_MEM_DRIVER);
	if (from == NULL)
		return -ENOMEM;

	if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
		drm_free(from, bytes, DRM_MEM_DRIVER);
		return -EFAULT;
	}
	orig_from = from;	/* we'll be modifying the "from" ptr, so save it */

	n >>= 2;
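
	/* The buffer is a stream of gui-master register commands: each
	 * command dword carries a register offset in its low 16 bits and the
	 * data-dword count minus one in its high 16 bits, followed by that
	 * many data dwords.  Only writes to the Setup Engine register range
	 * are allowed through.
	 */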
	while (n > 1) {
		u32 data, reg, count;

		data = *from++;
		n--;

		reg = le32_to_cpu(data);
		count = (reg >> 16) + 1;
		if (count <= n) {
			n -= count;
			reg &= 0xffff;

			/* This is an exact match of Mach64's Setup Engine registers,
			 * excluding SETUP_CNTL (1_C1).
			 */
			if ((reg >= 0x0190 && reg < 0x01c1) ||
			    (reg >= 0x01ca && reg <= 0x01cf)) {
				*to++ = data;
				memcpy(to, from, count << 2);
				from += count;
				to += count;
			} else {
				DRM_ERROR("Got bad command: 0x%04x\n", reg);
				drm_free(orig_from, bytes, DRM_MEM_DRIVER);
				return -EACCES;
			}
		} else {
			DRM_ERROR("Got bad command count(=%u) dwords remaining=%lu\n",
				  count, n);
			drm_free(orig_from, bytes, DRM_MEM_DRIVER);
			return -EINVAL;
		}
	}

	drm_free(orig_from, bytes, DRM_MEM_DRIVER);
	if (n == 0)
		return 0;
	else {
		DRM_ERROR("Bad buf->used(=%lu)\n", bytes);
		return -EINVAL;
	}
}

static int mach64_dma_dispatch_vertex(struct drm_device *dev,
				      struct drm_file *file_priv,
				      drm_mach64_vertex_t *vertex)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	struct drm_buf *copy_buf;
	void *buf = vertex->buf;
	unsigned long used = vertex->used;
	int ret = 0;
	int i = 0;
	int done = 0;
	int verify_ret = 0;
	DMALOCALS;

	DRM_DEBUG("buf=%p used=%lu nbox=%d\n",
		  buf, used, sarea_priv->nbox);

	if (!used)
		goto _vertex_done;

	copy_buf = mach64_freelist_get(dev_priv);
	if (copy_buf == NULL) {
		DRM_ERROR("couldn't get buffer\n");
		return -EAGAIN;
	}

	/* Mach64's vertex data is actually register writes. To avoid security
	 * compromises these register writes have to be verified and copied from
	 * user space into a private DMA buffer.
	 */
	verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);

	if (verify_ret != 0) {
		mach64_freelist_put(dev_priv, copy_buf);
		goto _vertex_done;
	}

	copy_buf->used = used;

	DMASETPTR(copy_buf);

	if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
		ret = mach64_emit_state(file_priv, dev_priv);
		if (ret < 0)
			return ret;
	}

	do {
		/* Emit the next cliprect */
		if (i < sarea_priv->nbox) {
			ret = mach64_emit_cliprect(file_priv, dev_priv,
						   &sarea_priv->boxes[i]);
			if (ret < 0) {
				/* failed to get buffer */
				return ret;
			} else if (ret != 0) {
				/* null intersection with scissor */
				continue;
			}
		}
		if ((i >= sarea_priv->nbox - 1))
			done = 1;

		/* Add the buffer to the DMA queue */
		DMAADVANCE(dev_priv, done);

	} while (++i < sarea_priv->nbox);

	if (!done) {
		if (copy_buf->pending) {
			DMADISCARDBUF();
		} else {
			/* This buffer wasn't used (no cliprects), so place it
			 * back on the free list
			 */
			mach64_freelist_put(dev_priv, copy_buf);
		}
	}

_vertex_done:
	sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS;
	sarea_priv->nbox = 0;

	return verify_ret;
}

static __inline__ int copy_from_user_blit(u32 *to,
					  const u32 __user *ufrom,
					  unsigned long bytes)
{
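	/* Land the copy MACH64_HOSTDATA_BLIT_OFFSET bytes into the DMA
	 * buffer, leaving room at the front for the register setup that
	 * mach64_dma_dispatch_blit() emits ahead of the host data.
	 */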
	to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);

	if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
		return -EFAULT;
	}

	return 0;
}

static int mach64_dma_dispatch_blit(struct drm_device *dev,
				    struct drm_file *file_priv,
				    drm_mach64_blit_t *blit)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	int dword_shift, dwords;
	unsigned long used;
	struct drm_buf *copy_buf;
	int verify_ret = 0;
	DMALOCALS;

	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
	 */
	switch (blit->format) {
	case MACH64_DATATYPE_ARGB8888:
		dword_shift = 0;
		break;
	case MACH64_DATATYPE_ARGB1555:
	case MACH64_DATATYPE_RGB565:
	case MACH64_DATATYPE_VYUY422:
	case MACH64_DATATYPE_YVYU422:
	case MACH64_DATATYPE_ARGB4444:
		dword_shift = 1;
		break;
	case MACH64_DATATYPE_CI8:
	case MACH64_DATATYPE_RGB8:
		dword_shift = 2;
		break;
	default:
		DRM_ERROR("invalid blit format %d\n", blit->format);
		return -EINVAL;
	}

	/* Set buf->used to the bytes of blit data based on the blit dimensions
	 * and verify the size.  When the setup is emitted to the buffer with
	 * the DMA* macros below, buf->used is incremented to include the bytes
	 * used for setup as well as the blit data.
	 */
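	/* dword_shift converts the pixel count to dwords: 32 bpp formats
	 * shift by 0 (one pixel per dword), 16 bpp by 1 (two pixels per
	 * dword) and 8 bpp by 2 (four pixels per dword).
	 */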
	dwords = (blit->width * blit->height) >> dword_shift;
	used = dwords << 2;
	if (used <= 0 ||
	    used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
		DRM_ERROR("Invalid blit size: %lu bytes\n", used);
		return -EINVAL;
	}

	copy_buf = mach64_freelist_get(dev_priv);
	if (copy_buf == NULL) {
		DRM_ERROR("couldn't get buffer\n");
		return -EAGAIN;
	}

	/* Copy the blit data from userspace.
	 *
	 * XXX: This is overkill. The most efficient solution would be having
	 * two sets of buffers (one set private for vertex data, the other set
	 * client-writable for blits). However that would bring more complexity
	 * and would break backward compatibility. The solution currently
	 * implemented is keeping all buffers private, allowing to secure the
	 * driver, without increasing complexity at the expense of some speed
	 * transferring data.
	 */
	verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);

	if (verify_ret != 0) {
		mach64_freelist_put(dev_priv, copy_buf);
		goto _blit_done;
	}

	copy_buf->used = used;

	/* FIXME: Use a last buffer flag and reduce the state emitted for subsequent,
	 * continuation buffers?
	 */

	/* Blit via BM_HOSTDATA (gui-master) - like HOST_DATA[0-15], but doesn't require
	 * a register command every 16 dwords.  State setup is added at the start of the
	 * buffer -- the client leaves space for this based on MACH64_HOSTDATA_BLIT_OFFSET
	 */
	DMASETPTR(copy_buf);

	DMAOUTREG(MACH64_Z_CNTL, 0);
	DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);

	DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16));	/* no scissor */
	DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16));

	DMAOUTREG(MACH64_CLR_CMP_CNTL, 0);	/* disable */
	DMAOUTREG(MACH64_GUI_TRAJ_CNTL,
		  MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM);

	DMAOUTREG(MACH64_DP_PIX_WIDTH, (blit->format << 0)	/* dst pix width */
		  | (blit->format << 4)		/* composite pix width */
		  | (blit->format << 8)		/* src pix width */
		  | (blit->format << 16)	/* host data pix width */
		  | (blit->format << 28)	/* scaler/3D pix width */
	    );

	DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff);	/* enable all planes */
	DMAOUTREG(MACH64_DP_MIX, MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S);
	DMAOUTREG(MACH64_DP_SRC,
		  MACH64_BKGD_SRC_BKGD_CLR
		  | MACH64_FRGD_SRC_HOST | MACH64_MONO_SRC_ONE);
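
	/* DST_OFF_PITCH packs the destination pitch into the high bits
	 * (<< 22) and the framebuffer offset in 8-byte units (>> 3).
	 */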
	DMAOUTREG(MACH64_DST_OFF_PITCH,
		  (blit->pitch << 22) | (blit->offset >> 3));
	DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);
	DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width);

	DRM_DEBUG("%lu bytes\n", used);

	/* Add the buffer to the queue */
	DMAADVANCEHOSTDATA(dev_priv);

_blit_done:
	return verify_ret;
}

/* ================================================================
 * IOCTL functions
 */

int mach64_dma_clear(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_clear_t *clear = data;
	int ret;

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;

	ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
					clear->x, clear->y, clear->w, clear->h,
					clear->clear_color,
					clear->clear_depth);

	/* Make sure we restore the 3D state next time.
	 */
	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
	return ret;
}

int mach64_dma_swap(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int ret;

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;

	ret = mach64_dma_dispatch_swap(dev, file_priv);

	/* Make sure we restore the 3D state next time.
	 */
	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT | MACH64_UPLOAD_MISC);
	return ret;
}

int mach64_dma_vertex(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n",
		  DRM_CURRENTPID,
		  vertex->buf, vertex->used, vertex->discard);

	if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
		DRM_ERROR("buffer prim %d\n", vertex->prim);
		return -EINVAL;
	}

	if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
		DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
			  vertex->used);
		return -EINVAL;
	}

	if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;

	return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
}

int mach64_dma_blit(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mach64_blit_t *blit = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	ret = mach64_dma_dispatch_blit(dev, file_priv, blit);

	/* Make sure we restore the 3D state next time.
	 */
	sarea_priv->dirty |= (MACH64_UPLOAD_CONTEXT |
			      MACH64_UPLOAD_MISC | MACH64_UPLOAD_CLIPRECTS);
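	/* The blit path disabled the hardware scissor, so the cliprects are
	 * flagged for re-emission here along with the context state.
	 */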

	return ret;
}

int mach64_get_param(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	drm_mach64_private_t *dev_priv = dev->dev_private;
	drm_mach64_getparam_t *param = data;
	int value;

	DRM_DEBUG("\n");

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case MACH64_PARAM_FRAMES_QUEUED:
		/* Needs lock since it calls mach64_ring_tick() */
		LOCK_TEST_WITH_RETURN(dev, file_priv);
		value = mach64_do_get_frames_queued(dev_priv);
		break;
	case MACH64_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	default:
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}