Update drm/radeon to Linux 4.7.10 as much as possible...
[dragonfly.git] / sys / dev / drm / radeon / evergreen_cs.c
1 /*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <drm/drmP.h>
29 #include "radeon.h"
30 #include "radeon_asic.h"
31 #include "evergreend.h"
32 #include "evergreen_reg_safe.h"
33 #include "cayman_reg_safe.h"
35 #define MAX(a,b) (((a)>(b))?(a):(b))
36 #define MIN(a,b) (((a)<(b))?(a):(b))
38 #define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
40 struct evergreen_cs_track {
41 u32 group_size;
42 u32 nbanks;
43 u32 npipes;
44 u32 row_size;
45 /* value we track */
46 u32 nsamples; /* unused */
47 struct radeon_bo *cb_color_bo[12];
48 u32 cb_color_bo_offset[12];
49 struct radeon_bo *cb_color_fmask_bo[8]; /* unused */
50 struct radeon_bo *cb_color_cmask_bo[8]; /* unused */
51 u32 cb_color_info[12];
52 u32 cb_color_view[12];
53 u32 cb_color_pitch[12];
54 u32 cb_color_slice[12];
55 u32 cb_color_slice_idx[12];
56 u32 cb_color_attrib[12];
57 u32 cb_color_cmask_slice[8];/* unused */
58 u32 cb_color_fmask_slice[8];/* unused */
59 u32 cb_target_mask;
60 u32 cb_shader_mask; /* unused */
61 u32 vgt_strmout_config;
62 u32 vgt_strmout_buffer_config;
63 struct radeon_bo *vgt_strmout_bo[4];
64 u32 vgt_strmout_bo_offset[4];
65 u32 vgt_strmout_size[4];
66 u32 db_depth_control;
67 u32 db_depth_view;
68 u32 db_depth_slice;
69 u32 db_depth_size;
70 u32 db_z_info;
71 u32 db_z_read_offset;
72 u32 db_z_write_offset;
73 struct radeon_bo *db_z_read_bo;
74 struct radeon_bo *db_z_write_bo;
75 u32 db_s_info;
76 u32 db_s_read_offset;
77 u32 db_s_write_offset;
78 struct radeon_bo *db_s_read_bo;
79 struct radeon_bo *db_s_write_bo;
80 bool sx_misc_kill_all_prims;
81 bool cb_dirty;
82 bool db_dirty;
83 bool streamout_dirty;
84 u32 htile_offset;
85 u32 htile_surface;
86 struct radeon_bo *htile_bo;
87 unsigned long indirect_draw_buffer_size;
88 const unsigned *reg_safe_bm;
89 };
91 static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
93 if (tiling_flags & RADEON_TILING_MACRO)
94 return ARRAY_2D_TILED_THIN1;
95 else if (tiling_flags & RADEON_TILING_MICRO)
96 return ARRAY_1D_TILED_THIN1;
97 else
98 return ARRAY_LINEAR_GENERAL;
101 static u32 evergreen_cs_get_num_banks(u32 nbanks)
103 switch (nbanks) {
104 case 2:
105 return ADDR_SURF_2_BANK;
106 case 4:
107 return ADDR_SURF_4_BANK;
108 case 8:
109 default:
110 return ADDR_SURF_8_BANK;
111 case 16:
112 return ADDR_SURF_16_BANK;
116 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
118 int i;
120 for (i = 0; i < 8; i++) {
121 track->cb_color_fmask_bo[i] = NULL;
122 track->cb_color_cmask_bo[i] = NULL;
123 track->cb_color_cmask_slice[i] = 0;
124 track->cb_color_fmask_slice[i] = 0;
127 for (i = 0; i < 12; i++) {
128 track->cb_color_bo[i] = NULL;
129 track->cb_color_bo_offset[i] = 0xFFFFFFFF;
130 track->cb_color_info[i] = 0;
131 track->cb_color_view[i] = 0xFFFFFFFF;
132 track->cb_color_pitch[i] = 0;
133 track->cb_color_slice[i] = 0xfffffff;
134 track->cb_color_slice_idx[i] = 0;
136 track->cb_target_mask = 0xFFFFFFFF;
137 track->cb_shader_mask = 0xFFFFFFFF;
138 track->cb_dirty = true;
140 track->db_depth_slice = 0xffffffff;
141 track->db_depth_view = 0xFFFFC000;
142 track->db_depth_size = 0xFFFFFFFF;
143 track->db_depth_control = 0xFFFFFFFF;
144 track->db_z_info = 0xFFFFFFFF;
145 track->db_z_read_offset = 0xFFFFFFFF;
146 track->db_z_write_offset = 0xFFFFFFFF;
147 track->db_z_read_bo = NULL;
148 track->db_z_write_bo = NULL;
149 track->db_s_info = 0xFFFFFFFF;
150 track->db_s_read_offset = 0xFFFFFFFF;
151 track->db_s_write_offset = 0xFFFFFFFF;
152 track->db_s_read_bo = NULL;
153 track->db_s_write_bo = NULL;
154 track->db_dirty = true;
155 track->htile_bo = NULL;
156 track->htile_offset = 0xFFFFFFFF;
157 track->htile_surface = 0;
159 for (i = 0; i < 4; i++) {
160 track->vgt_strmout_size[i] = 0;
161 track->vgt_strmout_bo[i] = NULL;
162 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
164 track->streamout_dirty = true;
165 track->sx_misc_kill_all_prims = false;
168 struct eg_surface {
169 /* value gathered from cs */
170 unsigned nbx;
171 unsigned nby;
172 unsigned format;
173 unsigned mode;
174 unsigned nbanks;
175 unsigned bankw;
176 unsigned bankh;
177 unsigned tsplit;
178 unsigned mtilea;
179 unsigned nsamples;
180 /* output value */
181 unsigned bpe;
182 unsigned layer_size;
183 unsigned palign;
184 unsigned halign;
185 unsigned long base_align;
186 };
188 static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
189 struct eg_surface *surf,
190 const char *prefix)
192 surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
193 surf->base_align = surf->bpe;
194 surf->palign = 1;
195 surf->halign = 1;
196 return 0;
199 static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
200 struct eg_surface *surf,
201 const char *prefix)
203 struct evergreen_cs_track *track = p->track;
204 unsigned palign;
206 palign = MAX(64, track->group_size / surf->bpe);
207 surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
208 surf->base_align = track->group_size;
209 surf->palign = palign;
210 surf->halign = 1;
211 if (surf->nbx & (palign - 1)) {
212 if (prefix) {
213 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
214 __func__, __LINE__, prefix, surf->nbx, palign);
216 return -EINVAL;
218 return 0;
221 static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
222 struct eg_surface *surf,
223 const char *prefix)
225 struct evergreen_cs_track *track = p->track;
226 unsigned palign;
228 palign = track->group_size / (8 * surf->bpe * surf->nsamples);
229 palign = MAX(8, palign);
230 surf->layer_size = surf->nbx * surf->nby * surf->bpe;
231 surf->base_align = track->group_size;
232 surf->palign = palign;
233 surf->halign = 8;
234 if ((surf->nbx & (palign - 1))) {
235 if (prefix) {
236 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
237 __func__, __LINE__, prefix, surf->nbx, palign,
238 track->group_size, surf->bpe, surf->nsamples);
240 return -EINVAL;
242 if ((surf->nby & (8 - 1))) {
243 if (prefix) {
244 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
245 __func__, __LINE__, prefix, surf->nby);
247 return -EINVAL;
249 return 0;
252 static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
253 struct eg_surface *surf,
254 const char *prefix)
256 struct evergreen_cs_track *track = p->track;
257 unsigned palign, halign, tileb, slice_pt;
258 unsigned mtile_pr, mtile_ps, mtileb;
260 tileb = 64 * surf->bpe * surf->nsamples;
261 slice_pt = 1;
262 if (tileb > surf->tsplit) {
263 slice_pt = tileb / surf->tsplit;
265 tileb = tileb / slice_pt;
266 /* macro tile width & height */
267 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
268 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
269 mtileb = (palign / 8) * (halign / 8) * tileb;
270 mtile_pr = surf->nbx / palign;
271 mtile_ps = (mtile_pr * surf->nby) / halign;
272 surf->layer_size = mtile_ps * mtileb * slice_pt;
273 surf->base_align = (palign / 8) * (halign / 8) * tileb;
274 surf->palign = palign;
275 surf->halign = halign;
277 if ((surf->nbx & (palign - 1))) {
278 if (prefix) {
279 dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
280 __func__, __LINE__, prefix, surf->nbx, palign);
282 return -EINVAL;
284 if ((surf->nby & (halign - 1))) {
285 if (prefix) {
286 dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
287 __func__, __LINE__, prefix, surf->nby, halign);
289 return -EINVAL;
292 return 0;
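/*
 * Recap of evergreen_surface_check_2d() above: tileb is the byte size of one
 * 8x8-element micro tile (capped by the tile split, with the overflow carried
 * in slice_pt), a macro tile groups (palign/8) x (halign/8) micro tiles
 * across the banks and pipes, and layer_size counts whole macro tiles. That
 * is why the pitch must be a multiple of palign and the height a multiple of
 * halign, which the two checks above enforce.
 */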
295 static int evergreen_surface_check(struct radeon_cs_parser *p,
296 struct eg_surface *surf,
297 const char *prefix)
299 /* some common values computed here */
300 surf->bpe = r600_fmt_get_blocksize(surf->format);
302 switch (surf->mode) {
303 case ARRAY_LINEAR_GENERAL:
304 return evergreen_surface_check_linear(p, surf, prefix);
305 case ARRAY_LINEAR_ALIGNED:
306 return evergreen_surface_check_linear_aligned(p, surf, prefix);
307 case ARRAY_1D_TILED_THIN1:
308 return evergreen_surface_check_1d(p, surf, prefix);
309 case ARRAY_2D_TILED_THIN1:
310 return evergreen_surface_check_2d(p, surf, prefix);
311 default:
312 dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
313 __func__, __LINE__, prefix, surf->mode);
314 return -EINVAL;
316 return -EINVAL;
319 static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
320 struct eg_surface *surf,
321 const char *prefix)
323 switch (surf->mode) {
324 case ARRAY_2D_TILED_THIN1:
325 break;
326 case ARRAY_LINEAR_GENERAL:
327 case ARRAY_LINEAR_ALIGNED:
328 case ARRAY_1D_TILED_THIN1:
329 return 0;
330 default:
331 dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
332 __func__, __LINE__, prefix, surf->mode);
333 return -EINVAL;
336 switch (surf->nbanks) {
337 case 0: surf->nbanks = 2; break;
338 case 1: surf->nbanks = 4; break;
339 case 2: surf->nbanks = 8; break;
340 case 3: surf->nbanks = 16; break;
341 default:
342 dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
343 __func__, __LINE__, prefix, surf->nbanks);
344 return -EINVAL;
346 switch (surf->bankw) {
347 case 0: surf->bankw = 1; break;
348 case 1: surf->bankw = 2; break;
349 case 2: surf->bankw = 4; break;
350 case 3: surf->bankw = 8; break;
351 default:
352 dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
353 __func__, __LINE__, prefix, surf->bankw);
354 return -EINVAL;
356 switch (surf->bankh) {
357 case 0: surf->bankh = 1; break;
358 case 1: surf->bankh = 2; break;
359 case 2: surf->bankh = 4; break;
360 case 3: surf->bankh = 8; break;
361 default:
362 dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
363 __func__, __LINE__, prefix, surf->bankh);
364 return -EINVAL;
366 switch (surf->mtilea) {
367 case 0: surf->mtilea = 1; break;
368 case 1: surf->mtilea = 2; break;
369 case 2: surf->mtilea = 4; break;
370 case 3: surf->mtilea = 8; break;
371 default:
372 dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
373 __func__, __LINE__, prefix, surf->mtilea);
374 return -EINVAL;
376 switch (surf->tsplit) {
377 case 0: surf->tsplit = 64; break;
378 case 1: surf->tsplit = 128; break;
379 case 2: surf->tsplit = 256; break;
380 case 3: surf->tsplit = 512; break;
381 case 4: surf->tsplit = 1024; break;
382 case 5: surf->tsplit = 2048; break;
383 case 6: surf->tsplit = 4096; break;
384 default:
385 dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
386 __func__, __LINE__, prefix, surf->tsplit);
387 return -EINVAL;
389 return 0;
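/*
 * evergreen_surface_value_conv_check() above decodes the raw register fields,
 * which are log2-style encodings, into real counts: nbanks 0..3 -> 2/4/8/16
 * banks, bankw/bankh/mtilea 0..3 -> 1/2/4/8, and tsplit 0..6 -> 64..4096
 * bytes. Only 2D-tiled surfaces need the conversion; the other array modes
 * return early.
 */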
392 static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
394 struct evergreen_cs_track *track = p->track;
395 struct eg_surface surf;
396 unsigned pitch, slice, mslice;
397 unsigned long offset;
398 int r;
400 mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
401 pitch = track->cb_color_pitch[id];
402 slice = track->cb_color_slice[id];
403 surf.nbx = (pitch + 1) * 8;
404 surf.nby = ((slice + 1) * 64) / surf.nbx;
405 surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
406 surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
407 surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
408 surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
409 surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
410 surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
411 surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
412 surf.nsamples = 1;
414 if (!r600_fmt_is_valid_color(surf.format)) {
415 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
416 __func__, __LINE__, surf.format,
417 id, track->cb_color_info[id]);
418 return -EINVAL;
421 r = evergreen_surface_value_conv_check(p, &surf, "cb");
422 if (r) {
423 return r;
426 r = evergreen_surface_check(p, &surf, "cb");
427 if (r) {
428 dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
429 __func__, __LINE__, id, track->cb_color_pitch[id],
430 track->cb_color_slice[id], track->cb_color_attrib[id],
431 track->cb_color_info[id]);
432 return r;
435 offset = track->cb_color_bo_offset[id] << 8;
436 if (offset & (surf.base_align - 1)) {
437 dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
438 __func__, __LINE__, id, offset, surf.base_align);
439 return -EINVAL;
442 offset += surf.layer_size * mslice;
443 if (offset > radeon_bo_size(track->cb_color_bo[id])) {
444 /* old ddx versions are broken: they allocate the bo with w*h*bpp but
445 * program the slice with ALIGN(h, 8); catch this and patch the
446 * command stream.
447 */
448 if (!surf.mode) {
449 uint32_t *ib = p->ib.ptr;
450 unsigned long tmp, nby, bsize, size, min = 0;
452 /* find the height the ddx wants */
453 if (surf.nby > 8) {
454 min = surf.nby - 8;
456 bsize = radeon_bo_size(track->cb_color_bo[id]);
457 tmp = track->cb_color_bo_offset[id] << 8;
458 for (nby = surf.nby; nby > min; nby--) {
459 size = nby * surf.nbx * surf.bpe * surf.nsamples;
460 if ((tmp + size * mslice) <= bsize) {
461 break;
464 if (nby > min) {
465 surf.nby = nby;
466 slice = ((nby * surf.nbx) / 64) - 1;
467 if (!evergreen_surface_check(p, &surf, "cb")) {
468 /* check if this one works */
469 tmp += surf.layer_size * mslice;
470 if (tmp <= bsize) {
471 ib[track->cb_color_slice_idx[id]] = slice;
472 goto old_ddx_ok;
477 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
478 "offset %d, max layer %d, bo size %ld, slice %d)\n",
479 __func__, __LINE__, id, surf.layer_size,
480 track->cb_color_bo_offset[id] << 8, mslice,
481 radeon_bo_size(track->cb_color_bo[id]), slice);
482 dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
483 __func__, __LINE__, surf.nbx, surf.nby,
484 surf.mode, surf.bpe, surf.nsamples,
485 surf.bankw, surf.bankh,
486 surf.tsplit, surf.mtilea);
487 return -EINVAL;
489 old_ddx_ok:
491 return 0;
494 static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
495 unsigned nbx, unsigned nby)
497 struct evergreen_cs_track *track = p->track;
498 unsigned long size;
500 if (track->htile_bo == NULL) {
501 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
502 __func__, __LINE__, track->db_z_info);
503 return -EINVAL;
506 if (G_028ABC_LINEAR(track->htile_surface)) {
507 /* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
508 nbx = round_up(nbx, 16 * 8);
509 /* height is npipes htiles aligned == npipes * 8 pixel aligned */
510 nby = round_up(nby, track->npipes * 8);
511 } else {
512 /* always assume 8x8 htile */
513 /* alignment is the htile alignment * 8; the htile alignment varies
514 * with the number of pipes and the tile width and height (nby)
515 */
516 switch (track->npipes) {
517 case 8:
518 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
519 nbx = round_up(nbx, 64 * 8);
520 nby = round_up(nby, 64 * 8);
521 break;
522 case 4:
523 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
524 nbx = round_up(nbx, 64 * 8);
525 nby = round_up(nby, 32 * 8);
526 break;
527 case 2:
528 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
529 nbx = round_up(nbx, 32 * 8);
530 nby = round_up(nby, 32 * 8);
531 break;
532 case 1:
533 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
534 nbx = round_up(nbx, 32 * 8);
535 nby = round_up(nby, 16 * 8);
536 break;
537 default:
538 dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
539 __func__, __LINE__, track->npipes);
540 return -EINVAL;
543 /* compute number of htile */
544 nbx = nbx >> 3;
545 nby = nby >> 3;
546 /* size must be aligned on npipes * 2K boundary */
547 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
548 size += track->htile_offset;
550 if (size > radeon_bo_size(track->htile_bo)) {
551 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
552 __func__, __LINE__, radeon_bo_size(track->htile_bo),
553 size, nbx, nby);
554 return -EINVAL;
556 return 0;
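/*
 * Htile sizing in evergreen_cs_track_validate_htile() above: each htile
 * covers an 8x8 pixel block and costs 4 bytes, hence the nbx/nby >> 3 and
 * the nbx * nby * 4; the total is rounded up to an npipes * 2KB boundary,
 * the htile offset is added, and the result is checked against the htile
 * bo size.
 */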
559 static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
561 struct evergreen_cs_track *track = p->track;
562 struct eg_surface surf;
563 unsigned pitch, slice, mslice;
564 unsigned long offset;
565 int r;
567 mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
568 pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
569 slice = track->db_depth_slice;
570 surf.nbx = (pitch + 1) * 8;
571 surf.nby = ((slice + 1) * 64) / surf.nbx;
572 surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
573 surf.format = G_028044_FORMAT(track->db_s_info);
574 surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
575 surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
576 surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
577 surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
578 surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
579 surf.nsamples = 1;
581 if (surf.format != 1) {
582 dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
583 __func__, __LINE__, surf.format);
584 return -EINVAL;
586 /* replace by color format so we can use same code */
587 surf.format = V_028C70_COLOR_8;
589 r = evergreen_surface_value_conv_check(p, &surf, "stencil");
590 if (r) {
591 return r;
594 r = evergreen_surface_check(p, &surf, NULL);
595 if (r) {
596 /* old userspace doesn't compute the proper depth/stencil alignment;
597 * check the alignment against a bigger bytes-per-element value and
598 * only report an error if that alignment is wrong too.
599 */
600 surf.format = V_028C70_COLOR_8_8_8_8;
601 r = evergreen_surface_check(p, &surf, "stencil");
602 if (r) {
603 dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
604 __func__, __LINE__, track->db_depth_size,
605 track->db_depth_slice, track->db_s_info, track->db_z_info);
607 return r;
610 offset = track->db_s_read_offset << 8;
611 if (offset & (surf.base_align - 1)) {
612 dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
613 __func__, __LINE__, offset, surf.base_align);
614 return -EINVAL;
616 offset += surf.layer_size * mslice;
617 if (offset > radeon_bo_size(track->db_s_read_bo)) {
618 dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
619 "offset %ld, max layer %d, bo size %ld)\n",
620 __func__, __LINE__, surf.layer_size,
621 (unsigned long)track->db_s_read_offset << 8, mslice,
622 radeon_bo_size(track->db_s_read_bo));
623 dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
624 __func__, __LINE__, track->db_depth_size,
625 track->db_depth_slice, track->db_s_info, track->db_z_info);
626 return -EINVAL;
629 offset = track->db_s_write_offset << 8;
630 if (offset & (surf.base_align - 1)) {
631 dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
632 __func__, __LINE__, offset, surf.base_align);
633 return -EINVAL;
635 offset += surf.layer_size * mslice;
636 if (offset > radeon_bo_size(track->db_s_write_bo)) {
637 dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
638 "offset %ld, max layer %d, bo size %ld)\n",
639 __func__, __LINE__, surf.layer_size,
640 (unsigned long)track->db_s_write_offset << 8, mslice,
641 radeon_bo_size(track->db_s_write_bo));
642 return -EINVAL;
645 /* hyperz */
646 if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
647 r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
648 if (r) {
649 return r;
653 return 0;
656 static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
658 struct evergreen_cs_track *track = p->track;
659 struct eg_surface surf;
660 unsigned pitch, slice, mslice;
661 unsigned long offset;
662 int r;
664 mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
665 pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
666 slice = track->db_depth_slice;
667 surf.nbx = (pitch + 1) * 8;
668 surf.nby = ((slice + 1) * 64) / surf.nbx;
669 surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
670 surf.format = G_028040_FORMAT(track->db_z_info);
671 surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
672 surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
673 surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
674 surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
675 surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
676 surf.nsamples = 1;
678 switch (surf.format) {
679 case V_028040_Z_16:
680 surf.format = V_028C70_COLOR_16;
681 break;
682 case V_028040_Z_24:
683 case V_028040_Z_32_FLOAT:
684 surf.format = V_028C70_COLOR_8_8_8_8;
685 break;
686 default:
687 dev_warn(p->dev, "%s:%d depth invalid format %d\n",
688 __func__, __LINE__, surf.format);
689 return -EINVAL;
692 r = evergreen_surface_value_conv_check(p, &surf, "depth");
693 if (r) {
694 dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
695 __func__, __LINE__, track->db_depth_size,
696 track->db_depth_slice, track->db_z_info);
697 return r;
700 r = evergreen_surface_check(p, &surf, "depth");
701 if (r) {
702 dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
703 __func__, __LINE__, track->db_depth_size,
704 track->db_depth_slice, track->db_z_info);
705 return r;
708 offset = track->db_z_read_offset << 8;
709 if (offset & (surf.base_align - 1)) {
710 dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
711 __func__, __LINE__, offset, surf.base_align);
712 return -EINVAL;
714 offset += surf.layer_size * mslice;
715 if (offset > radeon_bo_size(track->db_z_read_bo)) {
716 dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
717 "offset %ld, max layer %d, bo size %ld)\n",
718 __func__, __LINE__, surf.layer_size,
719 (unsigned long)track->db_z_read_offset << 8, mslice,
720 radeon_bo_size(track->db_z_read_bo));
721 return -EINVAL;
724 offset = track->db_z_write_offset << 8;
725 if (offset & (surf.base_align - 1)) {
726 dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
727 __func__, __LINE__, offset, surf.base_align);
728 return -EINVAL;
730 offset += surf.layer_size * mslice;
731 if (offset > radeon_bo_size(track->db_z_write_bo)) {
732 dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
733 "offset %ld, max layer %d, bo size %ld)\n",
734 __func__, __LINE__, surf.layer_size,
735 (unsigned long)track->db_z_write_offset << 8, mslice,
736 radeon_bo_size(track->db_z_write_bo));
737 return -EINVAL;
740 /* hyperz */
741 if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
742 r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
743 if (r) {
744 return r;
748 return 0;
751 static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
752 struct radeon_bo *texture,
753 struct radeon_bo *mipmap,
754 unsigned idx)
756 struct eg_surface surf;
757 unsigned long toffset, moffset;
758 unsigned dim, llevel, mslice, width, height, depth, i;
759 u32 texdw[8];
760 int r;
762 texdw[0] = radeon_get_ib_value(p, idx + 0);
763 texdw[1] = radeon_get_ib_value(p, idx + 1);
764 texdw[2] = radeon_get_ib_value(p, idx + 2);
765 texdw[3] = radeon_get_ib_value(p, idx + 3);
766 texdw[4] = radeon_get_ib_value(p, idx + 4);
767 texdw[5] = radeon_get_ib_value(p, idx + 5);
768 texdw[6] = radeon_get_ib_value(p, idx + 6);
769 texdw[7] = radeon_get_ib_value(p, idx + 7);
770 dim = G_030000_DIM(texdw[0]);
771 llevel = G_030014_LAST_LEVEL(texdw[5]);
772 mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
773 width = G_030000_TEX_WIDTH(texdw[0]) + 1;
774 height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
775 depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
776 surf.format = G_03001C_DATA_FORMAT(texdw[7]);
777 surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
778 surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
779 surf.nby = r600_fmt_get_nblocksy(surf.format, height);
780 surf.mode = G_030004_ARRAY_MODE(texdw[1]);
781 surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
782 surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
783 surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
784 surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
785 surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
786 surf.nsamples = 1;
787 toffset = texdw[2] << 8;
788 moffset = texdw[3] << 8;
790 if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
791 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
792 __func__, __LINE__, surf.format);
793 return -EINVAL;
795 switch (dim) {
796 case V_030000_SQ_TEX_DIM_1D:
797 case V_030000_SQ_TEX_DIM_2D:
798 case V_030000_SQ_TEX_DIM_CUBEMAP:
799 case V_030000_SQ_TEX_DIM_1D_ARRAY:
800 case V_030000_SQ_TEX_DIM_2D_ARRAY:
801 depth = 1;
802 break;
803 case V_030000_SQ_TEX_DIM_2D_MSAA:
804 case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
805 surf.nsamples = 1 << llevel;
806 llevel = 0;
807 depth = 1;
808 break;
809 case V_030000_SQ_TEX_DIM_3D:
810 break;
811 default:
812 dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
813 __func__, __LINE__, dim);
814 return -EINVAL;
817 r = evergreen_surface_value_conv_check(p, &surf, "texture");
818 if (r) {
819 return r;
822 /* align height */
823 evergreen_surface_check(p, &surf, NULL);
824 surf.nby = ALIGN(surf.nby, surf.halign);
826 r = evergreen_surface_check(p, &surf, "texture");
827 if (r) {
828 dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
829 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
830 texdw[5], texdw[6], texdw[7]);
831 return r;
834 /* check texture size */
835 if (toffset & (surf.base_align - 1)) {
836 dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
837 __func__, __LINE__, toffset, surf.base_align);
838 return -EINVAL;
840 if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
841 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
842 __func__, __LINE__, moffset, surf.base_align);
843 return -EINVAL;
845 if (dim == SQ_TEX_DIM_3D) {
846 toffset += surf.layer_size * depth;
847 } else {
848 toffset += surf.layer_size * mslice;
850 if (toffset > radeon_bo_size(texture)) {
851 dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
852 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
853 __func__, __LINE__, surf.layer_size,
854 (unsigned long)texdw[2] << 8, mslice,
855 depth, radeon_bo_size(texture),
856 surf.nbx, surf.nby);
857 return -EINVAL;
860 if (!mipmap) {
861 if (llevel) {
862 dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
863 __func__, __LINE__);
864 return -EINVAL;
865 } else {
866 return 0; /* everything's ok */
870 /* check mipmap size */
871 for (i = 1; i <= llevel; i++) {
872 unsigned w, h, d;
874 w = r600_mip_minify(width, i);
875 h = r600_mip_minify(height, i);
876 d = r600_mip_minify(depth, i);
877 surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
878 surf.nby = r600_fmt_get_nblocksy(surf.format, h);
880 switch (surf.mode) {
881 case ARRAY_2D_TILED_THIN1:
882 if (surf.nbx < surf.palign || surf.nby < surf.halign) {
883 surf.mode = ARRAY_1D_TILED_THIN1;
885 /* recompute alignment */
886 evergreen_surface_check(p, &surf, NULL);
887 break;
888 case ARRAY_LINEAR_GENERAL:
889 case ARRAY_LINEAR_ALIGNED:
890 case ARRAY_1D_TILED_THIN1:
891 break;
892 default:
893 dev_warn(p->dev, "%s:%d invalid array mode %d\n",
894 __func__, __LINE__, surf.mode);
895 return -EINVAL;
897 surf.nbx = ALIGN(surf.nbx, surf.palign);
898 surf.nby = ALIGN(surf.nby, surf.halign);
900 r = evergreen_surface_check(p, &surf, "mipmap");
901 if (r) {
902 return r;
905 if (dim == SQ_TEX_DIM_3D) {
906 moffset += surf.layer_size * d;
907 } else {
908 moffset += surf.layer_size * mslice;
910 if (moffset > radeon_bo_size(mipmap)) {
911 dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
912 "offset %ld, coffset %ld, max layer %d, depth %d, "
913 "bo size %ld) level0 (%d %d %d)\n",
914 __func__, __LINE__, i, surf.layer_size,
915 (unsigned long)texdw[3] << 8, moffset, mslice,
916 d, radeon_bo_size(mipmap),
917 width, height, depth);
918 dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
919 __func__, __LINE__, surf.nbx, surf.nby,
920 surf.mode, surf.bpe, surf.nsamples,
921 surf.bankw, surf.bankh,
922 surf.tsplit, surf.mtilea);
923 return -EINVAL;
927 return 0;
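/*
 * Mipmap handling in evergreen_cs_track_validate_texture() above: each level
 * is minified with r600_mip_minify(), re-aligned to the palign and halign of
 * its array mode, and a 2D-tiled level is treated as 1D-tiled once it is
 * smaller than a macro tile; the accumulated per-level offsets are then
 * checked against the mipmap bo size.
 */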
930 static int evergreen_cs_track_check(struct radeon_cs_parser *p)
932 struct evergreen_cs_track *track = p->track;
933 unsigned tmp, i;
934 int r;
935 unsigned buffer_mask = 0;
937 /* check streamout */
938 if (track->streamout_dirty && track->vgt_strmout_config) {
939 for (i = 0; i < 4; i++) {
940 if (track->vgt_strmout_config & (1 << i)) {
941 buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
945 for (i = 0; i < 4; i++) {
946 if (buffer_mask & (1 << i)) {
947 if (track->vgt_strmout_bo[i]) {
948 u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
949 (u64)track->vgt_strmout_size[i];
950 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
951 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
952 i, offset,
953 radeon_bo_size(track->vgt_strmout_bo[i]));
954 return -EINVAL;
956 } else {
957 dev_warn(p->dev, "No buffer for streamout %d\n", i);
958 return -EINVAL;
962 track->streamout_dirty = false;
965 if (track->sx_misc_kill_all_prims)
966 return 0;
968 /* check that we have a cb for each enabled target
969 */
970 if (track->cb_dirty) {
971 tmp = track->cb_target_mask;
972 for (i = 0; i < 8; i++) {
973 u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
975 if (format != V_028C70_COLOR_INVALID &&
976 (tmp >> (i * 4)) & 0xF) {
977 /* at least one component is enabled */
978 if (track->cb_color_bo[i] == NULL) {
979 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
980 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
981 return -EINVAL;
983 /* check cb */
984 r = evergreen_cs_track_validate_cb(p, i);
985 if (r) {
986 return r;
990 track->cb_dirty = false;
993 if (track->db_dirty) {
994 /* Check stencil buffer */
995 if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
996 G_028800_STENCIL_ENABLE(track->db_depth_control)) {
997 r = evergreen_cs_track_validate_stencil(p);
998 if (r)
999 return r;
1001 /* Check depth buffer */
1002 if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
1003 G_028800_Z_ENABLE(track->db_depth_control)) {
1004 r = evergreen_cs_track_validate_depth(p);
1005 if (r)
1006 return r;
1008 track->db_dirty = false;
1011 return 0;
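/*
 * evergreen_cs_track_check() above is called from the draw packets in
 * evergreen_packet3_check(): it validates the enabled streamout buffers,
 * then each color target enabled in CB_TARGET_MASK and the stencil and depth
 * buffers, re-checking only the state whose dirty flag is set and skipping
 * the CB/DB checks entirely when SX_MISC kill_all_prims is set.
 */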
1015 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
1016 * @p: parser structure holding parsing context.
1018 * This is an Evergreen(+)-specific function for parsing VLINE packets.
1019 * The real work is done by the r600_cs_common_vline_parse() function.
1020 * Here we just set up the ASIC-specific register tables and call
1021 * the common implementation function.
1023 static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
1026 static uint32_t vline_start_end[6] = {
1027 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
1028 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
1029 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
1030 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
1031 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
1032 EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
1033 };
1034 static uint32_t vline_status[6] = {
1035 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
1036 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
1037 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
1038 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
1039 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
1040 EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
1041 };
1043 return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
1046 static int evergreen_packet0_check(struct radeon_cs_parser *p,
1047 struct radeon_cs_packet *pkt,
1048 unsigned idx, unsigned reg)
1050 int r;
1052 switch (reg) {
1053 case EVERGREEN_VLINE_START_END:
1054 r = evergreen_cs_packet_parse_vline(p);
1055 if (r) {
1056 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1057 idx, reg);
1058 return r;
1060 break;
1061 default:
1062 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1063 reg, idx);
1064 return -EINVAL;
1066 return 0;
1069 static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1070 struct radeon_cs_packet *pkt)
1072 unsigned reg, i;
1073 unsigned idx;
1074 int r;
1076 idx = pkt->idx + 1;
1077 reg = pkt->reg;
1078 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
1079 r = evergreen_packet0_check(p, pkt, idx, reg);
1080 if (r) {
1081 return r;
1084 return 0;
1088 * evergreen_cs_handle_reg() - process registers that need special handling.
1089 * @p: parser structure holding parsing context
1090 * @reg: register we are testing
1091 * @idx: index into the cs buffer
1093 static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1095 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1096 struct radeon_bo_list *reloc;
1097 u32 tmp, *ib;
1098 int r;
1100 ib = p->ib.ptr;
1101 switch (reg) {
1102 /* force the following regs to 0 in an attempt to disable the out buffer;
1103 * we would need to understand how it works better before we can do a
1104 * proper security check on it (Jerome)
1105 */
1106 case SQ_ESGS_RING_SIZE:
1107 case SQ_GSVS_RING_SIZE:
1108 case SQ_ESTMP_RING_SIZE:
1109 case SQ_GSTMP_RING_SIZE:
1110 case SQ_HSTMP_RING_SIZE:
1111 case SQ_LSTMP_RING_SIZE:
1112 case SQ_PSTMP_RING_SIZE:
1113 case SQ_VSTMP_RING_SIZE:
1114 case SQ_ESGS_RING_ITEMSIZE:
1115 case SQ_ESTMP_RING_ITEMSIZE:
1116 case SQ_GSTMP_RING_ITEMSIZE:
1117 case SQ_GSVS_RING_ITEMSIZE:
1118 case SQ_GS_VERT_ITEMSIZE:
1119 case SQ_GS_VERT_ITEMSIZE_1:
1120 case SQ_GS_VERT_ITEMSIZE_2:
1121 case SQ_GS_VERT_ITEMSIZE_3:
1122 case SQ_GSVS_RING_OFFSET_1:
1123 case SQ_GSVS_RING_OFFSET_2:
1124 case SQ_GSVS_RING_OFFSET_3:
1125 case SQ_HSTMP_RING_ITEMSIZE:
1126 case SQ_LSTMP_RING_ITEMSIZE:
1127 case SQ_PSTMP_RING_ITEMSIZE:
1128 case SQ_VSTMP_RING_ITEMSIZE:
1129 case VGT_TF_RING_SIZE:
1130 /* get value to populate the IB don't remove */
1131 /*tmp =radeon_get_ib_value(p, idx);
1132 ib[idx] = 0;*/
1133 break;
1134 case SQ_ESGS_RING_BASE:
1135 case SQ_GSVS_RING_BASE:
1136 case SQ_ESTMP_RING_BASE:
1137 case SQ_GSTMP_RING_BASE:
1138 case SQ_HSTMP_RING_BASE:
1139 case SQ_LSTMP_RING_BASE:
1140 case SQ_PSTMP_RING_BASE:
1141 case SQ_VSTMP_RING_BASE:
1142 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1143 if (r) {
1144 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1145 "0x%04X\n", reg);
1146 return -EINVAL;
1148 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1149 break;
1150 case DB_DEPTH_CONTROL:
1151 track->db_depth_control = radeon_get_ib_value(p, idx);
1152 track->db_dirty = true;
1153 break;
1154 case CAYMAN_DB_EQAA:
1155 if (p->rdev->family < CHIP_CAYMAN) {
1156 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1157 "0x%04X\n", reg);
1158 return -EINVAL;
1160 break;
1161 case CAYMAN_DB_DEPTH_INFO:
1162 if (p->rdev->family < CHIP_CAYMAN) {
1163 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1164 "0x%04X\n", reg);
1165 return -EINVAL;
1167 break;
1168 case DB_Z_INFO:
1169 track->db_z_info = radeon_get_ib_value(p, idx);
1170 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1171 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1172 if (r) {
1173 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1174 "0x%04X\n", reg);
1175 return -EINVAL;
1177 ib[idx] &= ~Z_ARRAY_MODE(0xf);
1178 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
1179 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1180 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1181 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1182 unsigned bankw, bankh, mtaspect, tile_split;
1184 evergreen_tiling_fields(reloc->tiling_flags,
1185 &bankw, &bankh, &mtaspect,
1186 &tile_split);
1187 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1188 ib[idx] |= DB_TILE_SPLIT(tile_split) |
1189 DB_BANK_WIDTH(bankw) |
1190 DB_BANK_HEIGHT(bankh) |
1191 DB_MACRO_TILE_ASPECT(mtaspect);
1194 track->db_dirty = true;
1195 break;
1196 case DB_STENCIL_INFO:
1197 track->db_s_info = radeon_get_ib_value(p, idx);
1198 track->db_dirty = true;
1199 break;
1200 case DB_DEPTH_VIEW:
1201 track->db_depth_view = radeon_get_ib_value(p, idx);
1202 track->db_dirty = true;
1203 break;
1204 case DB_DEPTH_SIZE:
1205 track->db_depth_size = radeon_get_ib_value(p, idx);
1206 track->db_dirty = true;
1207 break;
1208 case R_02805C_DB_DEPTH_SLICE:
1209 track->db_depth_slice = radeon_get_ib_value(p, idx);
1210 track->db_dirty = true;
1211 break;
1212 case DB_Z_READ_BASE:
1213 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1214 if (r) {
1215 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1216 "0x%04X\n", reg);
1217 return -EINVAL;
1219 track->db_z_read_offset = radeon_get_ib_value(p, idx);
1220 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1221 track->db_z_read_bo = reloc->robj;
1222 track->db_dirty = true;
1223 break;
1224 case DB_Z_WRITE_BASE:
1225 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1226 if (r) {
1227 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1228 "0x%04X\n", reg);
1229 return -EINVAL;
1231 track->db_z_write_offset = radeon_get_ib_value(p, idx);
1232 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1233 track->db_z_write_bo = reloc->robj;
1234 track->db_dirty = true;
1235 break;
1236 case DB_STENCIL_READ_BASE:
1237 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1238 if (r) {
1239 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1240 "0x%04X\n", reg);
1241 return -EINVAL;
1243 track->db_s_read_offset = radeon_get_ib_value(p, idx);
1244 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1245 track->db_s_read_bo = reloc->robj;
1246 track->db_dirty = true;
1247 break;
1248 case DB_STENCIL_WRITE_BASE:
1249 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1250 if (r) {
1251 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1252 "0x%04X\n", reg);
1253 return -EINVAL;
1255 track->db_s_write_offset = radeon_get_ib_value(p, idx);
1256 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1257 track->db_s_write_bo = reloc->robj;
1258 track->db_dirty = true;
1259 break;
1260 case VGT_STRMOUT_CONFIG:
1261 track->vgt_strmout_config = radeon_get_ib_value(p, idx);
1262 track->streamout_dirty = true;
1263 break;
1264 case VGT_STRMOUT_BUFFER_CONFIG:
1265 track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
1266 track->streamout_dirty = true;
1267 break;
1268 case VGT_STRMOUT_BUFFER_BASE_0:
1269 case VGT_STRMOUT_BUFFER_BASE_1:
1270 case VGT_STRMOUT_BUFFER_BASE_2:
1271 case VGT_STRMOUT_BUFFER_BASE_3:
1272 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1273 if (r) {
1274 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1275 "0x%04X\n", reg);
1276 return -EINVAL;
1278 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1279 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1280 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1281 track->vgt_strmout_bo[tmp] = reloc->robj;
1282 track->streamout_dirty = true;
1283 break;
1284 case VGT_STRMOUT_BUFFER_SIZE_0:
1285 case VGT_STRMOUT_BUFFER_SIZE_1:
1286 case VGT_STRMOUT_BUFFER_SIZE_2:
1287 case VGT_STRMOUT_BUFFER_SIZE_3:
1288 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1289 /* size in register is DWs, convert to bytes */
1290 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1291 track->streamout_dirty = true;
1292 break;
1293 case CP_COHER_BASE:
1294 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1295 if (r) {
1296 dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1297 "0x%04X\n", reg);
1298 return -EINVAL;
1300 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1301 case CB_TARGET_MASK:
1302 track->cb_target_mask = radeon_get_ib_value(p, idx);
1303 track->cb_dirty = true;
1304 break;
1305 case CB_SHADER_MASK:
1306 track->cb_shader_mask = radeon_get_ib_value(p, idx);
1307 track->cb_dirty = true;
1308 break;
1309 case PA_SC_AA_CONFIG:
1310 if (p->rdev->family >= CHIP_CAYMAN) {
1311 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1312 "0x%04X\n", reg);
1313 return -EINVAL;
1315 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
1316 track->nsamples = 1 << tmp;
1317 break;
1318 case CAYMAN_PA_SC_AA_CONFIG:
1319 if (p->rdev->family < CHIP_CAYMAN) {
1320 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1321 "0x%04X\n", reg);
1322 return -EINVAL;
1324 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
1325 track->nsamples = 1 << tmp;
1326 break;
1327 case CB_COLOR0_VIEW:
1328 case CB_COLOR1_VIEW:
1329 case CB_COLOR2_VIEW:
1330 case CB_COLOR3_VIEW:
1331 case CB_COLOR4_VIEW:
1332 case CB_COLOR5_VIEW:
1333 case CB_COLOR6_VIEW:
1334 case CB_COLOR7_VIEW:
1335 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
1336 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1337 track->cb_dirty = true;
1338 break;
1339 case CB_COLOR8_VIEW:
1340 case CB_COLOR9_VIEW:
1341 case CB_COLOR10_VIEW:
1342 case CB_COLOR11_VIEW:
1343 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
1344 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1345 track->cb_dirty = true;
1346 break;
1347 case CB_COLOR0_INFO:
1348 case CB_COLOR1_INFO:
1349 case CB_COLOR2_INFO:
1350 case CB_COLOR3_INFO:
1351 case CB_COLOR4_INFO:
1352 case CB_COLOR5_INFO:
1353 case CB_COLOR6_INFO:
1354 case CB_COLOR7_INFO:
1355 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
1356 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1357 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1358 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1359 if (r) {
1360 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1361 "0x%04X\n", reg);
1362 return -EINVAL;
1364 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1365 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1367 track->cb_dirty = true;
1368 break;
1369 case CB_COLOR8_INFO:
1370 case CB_COLOR9_INFO:
1371 case CB_COLOR10_INFO:
1372 case CB_COLOR11_INFO:
1373 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
1374 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1375 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1376 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1377 if (r) {
1378 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1379 "0x%04X\n", reg);
1380 return -EINVAL;
1382 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1383 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
1385 track->cb_dirty = true;
1386 break;
1387 case CB_COLOR0_PITCH:
1388 case CB_COLOR1_PITCH:
1389 case CB_COLOR2_PITCH:
1390 case CB_COLOR3_PITCH:
1391 case CB_COLOR4_PITCH:
1392 case CB_COLOR5_PITCH:
1393 case CB_COLOR6_PITCH:
1394 case CB_COLOR7_PITCH:
1395 tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
1396 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1397 track->cb_dirty = true;
1398 break;
1399 case CB_COLOR8_PITCH:
1400 case CB_COLOR9_PITCH:
1401 case CB_COLOR10_PITCH:
1402 case CB_COLOR11_PITCH:
1403 tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
1404 track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
1405 track->cb_dirty = true;
1406 break;
1407 case CB_COLOR0_SLICE:
1408 case CB_COLOR1_SLICE:
1409 case CB_COLOR2_SLICE:
1410 case CB_COLOR3_SLICE:
1411 case CB_COLOR4_SLICE:
1412 case CB_COLOR5_SLICE:
1413 case CB_COLOR6_SLICE:
1414 case CB_COLOR7_SLICE:
1415 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
1416 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1417 track->cb_color_slice_idx[tmp] = idx;
1418 track->cb_dirty = true;
1419 break;
1420 case CB_COLOR8_SLICE:
1421 case CB_COLOR9_SLICE:
1422 case CB_COLOR10_SLICE:
1423 case CB_COLOR11_SLICE:
1424 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
1425 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1426 track->cb_color_slice_idx[tmp] = idx;
1427 track->cb_dirty = true;
1428 break;
1429 case CB_COLOR0_ATTRIB:
1430 case CB_COLOR1_ATTRIB:
1431 case CB_COLOR2_ATTRIB:
1432 case CB_COLOR3_ATTRIB:
1433 case CB_COLOR4_ATTRIB:
1434 case CB_COLOR5_ATTRIB:
1435 case CB_COLOR6_ATTRIB:
1436 case CB_COLOR7_ATTRIB:
1437 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1438 if (r) {
1439 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1440 "0x%04X\n", reg);
1441 return -EINVAL;
1443 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1444 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1445 unsigned bankw, bankh, mtaspect, tile_split;
1447 evergreen_tiling_fields(reloc->tiling_flags,
1448 &bankw, &bankh, &mtaspect,
1449 &tile_split);
1450 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1451 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1452 CB_BANK_WIDTH(bankw) |
1453 CB_BANK_HEIGHT(bankh) |
1454 CB_MACRO_TILE_ASPECT(mtaspect);
1457 tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
1458 track->cb_color_attrib[tmp] = ib[idx];
1459 track->cb_dirty = true;
1460 break;
1461 case CB_COLOR8_ATTRIB:
1462 case CB_COLOR9_ATTRIB:
1463 case CB_COLOR10_ATTRIB:
1464 case CB_COLOR11_ATTRIB:
1465 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1466 if (r) {
1467 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1468 "0x%04X\n", reg);
1469 return -EINVAL;
1471 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1472 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1473 unsigned bankw, bankh, mtaspect, tile_split;
1475 evergreen_tiling_fields(reloc->tiling_flags,
1476 &bankw, &bankh, &mtaspect,
1477 &tile_split);
1478 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1479 ib[idx] |= CB_TILE_SPLIT(tile_split) |
1480 CB_BANK_WIDTH(bankw) |
1481 CB_BANK_HEIGHT(bankh) |
1482 CB_MACRO_TILE_ASPECT(mtaspect);
1485 tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
1486 track->cb_color_attrib[tmp] = ib[idx];
1487 track->cb_dirty = true;
1488 break;
1489 case CB_COLOR0_FMASK:
1490 case CB_COLOR1_FMASK:
1491 case CB_COLOR2_FMASK:
1492 case CB_COLOR3_FMASK:
1493 case CB_COLOR4_FMASK:
1494 case CB_COLOR5_FMASK:
1495 case CB_COLOR6_FMASK:
1496 case CB_COLOR7_FMASK:
1497 tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
1498 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1499 if (r) {
1500 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1501 return -EINVAL;
1503 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1504 track->cb_color_fmask_bo[tmp] = reloc->robj;
1505 break;
1506 case CB_COLOR0_CMASK:
1507 case CB_COLOR1_CMASK:
1508 case CB_COLOR2_CMASK:
1509 case CB_COLOR3_CMASK:
1510 case CB_COLOR4_CMASK:
1511 case CB_COLOR5_CMASK:
1512 case CB_COLOR6_CMASK:
1513 case CB_COLOR7_CMASK:
1514 tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
1515 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1516 if (r) {
1517 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1518 return -EINVAL;
1520 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1521 track->cb_color_cmask_bo[tmp] = reloc->robj;
1522 break;
1523 case CB_COLOR0_FMASK_SLICE:
1524 case CB_COLOR1_FMASK_SLICE:
1525 case CB_COLOR2_FMASK_SLICE:
1526 case CB_COLOR3_FMASK_SLICE:
1527 case CB_COLOR4_FMASK_SLICE:
1528 case CB_COLOR5_FMASK_SLICE:
1529 case CB_COLOR6_FMASK_SLICE:
1530 case CB_COLOR7_FMASK_SLICE:
1531 tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
1532 track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
1533 break;
1534 case CB_COLOR0_CMASK_SLICE:
1535 case CB_COLOR1_CMASK_SLICE:
1536 case CB_COLOR2_CMASK_SLICE:
1537 case CB_COLOR3_CMASK_SLICE:
1538 case CB_COLOR4_CMASK_SLICE:
1539 case CB_COLOR5_CMASK_SLICE:
1540 case CB_COLOR6_CMASK_SLICE:
1541 case CB_COLOR7_CMASK_SLICE:
1542 tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
1543 track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
1544 break;
1545 case CB_COLOR0_BASE:
1546 case CB_COLOR1_BASE:
1547 case CB_COLOR2_BASE:
1548 case CB_COLOR3_BASE:
1549 case CB_COLOR4_BASE:
1550 case CB_COLOR5_BASE:
1551 case CB_COLOR6_BASE:
1552 case CB_COLOR7_BASE:
1553 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1554 if (r) {
1555 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1556 "0x%04X\n", reg);
1557 return -EINVAL;
1559 tmp = (reg - CB_COLOR0_BASE) / 0x3c;
1560 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1561 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1562 track->cb_color_bo[tmp] = reloc->robj;
1563 track->cb_dirty = true;
1564 break;
1565 case CB_COLOR8_BASE:
1566 case CB_COLOR9_BASE:
1567 case CB_COLOR10_BASE:
1568 case CB_COLOR11_BASE:
1569 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1570 if (r) {
1571 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1572 "0x%04X\n", reg);
1573 return -EINVAL;
1575 tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
1576 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
1577 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1578 track->cb_color_bo[tmp] = reloc->robj;
1579 track->cb_dirty = true;
1580 break;
1581 case DB_HTILE_DATA_BASE:
1582 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1583 if (r) {
1584 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1585 "0x%04X\n", reg);
1586 return -EINVAL;
1588 track->htile_offset = radeon_get_ib_value(p, idx);
1589 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1590 track->htile_bo = reloc->robj;
1591 track->db_dirty = true;
1592 break;
1593 case DB_HTILE_SURFACE:
1594 /* 8x8 only */
1595 track->htile_surface = radeon_get_ib_value(p, idx);
1596 /* force 8x8 htile width and height */
1597 ib[idx] |= 3;
1598 track->db_dirty = true;
1599 break;
1600 case CB_IMMED0_BASE:
1601 case CB_IMMED1_BASE:
1602 case CB_IMMED2_BASE:
1603 case CB_IMMED3_BASE:
1604 case CB_IMMED4_BASE:
1605 case CB_IMMED5_BASE:
1606 case CB_IMMED6_BASE:
1607 case CB_IMMED7_BASE:
1608 case CB_IMMED8_BASE:
1609 case CB_IMMED9_BASE:
1610 case CB_IMMED10_BASE:
1611 case CB_IMMED11_BASE:
1612 case SQ_PGM_START_FS:
1613 case SQ_PGM_START_ES:
1614 case SQ_PGM_START_VS:
1615 case SQ_PGM_START_GS:
1616 case SQ_PGM_START_PS:
1617 case SQ_PGM_START_HS:
1618 case SQ_PGM_START_LS:
1619 case SQ_CONST_MEM_BASE:
1620 case SQ_ALU_CONST_CACHE_GS_0:
1621 case SQ_ALU_CONST_CACHE_GS_1:
1622 case SQ_ALU_CONST_CACHE_GS_2:
1623 case SQ_ALU_CONST_CACHE_GS_3:
1624 case SQ_ALU_CONST_CACHE_GS_4:
1625 case SQ_ALU_CONST_CACHE_GS_5:
1626 case SQ_ALU_CONST_CACHE_GS_6:
1627 case SQ_ALU_CONST_CACHE_GS_7:
1628 case SQ_ALU_CONST_CACHE_GS_8:
1629 case SQ_ALU_CONST_CACHE_GS_9:
1630 case SQ_ALU_CONST_CACHE_GS_10:
1631 case SQ_ALU_CONST_CACHE_GS_11:
1632 case SQ_ALU_CONST_CACHE_GS_12:
1633 case SQ_ALU_CONST_CACHE_GS_13:
1634 case SQ_ALU_CONST_CACHE_GS_14:
1635 case SQ_ALU_CONST_CACHE_GS_15:
1636 case SQ_ALU_CONST_CACHE_PS_0:
1637 case SQ_ALU_CONST_CACHE_PS_1:
1638 case SQ_ALU_CONST_CACHE_PS_2:
1639 case SQ_ALU_CONST_CACHE_PS_3:
1640 case SQ_ALU_CONST_CACHE_PS_4:
1641 case SQ_ALU_CONST_CACHE_PS_5:
1642 case SQ_ALU_CONST_CACHE_PS_6:
1643 case SQ_ALU_CONST_CACHE_PS_7:
1644 case SQ_ALU_CONST_CACHE_PS_8:
1645 case SQ_ALU_CONST_CACHE_PS_9:
1646 case SQ_ALU_CONST_CACHE_PS_10:
1647 case SQ_ALU_CONST_CACHE_PS_11:
1648 case SQ_ALU_CONST_CACHE_PS_12:
1649 case SQ_ALU_CONST_CACHE_PS_13:
1650 case SQ_ALU_CONST_CACHE_PS_14:
1651 case SQ_ALU_CONST_CACHE_PS_15:
1652 case SQ_ALU_CONST_CACHE_VS_0:
1653 case SQ_ALU_CONST_CACHE_VS_1:
1654 case SQ_ALU_CONST_CACHE_VS_2:
1655 case SQ_ALU_CONST_CACHE_VS_3:
1656 case SQ_ALU_CONST_CACHE_VS_4:
1657 case SQ_ALU_CONST_CACHE_VS_5:
1658 case SQ_ALU_CONST_CACHE_VS_6:
1659 case SQ_ALU_CONST_CACHE_VS_7:
1660 case SQ_ALU_CONST_CACHE_VS_8:
1661 case SQ_ALU_CONST_CACHE_VS_9:
1662 case SQ_ALU_CONST_CACHE_VS_10:
1663 case SQ_ALU_CONST_CACHE_VS_11:
1664 case SQ_ALU_CONST_CACHE_VS_12:
1665 case SQ_ALU_CONST_CACHE_VS_13:
1666 case SQ_ALU_CONST_CACHE_VS_14:
1667 case SQ_ALU_CONST_CACHE_VS_15:
1668 case SQ_ALU_CONST_CACHE_HS_0:
1669 case SQ_ALU_CONST_CACHE_HS_1:
1670 case SQ_ALU_CONST_CACHE_HS_2:
1671 case SQ_ALU_CONST_CACHE_HS_3:
1672 case SQ_ALU_CONST_CACHE_HS_4:
1673 case SQ_ALU_CONST_CACHE_HS_5:
1674 case SQ_ALU_CONST_CACHE_HS_6:
1675 case SQ_ALU_CONST_CACHE_HS_7:
1676 case SQ_ALU_CONST_CACHE_HS_8:
1677 case SQ_ALU_CONST_CACHE_HS_9:
1678 case SQ_ALU_CONST_CACHE_HS_10:
1679 case SQ_ALU_CONST_CACHE_HS_11:
1680 case SQ_ALU_CONST_CACHE_HS_12:
1681 case SQ_ALU_CONST_CACHE_HS_13:
1682 case SQ_ALU_CONST_CACHE_HS_14:
1683 case SQ_ALU_CONST_CACHE_HS_15:
1684 case SQ_ALU_CONST_CACHE_LS_0:
1685 case SQ_ALU_CONST_CACHE_LS_1:
1686 case SQ_ALU_CONST_CACHE_LS_2:
1687 case SQ_ALU_CONST_CACHE_LS_3:
1688 case SQ_ALU_CONST_CACHE_LS_4:
1689 case SQ_ALU_CONST_CACHE_LS_5:
1690 case SQ_ALU_CONST_CACHE_LS_6:
1691 case SQ_ALU_CONST_CACHE_LS_7:
1692 case SQ_ALU_CONST_CACHE_LS_8:
1693 case SQ_ALU_CONST_CACHE_LS_9:
1694 case SQ_ALU_CONST_CACHE_LS_10:
1695 case SQ_ALU_CONST_CACHE_LS_11:
1696 case SQ_ALU_CONST_CACHE_LS_12:
1697 case SQ_ALU_CONST_CACHE_LS_13:
1698 case SQ_ALU_CONST_CACHE_LS_14:
1699 case SQ_ALU_CONST_CACHE_LS_15:
1700 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1701 if (r) {
1702 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1703 "0x%04X\n", reg);
1704 return -EINVAL;
1706 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1707 break;
1708 case SX_MEMORY_EXPORT_BASE:
1709 if (p->rdev->family >= CHIP_CAYMAN) {
1710 dev_warn(p->dev, "bad SET_CONFIG_REG "
1711 "0x%04X\n", reg);
1712 return -EINVAL;
1714 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1715 if (r) {
1716 dev_warn(p->dev, "bad SET_CONFIG_REG "
1717 "0x%04X\n", reg);
1718 return -EINVAL;
1720 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1721 break;
1722 case CAYMAN_SX_SCATTER_EXPORT_BASE:
1723 if (p->rdev->family < CHIP_CAYMAN) {
1724 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1725 "0x%04X\n", reg);
1726 return -EINVAL;
1728 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1729 if (r) {
1730 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1731 "0x%04X\n", reg);
1732 return -EINVAL;
1734 ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1735 break;
1736 case SX_MISC:
1737 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1738 break;
1739 default:
1740 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1741 return -EINVAL;
1743 return 0;
1747 * evergreen_is_safe_reg() - check if register is authorized or not
1748 * @p: parser structure holding parsing context
1749 * @reg: register we are testing
1751 * This function will test against reg_safe_bm and return true
1752 * if the register is safe, or false otherwise.
1754 static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
1756 struct evergreen_cs_track *track = p->track;
1757 u32 m, i;
1759 i = (reg >> 7);
1760 if (unlikely(i >= REG_SAFE_BM_SIZE)) {
1761 return false;
1763 m = 1 << ((reg >> 2) & 31);
1764 if (!(track->reg_safe_bm[i] & m))
1765 return true;
1767 return false;
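/*
 * The reg_safe_bm bitmap packs one bit per dword register: the word index is
 * reg >> 7 (each 32-bit word covers a 128-byte register range) and the bit
 * within it is (reg >> 2) & 31. A clear bit means the register can be written
 * freely; a set bit marks a register that is not on the generated safe list
 * and is expected to go through evergreen_cs_handle_reg() instead.
 */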
1770 static int evergreen_packet3_check(struct radeon_cs_parser *p,
1771 struct radeon_cs_packet *pkt)
1773 struct radeon_bo_list *reloc;
1774 struct evergreen_cs_track *track;
1775 uint32_t *ib;
1776 unsigned idx;
1777 unsigned i;
1778 unsigned start_reg, end_reg, reg;
1779 int r;
1780 u32 idx_value;
1782 track = (struct evergreen_cs_track *)p->track;
1783 ib = p->ib.ptr;
1784 idx = pkt->idx + 1;
1785 idx_value = radeon_get_ib_value(p, idx);
1787 switch (pkt->opcode) {
1788 case PACKET3_SET_PREDICATION:
1790 int pred_op;
1791 int tmp;
1792 uint64_t offset;
1794 if (pkt->count != 1) {
1795 DRM_ERROR("bad SET PREDICATION\n");
1796 return -EINVAL;
1799 tmp = radeon_get_ib_value(p, idx + 1);
1800 pred_op = (tmp >> 16) & 0x7;
1802 /* for the clear predicate operation */
1803 if (pred_op == 0)
1804 return 0;
1806 if (pred_op > 2) {
1807 DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1808 return -EINVAL;
1811 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1812 if (r) {
1813 DRM_ERROR("bad SET PREDICATION\n");
1814 return -EINVAL;
1817 offset = reloc->gpu_offset +
1818 (idx_value & 0xfffffff0) +
1819 ((u64)(tmp & 0xff) << 32);
1821 ib[idx + 0] = offset;
1822 ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1824 break;
1825 case PACKET3_CONTEXT_CONTROL:
1826 if (pkt->count != 1) {
1827 DRM_ERROR("bad CONTEXT_CONTROL\n");
1828 return -EINVAL;
1830 break;
1831 case PACKET3_INDEX_TYPE:
1832 case PACKET3_NUM_INSTANCES:
1833 case PACKET3_CLEAR_STATE:
1834 if (pkt->count) {
1835 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1836 return -EINVAL;
1838 break;
1839 case CAYMAN_PACKET3_DEALLOC_STATE:
1840 if (p->rdev->family < CHIP_CAYMAN) {
1841 DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
1842 return -EINVAL;
1844 if (pkt->count) {
1845 DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
1846 return -EINVAL;
1848 break;
1849 case PACKET3_INDEX_BASE:
1851 uint64_t offset;
1853 if (pkt->count != 1) {
1854 DRM_ERROR("bad INDEX_BASE\n");
1855 return -EINVAL;
1857 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1858 if (r) {
1859 DRM_ERROR("bad INDEX_BASE\n");
1860 return -EINVAL;
1863 offset = reloc->gpu_offset +
1864 idx_value +
1865 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1867 ib[idx+0] = offset;
1868 ib[idx+1] = upper_32_bits(offset) & 0xff;
1870 r = evergreen_cs_track_check(p);
1871 if (r) {
1872 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1873 return r;
1875 break;
1877 case PACKET3_INDEX_BUFFER_SIZE:
1879 if (pkt->count != 0) {
1880 DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
1881 return -EINVAL;
1883 break;
1885 case PACKET3_DRAW_INDEX:
1887 uint64_t offset;
1888 if (pkt->count != 3) {
1889 DRM_ERROR("bad DRAW_INDEX\n");
1890 return -EINVAL;
1892 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1893 if (r) {
1894 DRM_ERROR("bad DRAW_INDEX\n");
1895 return -EINVAL;
1898 offset = reloc->gpu_offset +
1899 idx_value +
1900 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1902 ib[idx+0] = offset;
1903 ib[idx+1] = upper_32_bits(offset) & 0xff;
1905 r = evergreen_cs_track_check(p);
1906 if (r) {
1907 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1908 return r;
1910 break;
1912 case PACKET3_DRAW_INDEX_2:
1914 uint64_t offset;
1916 if (pkt->count != 4) {
1917 DRM_ERROR("bad DRAW_INDEX_2\n");
1918 return -EINVAL;
1920 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1921 if (r) {
1922 DRM_ERROR("bad DRAW_INDEX_2\n");
1923 return -EINVAL;
1926 offset = reloc->gpu_offset +
1927 radeon_get_ib_value(p, idx+1) +
1928 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1930 ib[idx+1] = offset;
1931 ib[idx+2] = upper_32_bits(offset) & 0xff;
1933 r = evergreen_cs_track_check(p);
1934 if (r) {
1935 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1936 return r;
1938 break;
1940 case PACKET3_DRAW_INDEX_AUTO:
1941 if (pkt->count != 1) {
1942 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1943 return -EINVAL;
1945 r = evergreen_cs_track_check(p);
1946 if (r) {
1947 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1948 return r;
1950 break;
1951 case PACKET3_DRAW_INDEX_MULTI_AUTO:
1952 if (pkt->count != 2) {
1953 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1954 return -EINVAL;
1956 r = evergreen_cs_track_check(p);
1957 if (r) {
1958 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1959 return r;
1961 break;
1962 case PACKET3_DRAW_INDEX_IMMD:
1963 if (pkt->count < 2) {
1964 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1965 return -EINVAL;
1967 r = evergreen_cs_track_check(p);
1968 if (r) {
1969 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1970 return r;
1972 break;
1973 case PACKET3_DRAW_INDEX_OFFSET:
1974 if (pkt->count != 2) {
1975 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1976 return -EINVAL;
1978 r = evergreen_cs_track_check(p);
1979 if (r) {
1980 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1981 return r;
1983 break;
1984 case PACKET3_DRAW_INDEX_OFFSET_2:
1985 if (pkt->count != 3) {
1986 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
1987 return -EINVAL;
1989 r = evergreen_cs_track_check(p);
1990 if (r) {
1991 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1992 return r;
1994 break;
1995 case PACKET3_SET_BASE:
1998 /* DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet.
1999 2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs.
2000 0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data.
2001 3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved
2002 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32] */
2004 if (pkt->count != 2) {
2005 DRM_ERROR("bad SET_BASE\n");
2006 return -EINVAL;
2009 /* currently only supporting setting indirect draw buffer base address */
2010 if (idx_value != 1) {
2011 DRM_ERROR("bad SET_BASE\n");
2012 return -EINVAL;
2015 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2016 if (r) {
2017 DRM_ERROR("bad SET_BASE\n");
2018 return -EINVAL;
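/* remember the bo size so DRAW_INDIRECT/DRAW_INDEX_INDIRECT below can be
 * bounds checked against it */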
2021 track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);
2023 ib[idx+1] = reloc->gpu_offset;
2024 ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
2026 break;
2028 case PACKET3_DRAW_INDIRECT:
2029 case PACKET3_DRAW_INDEX_INDIRECT:
2031 u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
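/* byte size of the indirect argument block fetched by the CP: 16 bytes for
 * DRAW_INDIRECT, 20 for DRAW_INDEX_INDIRECT (presumably one extra dword for
 * the base vertex) */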
2034 /* DW 1 HEADER
2035 2 DATA_OFFSET Bits [31:0] + byte aligned offset where the required data structure starts. Bits 1:0 are zero
2036 3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context */
2038 if (pkt->count != 1) {
2039 DRM_ERROR("bad DRAW_INDIRECT\n");
2040 return -EINVAL;
2043 if (idx_value + size > track->indirect_draw_buffer_size) {
2044 dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
2045 idx_value, size, track->indirect_draw_buffer_size);
2046 return -EINVAL;
2049 r = evergreen_cs_track_check(p);
2050 if (r) {
2051 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2052 return r;
2054 break;
2056 case PACKET3_DISPATCH_DIRECT:
2057 if (pkt->count != 3) {
2058 DRM_ERROR("bad DISPATCH_DIRECT\n");
2059 return -EINVAL;
2061 r = evergreen_cs_track_check(p);
2062 if (r) {
2063 dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
2064 return r;
2066 break;
2067 case PACKET3_DISPATCH_INDIRECT:
2068 if (pkt->count != 1) {
2069 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2070 return -EINVAL;
2072 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2073 if (r) {
2074 DRM_ERROR("bad DISPATCH_INDIRECT\n");
2075 return -EINVAL;
2077 ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
2078 r = evergreen_cs_track_check(p);
2079 if (r) {
2080 dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
2081 return r;
2083 break;
2084 case PACKET3_WAIT_REG_MEM:
2085 if (pkt->count != 5) {
2086 DRM_ERROR("bad WAIT_REG_MEM\n");
2087 return -EINVAL;
2089 /* bit 4 is reg (0) or mem (1) */
2090 if (idx_value & 0x10) {
2091 uint64_t offset;
2093 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2094 if (r) {
2095 DRM_ERROR("bad WAIT_REG_MEM\n");
2096 return -EINVAL;
2099 offset = reloc->gpu_offset +
2100 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2101 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2103 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
2104 ib[idx+2] = upper_32_bits(offset) & 0xff;
2105 } else if (idx_value & 0x100) {
2106 DRM_ERROR("cannot use PFP on REG wait\n");
2107 return -EINVAL;
2109 break;
2110 case PACKET3_CP_DMA:
2112 u32 command, size, info;
2113 u64 offset, tmp;
2114 if (pkt->count != 4) {
2115 DRM_ERROR("bad CP DMA\n");
2116 return -EINVAL;
2118 command = radeon_get_ib_value(p, idx+4);
2119 size = command & 0x1fffff;
2120 info = radeon_get_ib_value(p, idx+1);
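/* fields used below: command[20:0] = byte count, info[30:29] = SRC_SEL
 * (0 = memory, 1 = GDS, 2 = embedded data), info[21:20] = DST_SEL
 * (0 = memory, 1 = GDS) */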
2121 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
2122 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
2123 ((((info & 0x00300000) >> 20) == 0) &&
2124 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
2125 ((((info & 0x60000000) >> 29) == 0) &&
2126 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
2127 /* non mem-to-mem copies require a dw aligned count */
2128 if (size % 4) {
2129 DRM_ERROR("CP DMA command requires dw count alignment\n");
2130 return -EINVAL;
2133 if (command & PACKET3_CP_DMA_CMD_SAS) {
2134 /* src address space is register */
2135 /* GDS is ok */
2136 if (((info & 0x60000000) >> 29) != 1) {
2137 DRM_ERROR("CP DMA SAS not supported\n");
2138 return -EINVAL;
2140 } else {
2141 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2142 DRM_ERROR("CP DMA SAIC only supported for registers\n");
2143 return -EINVAL;
2145 /* src address space is memory */
2146 if (((info & 0x60000000) >> 29) == 0) {
2147 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2148 if (r) {
2149 DRM_ERROR("bad CP DMA SRC\n");
2150 return -EINVAL;
2153 tmp = radeon_get_ib_value(p, idx) +
2154 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2156 offset = reloc->gpu_offset + tmp;
2158 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2159 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
2160 tmp + size, radeon_bo_size(reloc->robj));
2161 return -EINVAL;
2164 ib[idx] = offset;
2165 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2166 } else if (((info & 0x60000000) >> 29) != 2) {
2167 DRM_ERROR("bad CP DMA SRC_SEL\n");
2168 return -EINVAL;
2171 if (command & PACKET3_CP_DMA_CMD_DAS) {
2172 /* dst address space is register */
2173 /* GDS is ok */
2174 if (((info & 0x00300000) >> 20) != 1) {
2175 DRM_ERROR("CP DMA DAS not supported\n");
2176 return -EINVAL;
2178 } else {
2179 /* dst address space is memory */
2180 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2181 DRM_ERROR("CP DMA DAIC only supported for registers\n");
2182 return -EINVAL;
2184 if (((info & 0x00300000) >> 20) == 0) {
2185 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2186 if (r) {
2187 DRM_ERROR("bad CP DMA DST\n");
2188 return -EINVAL;
2191 tmp = radeon_get_ib_value(p, idx+2) +
2192 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
2194 offset = reloc->gpu_offset + tmp;
2196 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2197 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
2198 tmp + size, radeon_bo_size(reloc->robj));
2199 return -EINVAL;
2202 ib[idx+2] = offset;
2203 ib[idx+3] = upper_32_bits(offset) & 0xff;
2204 } else {
2205 DRM_ERROR("bad CP DMA DST_SEL\n");
2206 return -EINVAL;
2209 break;
2211 case PACKET3_SURFACE_SYNC:
2212 if (pkt->count != 3) {
2213 DRM_ERROR("bad SURFACE_SYNC\n");
2214 return -EINVAL;
2216 /* 0xffffffff/0x0 is flush all cache flag */
2217 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
2218 radeon_get_ib_value(p, idx + 2) != 0) {
2219 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2220 if (r) {
2221 DRM_ERROR("bad SURFACE_SYNC\n");
2222 return -EINVAL;
2224 ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2226 break;
2227 case PACKET3_EVENT_WRITE:
2228 if (pkt->count != 2 && pkt->count != 0) {
2229 DRM_ERROR("bad EVENT_WRITE\n");
2230 return -EINVAL;
2232 if (pkt->count) {
2233 uint64_t offset;
2235 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2236 if (r) {
2237 DRM_ERROR("bad EVENT_WRITE\n");
2238 return -EINVAL;
2240 offset = reloc->gpu_offset +
2241 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
2242 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2244 ib[idx+1] = offset & 0xfffffff8;
2245 ib[idx+2] = upper_32_bits(offset) & 0xff;
2247 break;
2248 case PACKET3_EVENT_WRITE_EOP:
2250 uint64_t offset;
2252 if (pkt->count != 4) {
2253 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2254 return -EINVAL;
2256 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2257 if (r) {
2258 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2259 return -EINVAL;
2262 offset = reloc->gpu_offset +
2263 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2264 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2266 ib[idx+1] = offset & 0xfffffffc;
2267 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2268 break;
2270 case PACKET3_EVENT_WRITE_EOS:
2272 uint64_t offset;
2274 if (pkt->count != 3) {
2275 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2276 return -EINVAL;
2278 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2279 if (r) {
2280 DRM_ERROR("bad EVENT_WRITE_EOS\n");
2281 return -EINVAL;
2284 offset = reloc->gpu_offset +
2285 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2286 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2288 ib[idx+1] = offset & 0xfffffffc;
2289 ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2290 break;
2292 case PACKET3_SET_CONFIG_REG:
2293 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
2294 end_reg = 4 * pkt->count + start_reg - 4;
2295 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
2296 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2297 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2298 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2299 return -EINVAL;
2301 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
2302 if (evergreen_is_safe_reg(p, reg))
2303 continue;
2304 r = evergreen_cs_handle_reg(p, reg, idx);
2305 if (r)
2306 return r;
2308 break;
2309 case PACKET3_SET_CONTEXT_REG:
2310 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
2311 end_reg = 4 * pkt->count + start_reg - 4;
2312 if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
2313 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2314 (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2315 DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2316 return -EINVAL;
2318 for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
2319 if (evergreen_is_safe_reg(p, reg))
2320 continue;
2321 r = evergreen_cs_handle_reg(p, reg, idx);
2322 if (r)
2323 return r;
2325 break;
2326 case PACKET3_SET_RESOURCE:
2327 if (pkt->count % 8) {
2328 DRM_ERROR("bad SET_RESOURCE\n");
2329 return -EINVAL;
2331 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
2332 end_reg = 4 * pkt->count + start_reg - 4;
2333 if ((start_reg < PACKET3_SET_RESOURCE_START) ||
2334 (start_reg >= PACKET3_SET_RESOURCE_END) ||
2335 (end_reg >= PACKET3_SET_RESOURCE_END)) {
2336 DRM_ERROR("bad SET_RESOURCE\n");
2337 return -EINVAL;
2339 for (i = 0; i < (pkt->count / 8); i++) {
2340 struct radeon_bo *texture, *mipmap;
2341 u32 toffset, moffset;
2342 u32 size, offset, mip_address, tex_dim;
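/* each SET_RESOURCE slot is 8 dwords; dword 7 of the slot encodes the
 * resource type switched on here */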
2344 switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
2345 case SQ_TEX_VTX_VALID_TEXTURE:
2346 /* tex base */
2347 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2348 if (r) {
2349 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2350 return -EINVAL;
2352 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2353 ib[idx+1+(i*8)+1] |=
2354 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
2355 if (reloc->tiling_flags & RADEON_TILING_MACRO) {
2356 unsigned bankw, bankh, mtaspect, tile_split;
2358 evergreen_tiling_fields(reloc->tiling_flags,
2359 &bankw, &bankh, &mtaspect,
2360 &tile_split);
2361 ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
2362 ib[idx+1+(i*8)+7] |=
2363 TEX_BANK_WIDTH(bankw) |
2364 TEX_BANK_HEIGHT(bankh) |
2365 MACRO_TILE_ASPECT(mtaspect) |
2366 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
2369 texture = reloc->robj;
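/* base addresses are programmed in 256-byte units, hence the >> 8 */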
2370 toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2372 /* tex mip base */
2373 tex_dim = ib[idx+1+(i*8)+0] & 0x7;
2374 mip_address = ib[idx+1+(i*8)+3];
2376 if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
2377 !mip_address &&
2378 !radeon_cs_packet_next_is_pkt3_nop(p)) {
2379 /* MIP_ADDRESS should point to FMASK for an MSAA texture.
2380 * It should be 0 if FMASK is disabled. */
2381 moffset = 0;
2382 mipmap = NULL;
2383 } else {
2384 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2385 if (r) {
2386 DRM_ERROR("bad SET_RESOURCE (tex)\n");
2387 return -EINVAL;
2389 moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2390 mipmap = reloc->robj;
2393 r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
2394 if (r)
2395 return r;
2396 ib[idx+1+(i*8)+2] += toffset;
2397 ib[idx+1+(i*8)+3] += moffset;
2398 break;
2399 case SQ_TEX_VTX_VALID_BUFFER:
2401 uint64_t offset64;
2402 /* vtx base */
2403 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2404 if (r) {
2405 DRM_ERROR("bad SET_RESOURCE (vtx)\n");
2406 return -EINVAL;
2408 offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
2409 size = radeon_get_ib_value(p, idx+1+(i*8)+1);
2410 if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2411 /* force size to size of the buffer */
2412 dev_warn(p->dev, "vbo resource seems too big for the bo\n");
2413 ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
2416 offset64 = reloc->gpu_offset + offset;
2417 ib[idx+1+(i*8)+0] = offset64;
2418 ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2419 (upper_32_bits(offset64) & 0xff);
2420 break;
2422 case SQ_TEX_VTX_INVALID_TEXTURE:
2423 case SQ_TEX_VTX_INVALID_BUFFER:
2424 default:
2425 DRM_ERROR("bad SET_RESOURCE\n");
2426 return -EINVAL;
2429 break;
2430 case PACKET3_SET_ALU_CONST:
2431 /* XXX fix me ALU const buffers only */
2432 break;
2433 case PACKET3_SET_BOOL_CONST:
2434 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
2435 end_reg = 4 * pkt->count + start_reg - 4;
2436 if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
2437 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2438 (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2439 DRM_ERROR("bad SET_BOOL_CONST\n");
2440 return -EINVAL;
2442 break;
2443 case PACKET3_SET_LOOP_CONST:
2444 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
2445 end_reg = 4 * pkt->count + start_reg - 4;
2446 if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
2447 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2448 (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2449 DRM_ERROR("bad SET_LOOP_CONST\n");
2450 return -EINVAL;
2452 break;
2453 case PACKET3_SET_CTL_CONST:
2454 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
2455 end_reg = 4 * pkt->count + start_reg - 4;
2456 if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
2457 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2458 (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2459 DRM_ERROR("bad SET_CTL_CONST\n");
2460 return -EINVAL;
2462 break;
2463 case PACKET3_SET_SAMPLER:
2464 if (pkt->count % 3) {
2465 DRM_ERROR("bad SET_SAMPLER\n");
2466 return -EINVAL;
2468 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
2469 end_reg = 4 * pkt->count + start_reg - 4;
2470 if ((start_reg < PACKET3_SET_SAMPLER_START) ||
2471 (start_reg >= PACKET3_SET_SAMPLER_END) ||
2472 (end_reg >= PACKET3_SET_SAMPLER_END)) {
2473 DRM_ERROR("bad SET_SAMPLER\n");
2474 return -EINVAL;
2476 break;
2477 case PACKET3_STRMOUT_BUFFER_UPDATE:
2478 if (pkt->count != 4) {
2479 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2480 return -EINVAL;
2482 /* Updating memory at DST_ADDRESS. */
2483 if (idx_value & 0x1) {
2484 u64 offset;
2485 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2486 if (r) {
2487 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2488 return -EINVAL;
2490 offset = radeon_get_ib_value(p, idx+1);
2491 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2492 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2493 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2494 offset + 4, radeon_bo_size(reloc->robj));
2495 return -EINVAL;
2497 offset += reloc->gpu_offset;
2498 ib[idx+1] = offset;
2499 ib[idx+2] = upper_32_bits(offset) & 0xff;
2501 /* Reading data from SRC_ADDRESS. */
2502 if (((idx_value >> 1) & 0x3) == 2) {
2503 u64 offset;
2504 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2505 if (r) {
2506 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2507 return -EINVAL;
2509 offset = radeon_get_ib_value(p, idx+3);
2510 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2511 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2512 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2513 offset + 4, radeon_bo_size(reloc->robj));
2514 return -EINVAL;
2516 offset += reloc->gpu_offset;
2517 ib[idx+3] = offset;
2518 ib[idx+4] = upper_32_bits(offset) & 0xff;
2520 break;
2521 case PACKET3_MEM_WRITE:
2523 u64 offset;
2525 if (pkt->count != 3) {
2526 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2527 return -EINVAL;
2529 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2530 if (r) {
2531 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2532 return -EINVAL;
2534 offset = radeon_get_ib_value(p, idx+0);
2535 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2536 if (offset & 0x7) {
2537 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2538 return -EINVAL;
2540 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2541 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2542 offset + 8, radeon_bo_size(reloc->robj));
2543 return -EINVAL;
2545 offset += reloc->gpu_offset;
2546 ib[idx+0] = offset;
2547 ib[idx+1] = upper_32_bits(offset) & 0xff;
2548 break;
2550 case PACKET3_COPY_DW:
2551 if (pkt->count != 4) {
2552 DRM_ERROR("bad COPY_DW (invalid count)\n");
2553 return -EINVAL;
2555 if (idx_value & 0x1) {
2556 u64 offset;
2557 /* SRC is memory. */
2558 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2559 if (r) {
2560 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2561 return -EINVAL;
2563 offset = radeon_get_ib_value(p, idx+1);
2564 offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2565 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2566 DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2567 offset + 4, radeon_bo_size(reloc->robj));
2568 return -EINVAL;
2570 offset += reloc->gpu_offset;
2571 ib[idx+1] = offset;
2572 ib[idx+2] = upper_32_bits(offset) & 0xff;
2573 } else {
2574 /* SRC is a reg. */
2575 reg = radeon_get_ib_value(p, idx+1) << 2;
2576 if (!evergreen_is_safe_reg(p, reg)) {
2577 dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
2578 reg, idx + 1);
2579 return -EINVAL;
2582 if (idx_value & 0x2) {
2583 u64 offset;
2584 /* DST is memory. */
2585 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2586 if (r) {
2587 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2588 return -EINVAL;
2590 offset = radeon_get_ib_value(p, idx+3);
2591 offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2592 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2593 DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2594 offset + 4, radeon_bo_size(reloc->robj));
2595 return -EINVAL;
2597 offset += reloc->gpu_offset;
2598 ib[idx+3] = offset;
2599 ib[idx+4] = upper_32_bits(offset) & 0xff;
2600 } else {
2601 /* DST is a reg. */
2602 reg = radeon_get_ib_value(p, idx+3) << 2;
2603 if (!evergreen_is_safe_reg(p, reg)) {
2604 dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
2605 reg, idx + 3);
2606 return -EINVAL;
2609 break;
2610 case PACKET3_SET_APPEND_CNT:
2612 uint32_t areg;
2613 uint32_t allowed_reg_base;
2614 uint32_t source_sel;
2615 if (pkt->count != 2) {
2616 DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
2617 return -EINVAL;
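/* the packet addresses the GDS append counters as dword offsets relative
 * to the context register space; compute the allowed base in those units */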
2620 allowed_reg_base = GDS_APPEND_COUNT_0;
2621 allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
2622 allowed_reg_base >>= 2;
2624 areg = idx_value >> 16;
2625 if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
2626 dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
2627 areg, idx);
2628 return -EINVAL;
2631 source_sel = G_PACKET3_SET_APPEND_CNT_SRC_SELECT(idx_value);
2632 if (source_sel == PACKET3_SAC_SRC_SEL_MEM) {
2633 uint64_t offset;
2634 uint32_t swap;
2635 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2636 if (r) {
2637 DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
2638 return -EINVAL;
2640 offset = radeon_get_ib_value(p, idx + 1);
2641 swap = offset & 0x3;
2642 offset &= ~0x3;
2644 offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
2646 offset += reloc->gpu_offset;
2647 ib[idx+1] = (offset & 0xfffffffc) | swap;
2648 ib[idx+2] = upper_32_bits(offset) & 0xff;
2649 } else {
2650 DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
2651 return -EINVAL;
2653 break;
2655 case PACKET3_NOP:
2656 break;
2657 default:
2658 DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2659 return -EINVAL;
2661 return 0;
2664 int evergreen_cs_parse(struct radeon_cs_parser *p)
2666 struct radeon_cs_packet pkt;
2667 struct evergreen_cs_track *track;
2668 u32 tmp;
2669 int r;
2671 if (p->track == NULL) {
2672 /* initialize tracker, we are in kms */
2673 track = kzalloc(sizeof(*track), GFP_KERNEL);
2674 if (track == NULL)
2675 return -ENOMEM;
2676 evergreen_cs_track_init(track);
2677 if (p->rdev->family >= CHIP_CAYMAN) {
2678 tmp = p->rdev->config.cayman.tile_config;
2679 track->reg_safe_bm = cayman_reg_safe_bm;
2680 } else {
2681 tmp = p->rdev->config.evergreen.tile_config;
2682 track->reg_safe_bm = evergreen_reg_safe_bm;
2684 BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
2685 BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
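/* decode tile_config: bits 3:0 pipe count, bits 7:4 bank count,
 * bits 11:8 group size, bits 15:12 row size */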
2686 switch (tmp & 0xf) {
2687 case 0:
2688 track->npipes = 1;
2689 break;
2690 case 1:
2691 default:
2692 track->npipes = 2;
2693 break;
2694 case 2:
2695 track->npipes = 4;
2696 break;
2697 case 3:
2698 track->npipes = 8;
2699 break;
2702 switch ((tmp & 0xf0) >> 4) {
2703 case 0:
2704 track->nbanks = 4;
2705 break;
2706 case 1:
2707 default:
2708 track->nbanks = 8;
2709 break;
2710 case 2:
2711 track->nbanks = 16;
2712 break;
2715 switch ((tmp & 0xf00) >> 8) {
2716 case 0:
2717 track->group_size = 256;
2718 break;
2719 case 1:
2720 default:
2721 track->group_size = 512;
2722 break;
2725 switch ((tmp & 0xf000) >> 12) {
2726 case 0:
2727 track->row_size = 1;
2728 break;
2729 case 1:
2730 default:
2731 track->row_size = 2;
2732 break;
2733 case 2:
2734 track->row_size = 4;
2735 break;
2738 p->track = track;
2740 do {
2741 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2742 if (r) {
2743 kfree(p->track);
2744 p->track = NULL;
2745 return r;
2747 p->idx += pkt.count + 2;
2748 switch (pkt.type) {
2749 case RADEON_PACKET_TYPE0:
2750 r = evergreen_cs_parse_packet0(p, &pkt);
2751 break;
2752 case RADEON_PACKET_TYPE2:
2753 break;
2754 case RADEON_PACKET_TYPE3:
2755 r = evergreen_packet3_check(p, &pkt);
2756 break;
2757 default:
2758 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2759 kfree(p->track);
2760 p->track = NULL;
2761 return -EINVAL;
2763 if (r) {
2764 kfree(p->track);
2765 p->track = NULL;
2766 return r;
2768 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2769 #if 0
2770 for (r = 0; r < p->ib.length_dw; r++) {
2771 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2772 mdelay(1);
2774 #endif
2775 kfree(p->track);
2776 p->track = NULL;
2777 return 0;
2781 * evergreen_dma_cs_parse() - parse the DMA IB
2782 * @p: parser structure holding parsing context.
2784 * Parses the DMA IB from the CS ioctl and updates
2785 * the GPU addresses based on the reloc information and
2786 * checks for errors. (Evergreen-Cayman)
2787 * Returns 0 for success and an error on failure.
2789 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2791 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2792 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
2793 u32 header, cmd, count, sub_cmd;
2794 uint32_t *ib = p->ib.ptr;
2795 u32 idx;
2796 u64 src_offset, dst_offset, dst2_offset;
2797 int r;
2799 do {
2800 if (p->idx >= ib_chunk->length_dw) {
2801 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2802 p->idx, ib_chunk->length_dw);
2803 return -EINVAL;
2805 idx = p->idx;
2806 header = radeon_get_ib_value(p, idx);
2807 cmd = GET_DMA_CMD(header);
2808 count = GET_DMA_COUNT(header);
2809 sub_cmd = GET_DMA_SUB_CMD(header);
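/* DMA packet header: command in the top nibble, sub command and
 * dword/byte count in the lower bits (see the GET_DMA_* macros) */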
2811 switch (cmd) {
2812 case DMA_PACKET_WRITE:
2813 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2814 if (r) {
2815 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2816 return -EINVAL;
2818 switch (sub_cmd) {
2819 /* tiled */
2820 case 8:
2821 dst_offset = radeon_get_ib_value(p, idx+1);
2822 dst_offset <<= 8;
2824 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2825 p->idx += count + 7;
2826 break;
2827 /* linear */
2828 case 0:
2829 dst_offset = radeon_get_ib_value(p, idx+1);
2830 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2832 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2833 ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2834 p->idx += count + 3;
2835 break;
2836 default:
2837 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
2838 return -EINVAL;
2840 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2841 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2842 dst_offset, radeon_bo_size(dst_reloc->robj));
2843 return -EINVAL;
2845 break;
2846 case DMA_PACKET_COPY:
2847 r = r600_dma_cs_next_reloc(p, &src_reloc);
2848 if (r) {
2849 DRM_ERROR("bad DMA_PACKET_COPY\n");
2850 return -EINVAL;
2852 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2853 if (r) {
2854 DRM_ERROR("bad DMA_PACKET_COPY\n");
2855 return -EINVAL;
2857 switch (sub_cmd) {
2858 /* Copy L2L, DW aligned */
2859 case 0x00:
2860 /* L2L, dw */
2861 src_offset = radeon_get_ib_value(p, idx+2);
2862 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2863 dst_offset = radeon_get_ib_value(p, idx+1);
2864 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2865 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2866 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
2867 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2868 return -EINVAL;
2870 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2871 dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
2872 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2873 return -EINVAL;
2875 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2876 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2877 ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2878 ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2879 p->idx += 5;
2880 break;
2881 /* Copy L2T/T2L */
2882 case 0x08:
2883 /* detile bit */
2884 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
2885 /* tiled src, linear dst */
2886 src_offset = radeon_get_ib_value(p, idx+1);
2887 src_offset <<= 8;
2888 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
2890 dst_offset = radeon_get_ib_value(p, idx + 7);
2891 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
2892 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2893 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2894 } else {
2895 /* linear src, tiled dst */
2896 src_offset = radeon_get_ib_value(p, idx+7);
2897 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
2898 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2899 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2901 dst_offset = radeon_get_ib_value(p, idx+1);
2902 dst_offset <<= 8;
2903 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2905 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2906 dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
2907 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2908 return -EINVAL;
2910 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2911 dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
2912 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2913 return -EINVAL;
2915 p->idx += 9;
2916 break;
2917 /* Copy L2L, byte aligned */
2918 case 0x40:
2919 /* L2L, byte */
2920 src_offset = radeon_get_ib_value(p, idx+2);
2921 src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2922 dst_offset = radeon_get_ib_value(p, idx+1);
2923 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2924 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
2925 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
2926 src_offset + count, radeon_bo_size(src_reloc->robj));
2927 return -EINVAL;
2929 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
2930 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
2931 dst_offset + count, radeon_bo_size(dst_reloc->robj));
2932 return -EINVAL;
2934 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
2935 ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
2936 ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2937 ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2938 p->idx += 5;
2939 break;
2940 /* Copy L2L, partial */
2941 case 0x41:
2942 /* L2L, partial */
2943 if (p->family < CHIP_CAYMAN) {
2944 DRM_ERROR("L2L Partial is cayman only !\n");
2945 return -EINVAL;
2947 ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
2948 ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2949 ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
2950 ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2952 p->idx += 9;
2953 break;
2954 /* Copy L2L, DW aligned, broadcast */
2955 case 0x44:
2956 /* L2L, dw, broadcast */
2957 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2958 if (r) {
2959 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
2960 return -EINVAL;
2962 dst_offset = radeon_get_ib_value(p, idx+1);
2963 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2964 dst2_offset = radeon_get_ib_value(p, idx+2);
2965 dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
2966 src_offset = radeon_get_ib_value(p, idx+3);
2967 src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2968 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2969 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
2970 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2971 return -EINVAL;
2973 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2974 dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
2975 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2976 return -EINVAL;
2978 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2979 dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
2980 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2981 return -EINVAL;
2983 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2984 ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
2985 ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2986 ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2987 ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
2988 ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2989 p->idx += 7;
2990 break;
2991 /* Copy L2T Frame to Field */
2992 case 0x48:
2993 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
2994 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2995 return -EINVAL;
2997 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2998 if (r) {
2999 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
3000 return -EINVAL;
3002 dst_offset = radeon_get_ib_value(p, idx+1);
3003 dst_offset <<= 8;
3004 dst2_offset = radeon_get_ib_value(p, idx+2);
3005 dst2_offset <<= 8;
3006 src_offset = radeon_get_ib_value(p, idx+8);
3007 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
3008 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3009 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
3010 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3011 return -EINVAL;
3013 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3014 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
3015 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3016 return -EINVAL;
3018 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3019 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
3020 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3021 return -EINVAL;
3023 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3024 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
3025 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3026 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3027 p->idx += 10;
3028 break;
3029 /* Copy L2T/T2L, partial */
3030 case 0x49:
3031 /* L2T, T2L partial */
3032 if (p->family < CHIP_CAYMAN) {
3033 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
3034 return -EINVAL;
3036 /* detile bit */
3037 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
3038 /* tiled src, linear dst */
3039 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
3041 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
3042 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
3043 } else {
3044 /* linear src, tiled dst */
3045 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3046 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3048 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3050 p->idx += 12;
3051 break;
3052 /* Copy L2T broadcast */
3053 case 0x4b:
3054 /* L2T, broadcast */
3055 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
3056 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3057 return -EINVAL;
3059 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3060 if (r) {
3061 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3062 return -EINVAL;
3064 dst_offset = radeon_get_ib_value(p, idx+1);
3065 dst_offset <<= 8;
3066 dst2_offset = radeon_get_ib_value(p, idx+2);
3067 dst2_offset <<= 8;
3068 src_offset = radeon_get_ib_value(p, idx+8);
3069 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
3070 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3071 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3072 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3073 return -EINVAL;
3075 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3076 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3077 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3078 return -EINVAL;
3080 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3081 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3082 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3083 return -EINVAL;
3085 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3086 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
3087 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3088 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3089 p->idx += 10;
3090 break;
3091 /* Copy L2T/T2L (tile units) */
3092 case 0x4c:
3093 /* L2T, T2L */
3094 /* detile bit */
3095 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
3096 /* tiled src, linear dst */
3097 src_offset = radeon_get_ib_value(p, idx+1);
3098 src_offset <<= 8;
3099 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
3101 dst_offset = radeon_get_ib_value(p, idx+7);
3102 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3103 ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
3104 ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
3105 } else {
3106 /* linear src, tiled dst */
3107 src_offset = radeon_get_ib_value(p, idx+7);
3108 src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
3109 ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3110 ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3112 dst_offset = radeon_get_ib_value(p, idx+1);
3113 dst_offset <<= 8;
3114 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3116 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3117 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
3118 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3119 return -EINVAL;
3121 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3122 dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
3123 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3124 return -EINVAL;
3126 p->idx += 9;
3127 break;
3128 /* Copy T2T, partial (tile units) */
3129 case 0x4d:
3130 /* T2T partial */
3131 if (p->family < CHIP_CAYMAN) {
3132 DRM_ERROR("T2T Partial is cayman only !\n");
3133 return -EINVAL;
3135 ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
3136 ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
3137 p->idx += 13;
3138 break;
3139 /* Copy L2T broadcast (tile units) */
3140 case 0x4f:
3141 /* L2T, broadcast */
3142 if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
3143 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3144 return -EINVAL;
3146 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3147 if (r) {
3148 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3149 return -EINVAL;
3151 dst_offset = radeon_get_ib_value(p, idx+1);
3152 dst_offset <<= 8;
3153 dst2_offset = radeon_get_ib_value(p, idx+2);
3154 dst2_offset <<= 8;
3155 src_offset = radeon_get_ib_value(p, idx+8);
3156 src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
3157 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3158 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3159 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3160 return -EINVAL;
3162 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3163 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3164 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3165 return -EINVAL;
3167 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3168 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3169 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3170 return -EINVAL;
3172 ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
3173 ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
3174 ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
3175 ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
3176 p->idx += 10;
3177 break;
3178 default:
3179 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
3180 return -EINVAL;
3182 break;
3183 case DMA_PACKET_CONSTANT_FILL:
3184 r = r600_dma_cs_next_reloc(p, &dst_reloc);
3185 if (r) {
3186 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3187 return -EINVAL;
3189 dst_offset = radeon_get_ib_value(p, idx+1);
3190 dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
3191 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3192 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
3193 dst_offset, radeon_bo_size(dst_reloc->robj));
3194 return -EINVAL;
3196 ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
3197 ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
3198 p->idx += 4;
3199 break;
3200 case DMA_PACKET_NOP:
3201 p->idx += 1;
3202 break;
3203 default:
3204 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3205 return -EINVAL;
3207 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
3208 #if 0
3209 for (r = 0; r < p->ib.length_dw; r++) {
3210 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
3211 mdelay(1);
3213 #endif
3214 return 0;
3217 /* vm parser */
3218 static bool evergreen_vm_reg_valid(u32 reg)
3220 /* context regs are fine */
3221 if (reg >= 0x28000)
3222 return true;
3224 /* check config regs */
3225 switch (reg) {
3226 case WAIT_UNTIL:
3227 case GRBM_GFX_INDEX:
3228 case CP_STRMOUT_CNTL:
3229 case CP_COHER_CNTL:
3230 case CP_COHER_SIZE:
3231 case VGT_VTX_VECT_EJECT_REG:
3232 case VGT_CACHE_INVALIDATION:
3233 case VGT_GS_VERTEX_REUSE:
3234 case VGT_PRIMITIVE_TYPE:
3235 case VGT_INDEX_TYPE:
3236 case VGT_NUM_INDICES:
3237 case VGT_NUM_INSTANCES:
3238 case VGT_COMPUTE_DIM_X:
3239 case VGT_COMPUTE_DIM_Y:
3240 case VGT_COMPUTE_DIM_Z:
3241 case VGT_COMPUTE_START_X:
3242 case VGT_COMPUTE_START_Y:
3243 case VGT_COMPUTE_START_Z:
3244 case VGT_COMPUTE_INDEX:
3245 case VGT_COMPUTE_THREAD_GROUP_SIZE:
3246 case VGT_HS_OFFCHIP_PARAM:
3247 case PA_CL_ENHANCE:
3248 case PA_SU_LINE_STIPPLE_VALUE:
3249 case PA_SC_LINE_STIPPLE_STATE:
3250 case PA_SC_ENHANCE:
3251 case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
3252 case SQ_DYN_GPR_SIMD_LOCK_EN:
3253 case SQ_CONFIG:
3254 case SQ_GPR_RESOURCE_MGMT_1:
3255 case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
3256 case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
3257 case SQ_CONST_MEM_BASE:
3258 case SQ_STATIC_THREAD_MGMT_1:
3259 case SQ_STATIC_THREAD_MGMT_2:
3260 case SQ_STATIC_THREAD_MGMT_3:
3261 case SPI_CONFIG_CNTL:
3262 case SPI_CONFIG_CNTL_1:
3263 case TA_CNTL_AUX:
3264 case DB_DEBUG:
3265 case DB_DEBUG2:
3266 case DB_DEBUG3:
3267 case DB_DEBUG4:
3268 case DB_WATERMARKS:
3269 case TD_PS_BORDER_COLOR_INDEX:
3270 case TD_PS_BORDER_COLOR_RED:
3271 case TD_PS_BORDER_COLOR_GREEN:
3272 case TD_PS_BORDER_COLOR_BLUE:
3273 case TD_PS_BORDER_COLOR_ALPHA:
3274 case TD_VS_BORDER_COLOR_INDEX:
3275 case TD_VS_BORDER_COLOR_RED:
3276 case TD_VS_BORDER_COLOR_GREEN:
3277 case TD_VS_BORDER_COLOR_BLUE:
3278 case TD_VS_BORDER_COLOR_ALPHA:
3279 case TD_GS_BORDER_COLOR_INDEX:
3280 case TD_GS_BORDER_COLOR_RED:
3281 case TD_GS_BORDER_COLOR_GREEN:
3282 case TD_GS_BORDER_COLOR_BLUE:
3283 case TD_GS_BORDER_COLOR_ALPHA:
3284 case TD_HS_BORDER_COLOR_INDEX:
3285 case TD_HS_BORDER_COLOR_RED:
3286 case TD_HS_BORDER_COLOR_GREEN:
3287 case TD_HS_BORDER_COLOR_BLUE:
3288 case TD_HS_BORDER_COLOR_ALPHA:
3289 case TD_LS_BORDER_COLOR_INDEX:
3290 case TD_LS_BORDER_COLOR_RED:
3291 case TD_LS_BORDER_COLOR_GREEN:
3292 case TD_LS_BORDER_COLOR_BLUE:
3293 case TD_LS_BORDER_COLOR_ALPHA:
3294 case TD_CS_BORDER_COLOR_INDEX:
3295 case TD_CS_BORDER_COLOR_RED:
3296 case TD_CS_BORDER_COLOR_GREEN:
3297 case TD_CS_BORDER_COLOR_BLUE:
3298 case TD_CS_BORDER_COLOR_ALPHA:
3299 case SQ_ESGS_RING_SIZE:
3300 case SQ_GSVS_RING_SIZE:
3301 case SQ_ESTMP_RING_SIZE:
3302 case SQ_GSTMP_RING_SIZE:
3303 case SQ_HSTMP_RING_SIZE:
3304 case SQ_LSTMP_RING_SIZE:
3305 case SQ_PSTMP_RING_SIZE:
3306 case SQ_VSTMP_RING_SIZE:
3307 case SQ_ESGS_RING_ITEMSIZE:
3308 case SQ_ESTMP_RING_ITEMSIZE:
3309 case SQ_GSTMP_RING_ITEMSIZE:
3310 case SQ_GSVS_RING_ITEMSIZE:
3311 case SQ_GS_VERT_ITEMSIZE:
3312 case SQ_GS_VERT_ITEMSIZE_1:
3313 case SQ_GS_VERT_ITEMSIZE_2:
3314 case SQ_GS_VERT_ITEMSIZE_3:
3315 case SQ_GSVS_RING_OFFSET_1:
3316 case SQ_GSVS_RING_OFFSET_2:
3317 case SQ_GSVS_RING_OFFSET_3:
3318 case SQ_HSTMP_RING_ITEMSIZE:
3319 case SQ_LSTMP_RING_ITEMSIZE:
3320 case SQ_PSTMP_RING_ITEMSIZE:
3321 case SQ_VSTMP_RING_ITEMSIZE:
3322 case VGT_TF_RING_SIZE:
3323 case SQ_ESGS_RING_BASE:
3324 case SQ_GSVS_RING_BASE:
3325 case SQ_ESTMP_RING_BASE:
3326 case SQ_GSTMP_RING_BASE:
3327 case SQ_HSTMP_RING_BASE:
3328 case SQ_LSTMP_RING_BASE:
3329 case SQ_PSTMP_RING_BASE:
3330 case SQ_VSTMP_RING_BASE:
3331 case CAYMAN_VGT_OFFCHIP_LDS_BASE:
3332 case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
3333 return true;
3334 default:
3335 DRM_ERROR("Invalid register 0x%x in CS\n", reg);
3336 return false;
3340 static int evergreen_vm_packet3_check(struct radeon_device *rdev,
3341 u32 *ib, struct radeon_cs_packet *pkt)
3343 u32 idx = pkt->idx + 1;
3344 u32 idx_value = ib[idx];
3345 u32 start_reg, end_reg, reg, i;
3346 u32 command, info;
3348 switch (pkt->opcode) {
3349 case PACKET3_NOP:
3350 break;
3351 case PACKET3_SET_BASE:
3352 if (idx_value != 1) {
3353 DRM_ERROR("bad SET_BASE");
3354 return -EINVAL;
3356 break;
3357 case PACKET3_CLEAR_STATE:
3358 case PACKET3_INDEX_BUFFER_SIZE:
3359 case PACKET3_DISPATCH_DIRECT:
3360 case PACKET3_DISPATCH_INDIRECT:
3361 case PACKET3_MODE_CONTROL:
3362 case PACKET3_SET_PREDICATION:
3363 case PACKET3_COND_EXEC:
3364 case PACKET3_PRED_EXEC:
3365 case PACKET3_DRAW_INDIRECT:
3366 case PACKET3_DRAW_INDEX_INDIRECT:
3367 case PACKET3_INDEX_BASE:
3368 case PACKET3_DRAW_INDEX_2:
3369 case PACKET3_CONTEXT_CONTROL:
3370 case PACKET3_DRAW_INDEX_OFFSET:
3371 case PACKET3_INDEX_TYPE:
3372 case PACKET3_DRAW_INDEX:
3373 case PACKET3_DRAW_INDEX_AUTO:
3374 case PACKET3_DRAW_INDEX_IMMD:
3375 case PACKET3_NUM_INSTANCES:
3376 case PACKET3_DRAW_INDEX_MULTI_AUTO:
3377 case PACKET3_STRMOUT_BUFFER_UPDATE:
3378 case PACKET3_DRAW_INDEX_OFFSET_2:
3379 case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
3380 case PACKET3_MPEG_INDEX:
3381 case PACKET3_WAIT_REG_MEM:
3382 case PACKET3_MEM_WRITE:
3383 case PACKET3_SURFACE_SYNC:
3384 case PACKET3_EVENT_WRITE:
3385 case PACKET3_EVENT_WRITE_EOP:
3386 case PACKET3_EVENT_WRITE_EOS:
3387 case PACKET3_SET_CONTEXT_REG:
3388 case PACKET3_SET_BOOL_CONST:
3389 case PACKET3_SET_LOOP_CONST:
3390 case PACKET3_SET_RESOURCE:
3391 case PACKET3_SET_SAMPLER:
3392 case PACKET3_SET_CTL_CONST:
3393 case PACKET3_SET_RESOURCE_OFFSET:
3394 case PACKET3_SET_CONTEXT_REG_INDIRECT:
3395 case PACKET3_SET_RESOURCE_INDIRECT:
3396 case CAYMAN_PACKET3_DEALLOC_STATE:
3397 break;
3398 case PACKET3_COND_WRITE:
3399 if (idx_value & 0x100) {
3400 reg = ib[idx + 5] * 4;
3401 if (!evergreen_vm_reg_valid(reg))
3402 return -EINVAL;
3404 break;
3405 case PACKET3_COPY_DW:
3406 if (idx_value & 0x2) {
3407 reg = ib[idx + 3] * 4;
3408 if (!evergreen_vm_reg_valid(reg))
3409 return -EINVAL;
3411 break;
3412 case PACKET3_SET_CONFIG_REG:
3413 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
3414 end_reg = 4 * pkt->count + start_reg - 4;
3415 if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
3416 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
3417 (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
3418 DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
3419 return -EINVAL;
3421 for (i = 0; i < pkt->count; i++) {
3422 reg = start_reg + (4 * i);
3423 if (!evergreen_vm_reg_valid(reg))
3424 return -EINVAL;
3426 break;
3427 case PACKET3_CP_DMA:
3428 command = ib[idx + 4];
3429 info = ib[idx + 1];
3430 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
3431 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
3432 ((((info & 0x00300000) >> 20) == 0) &&
3433 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
3434 ((((info & 0x60000000) >> 29) == 0) &&
3435 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
3436 /* non mem-to-mem copies require a dw aligned count */
3437 if ((command & 0x1fffff) % 4) {
3438 DRM_ERROR("CP DMA command requires dw count alignment\n");
3439 return -EINVAL;
3442 if (command & PACKET3_CP_DMA_CMD_SAS) {
3443 /* src address space is register */
3444 if (((info & 0x60000000) >> 29) == 0) {
3445 start_reg = idx_value << 2;
3446 if (command & PACKET3_CP_DMA_CMD_SAIC) {
3447 reg = start_reg;
3448 if (!evergreen_vm_reg_valid(reg)) {
3449 DRM_ERROR("CP DMA Bad SRC register\n");
3450 return -EINVAL;
3452 } else {
3453 for (i = 0; i < (command & 0x1fffff); i++) {
3454 reg = start_reg + (4 * i);
3455 if (!evergreen_vm_reg_valid(reg)) {
3456 DRM_ERROR("CP DMA Bad SRC register\n");
3457 return -EINVAL;
3463 if (command & PACKET3_CP_DMA_CMD_DAS) {
3464 /* dst address space is register */
3465 if (((info & 0x00300000) >> 20) == 0) {
3466 start_reg = ib[idx + 2];
3467 if (command & PACKET3_CP_DMA_CMD_DAIC) {
3468 reg = start_reg;
3469 if (!evergreen_vm_reg_valid(reg)) {
3470 DRM_ERROR("CP DMA Bad DST register\n");
3471 return -EINVAL;
3473 } else {
3474 for (i = 0; i < (command & 0x1fffff); i++) {
3475 reg = start_reg + (4 * i);
3476 if (!evergreen_vm_reg_valid(reg)) {
3477 DRM_ERROR("CP DMA Bad DST register\n");
3478 return -EINVAL;
3484 break;
3485 case PACKET3_SET_APPEND_CNT: {
3486 uint32_t areg;
3487 uint32_t allowed_reg_base;
3489 if (pkt->count != 2) {
3490 DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
3491 return -EINVAL;
3494 allowed_reg_base = GDS_APPEND_COUNT_0;
3495 allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
3496 allowed_reg_base >>= 2;
3498 areg = idx_value >> 16;
3499 if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
3500 DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
3501 areg, idx);
3502 return -EINVAL;
3504 break;
3506 default:
3507 return -EINVAL;
3509 return 0;
3512 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3514 int ret = 0;
3515 u32 idx = 0;
3516 struct radeon_cs_packet pkt;
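/* with VM the addresses are not patched; only packet types and opcodes are
 * validated here, and type-0 register writes are rejected outright */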
3518 do {
3519 pkt.idx = idx;
3520 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
3521 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
3522 pkt.one_reg_wr = 0;
3523 switch (pkt.type) {
3524 case RADEON_PACKET_TYPE0:
3525 dev_err(rdev->dev, "Packet0 not allowed!\n");
3526 ret = -EINVAL;
3527 break;
3528 case RADEON_PACKET_TYPE2:
3529 idx += 1;
3530 break;
3531 case RADEON_PACKET_TYPE3:
3532 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
3533 ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
3534 idx += pkt.count + 2;
3535 break;
3536 default:
3537 dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
3538 ret = -EINVAL;
3539 break;
3541 if (ret)
3542 break;
3543 } while (idx < ib->length_dw);
3545 return ret;
3549 * evergreen_dma_ib_parse() - parse the DMA IB for VM
3550 * @rdev: radeon_device pointer
3551 * @ib: radeon_ib pointer
3553 * Parses the DMA IB from the VM CS ioctl
3554 * and checks for errors. (Cayman-SI)
3555 * Returns 0 for success and an error on failure.
3557 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3559 u32 idx = 0;
3560 u32 header, cmd, count, sub_cmd;
3562 do {
3563 header = ib->ptr[idx];
3564 cmd = GET_DMA_CMD(header);
3565 count = GET_DMA_COUNT(header);
3566 sub_cmd = GET_DMA_SUB_CMD(header);
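/* same header layout as in evergreen_dma_cs_parse(); for VM IBs only the
 * packet sizes are needed to step through the stream */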
3568 switch (cmd) {
3569 case DMA_PACKET_WRITE:
3570 switch (sub_cmd) {
3571 /* tiled */
3572 case 8:
3573 idx += count + 7;
3574 break;
3575 /* linear */
3576 case 0:
3577 idx += count + 3;
3578 break;
3579 default:
3580 DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
3581 return -EINVAL;
3583 break;
3584 case DMA_PACKET_COPY:
3585 switch (sub_cmd) {
3586 /* Copy L2L, DW aligned */
3587 case 0x00:
3588 idx += 5;
3589 break;
3590 /* Copy L2T/T2L */
3591 case 0x08:
3592 idx += 9;
3593 break;
3594 /* Copy L2L, byte aligned */
3595 case 0x40:
3596 idx += 5;
3597 break;
3598 /* Copy L2L, partial */
3599 case 0x41:
3600 idx += 9;
3601 break;
3602 /* Copy L2L, DW aligned, broadcast */
3603 case 0x44:
3604 idx += 7;
3605 break;
3606 /* Copy L2T Frame to Field */
3607 case 0x48:
3608 idx += 10;
3609 break;
3610 /* Copy L2T/T2L, partial */
3611 case 0x49:
3612 idx += 12;
3613 break;
3614 /* Copy L2T broadcast */
3615 case 0x4b:
3616 idx += 10;
3617 break;
3618 /* Copy L2T/T2L (tile units) */
3619 case 0x4c:
3620 idx += 9;
3621 break;
3622 /* Copy T2T, partial (tile units) */
3623 case 0x4d:
3624 idx += 13;
3625 break;
3626 /* Copy L2T broadcast (tile units) */
3627 case 0x4f:
3628 idx += 10;
3629 break;
3630 default:
3631 DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
3632 return -EINVAL;
3634 break;
3635 case DMA_PACKET_CONSTANT_FILL:
3636 idx += 4;
3637 break;
3638 case DMA_PACKET_NOP:
3639 idx += 1;
3640 break;
3641 default:
3642 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3643 return -EINVAL;
3645 } while (idx < ib->length_dw);
3647 return 0;