/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "evergreend.h"
#include "evergreen_reg_safe.h"
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}
static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	/* XXX fill in */
	return 0;
}
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffer yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}

	/* XXX fill in */
	return 0;
}
/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index into the ib at which the packet starts
 *
 * Assume that chunk_ib_index is properly set.  Returns -EINVAL if the packet
 * is bigger than the remaining ib size, or if the packet is unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
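/*
 * Worked example (illustrative, not part of the original file): with the
 * r600-style header layout assumed by the CP_PACKET_GET_* macros above --
 * packet type in bits 31:30, dword count in bits 29:16 and, for type-3
 * packets, the opcode in bits 15:8 -- a header of 0xC0001000 would decode
 * as type 3, count 0 (one payload dword), opcode 0x10, i.e. the PACKET3
 * NOP that userspace uses to carry relocation indices.
 */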
/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:	parser structure holding parsing context.
 * @cs_reloc:	reloc information
 *
 * Check that the next packet is a relocation packet3, do the bo validation
 * and compute the GPU offset using the provided start.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
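/*
 * Worked example (illustrative): userspace follows a buffer-referencing
 * packet with such a PACKET3 NOP whose payload dword, at p3reloc.idx + 1,
 * is an index into the relocation chunk.  With the 4-dword entry size
 * assumed by the FIXME above, a payload value of 8 selects
 * p->relocs_ptr[2], the third relocation entry.
 */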
/**
 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 *
 * Check if the next packet is a relocation packet3.
 **/
static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 **/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
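/*
 * Index-map sketch for the sequence handled above (derived from the offsets
 * in the code, not from the original file): h_idx points at the PACKET0
 * header, so h_idx + 1 holds the VLINE_START_END value, the WAIT_REG_MEM
 * packet occupies h_idx + 2 through h_idx + 8 (its register dword at
 * h_idx + 4, its mask dword at h_idx + 7), and the crtc_id rides in the
 * following NOP's payload at h_idx + 2 + 7 + 1.  That is exactly the span
 * nop-ed out when the crtc is disabled.
 */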
static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0 if the
 * register is safe.  If the register is not flagged as safe this function
 * will test it against a list of registers needing special handling.
 **/
static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
	u32 m, i, tmp, *ib;
	int r;

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(evergreen_reg_safe_bm[i] & m))
		return 0;
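	/*
	 * Illustrative arithmetic for the bitmap test above (not from the
	 * original file): registers are 4 bytes apart, so reg >> 2 is the
	 * register ordinal; word i = reg >> 7 of evergreen_reg_safe_bm holds
	 * 32 such ordinals and bit (reg >> 2) & 31 selects one.  E.g. reg
	 * 0x84 maps to word 1, bit 1.  A clear bit means the register is
	 * safe and is accepted right away; a set bit falls through to the
	 * switch below for special handling.
	 */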
	ib = p->ib->ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		tmp = radeon_get_ib_value(p, idx);
		ib[idx] = 0;
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case DB_Z_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_info = radeon_get_ib_value(p, idx);
		ib[idx] &= ~Z_ARRAY_MODE(0xf);
		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
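	/*
	 * Note on the patching above (explanatory comment, assumes the usual
	 * radeon convention): these BASE registers hold a 256-byte-aligned
	 * address, so the relocated bo offset is folded in as
	 * gpu_offset >> 8.  A bo placed at GPU address 0x40000000, for
	 * example, adds 0x00400000 to the value userspace wrote in ib[idx].
	 */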
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
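	/*
	 * Illustrative stride arithmetic (not from the original file): the
	 * CB0-CB7 register blocks are spaced 0x3c bytes apart while the
	 * CB8-CB11 blocks are spaced 0x1c apart, hence the two formulas.
	 * E.g. CB_COLOR1_VIEW yields (reg - CB_COLOR0_VIEW) / 0x3c == 1 and
	 * CB_COLOR9_VIEW yields ((reg - CB_COLOR8_VIEW) / 0x1c) + 8 == 9.
	 */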
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
/**
 * evergreen_check_texture_resource() - check if the texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 **/
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);
	switch (pkt->opcode) {
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
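	/*
	 * Worked example for the range check above (values illustrative):
	 * with idx_value == 2 and pkt->count == 3, start_reg is
	 * PACKET3_SET_CONFIG_REG_START + 8 and end_reg is start_reg + 8, so
	 * three consecutive dword registers are written and each one is
	 * vetted by evergreen_cs_check_reg() before the IB is accepted.
	 */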
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
								     texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
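	/*
	 * Slot layout implied by the offsets above (inferred from the code,
	 * not separately documented here): each SET_RESOURCE slot is 8
	 * dwords and dword 7 carries the SQ_TEX_VTX_* type.  For textures,
	 * dword 2 holds the 256-byte-aligned base and dword 3 the mipmap
	 * base; for vertex buffers, dword 0 holds the base address, dword 1
	 * the size and dword 2 the upper address bits.
	 */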
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		track->npipes = p->rdev->config.evergreen.tiling_npipes;
		track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
		track->group_size = p->rdev->config.evergreen.tiling_group_size;
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
	}
#endif
	return 0;
}