/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc);
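
/*
 * State tracker: mirrors the colour-buffer (CB) and depth/stencil (DB)
 * registers the command stream has programmed so far, so the bound buffer
 * objects can be validated when a draw is issued.
 */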
struct evergreen_cs_track {
    u32                 group_size;
    u32                 nbanks;
    u32                 npipes;
    /* value we track */
    u32                 nsamples;
    u32                 cb_color_base_last[12];
    struct radeon_bo    *cb_color_bo[12];
    u32                 cb_color_bo_offset[12];
    struct radeon_bo    *cb_color_fmask_bo[8];
    struct radeon_bo    *cb_color_cmask_bo[8];
    u32                 cb_color_info[12];
    u32                 cb_color_view[12];
    u32                 cb_color_pitch_idx[12];
    u32                 cb_color_slice_idx[12];
    u32                 cb_color_dim_idx[12];
    u32                 cb_color_dim[12];
    u32                 cb_color_pitch[12];
    u32                 cb_color_slice[12];
    u32                 cb_color_cmask_slice[8];
    u32                 cb_color_fmask_slice[8];
    u32                 cb_target_mask;
    u32                 cb_shader_mask;
    u32                 vgt_strmout_config;
    u32                 vgt_strmout_buffer_config;
    u32                 db_depth_control;
    u32                 db_depth_view;
    u32                 db_depth_size;
    u32                 db_depth_size_idx;
    u32                 db_z_info;
    u32                 db_z_idx;
    u32                 db_z_read_offset;
    u32                 db_z_write_offset;
    struct radeon_bo    *db_z_read_bo;
    struct radeon_bo    *db_z_write_bo;
    u32                 db_s_info;
    u32                 db_s_idx;
    u32                 db_s_read_offset;
    u32                 db_s_write_offset;
    struct radeon_bo    *db_s_read_bo;
    struct radeon_bo    *db_s_write_bo;
};
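
/*
 * Reset the tracker to sentinel values: 0xFFFFFFFF offsets/masks and NULL
 * buffer objects mark state this command stream has not programmed yet.
 */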
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
    int i;

    for (i = 0; i < 8; i++) {
        track->cb_color_fmask_bo[i] = NULL;
        track->cb_color_cmask_bo[i] = NULL;
        track->cb_color_cmask_slice[i] = 0;
        track->cb_color_fmask_slice[i] = 0;
    }

    for (i = 0; i < 12; i++) {
        track->cb_color_base_last[i] = 0;
        track->cb_color_bo[i] = NULL;
        track->cb_color_bo_offset[i] = 0xFFFFFFFF;
        track->cb_color_info[i] = 0;
        track->cb_color_view[i] = 0;
        track->cb_color_pitch_idx[i] = 0;
        track->cb_color_slice_idx[i] = 0;
        track->cb_color_dim[i] = 0;
        track->cb_color_pitch[i] = 0;
        track->cb_color_slice[i] = 0;
    }
    track->cb_target_mask = 0xFFFFFFFF;
    track->cb_shader_mask = 0xFFFFFFFF;

    track->db_depth_view = 0xFFFFC000;
    track->db_depth_size = 0xFFFFFFFF;
    track->db_depth_size_idx = 0;
    track->db_depth_control = 0xFFFFFFFF;
    track->db_z_info = 0xFFFFFFFF;
    track->db_z_idx = 0xFFFFFFFF;
    track->db_z_read_offset = 0xFFFFFFFF;
    track->db_z_write_offset = 0xFFFFFFFF;
    track->db_z_read_bo = NULL;
    track->db_z_write_bo = NULL;
    track->db_s_info = 0xFFFFFFFF;
    track->db_s_idx = 0xFFFFFFFF;
    track->db_s_read_offset = 0xFFFFFFFF;
    track->db_s_write_offset = 0xFFFFFFFF;
    track->db_s_read_bo = NULL;
    track->db_s_write_bo = NULL;
}
static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
    /* XXX fill in */
    return 0;
}
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
    struct evergreen_cs_track *track = p->track;

    /* we don't support stream out buffer yet */
    if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
        dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
        return -EINVAL;
    }
    /* XXX fill in */
    return 0;
}
/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 */
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx)
{
    struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
    uint32_t header;

    if (idx >= ib_chunk->length_dw) {
        DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                  idx, ib_chunk->length_dw);
        return -EINVAL;
    }
    header = radeon_get_ib_value(p, idx);
    pkt->idx = idx;
    pkt->type = CP_PACKET_GET_TYPE(header);
    pkt->count = CP_PACKET_GET_COUNT(header);
    pkt->one_reg_wr = 0;
    switch (pkt->type) {
    case PACKET_TYPE0:
        pkt->reg = CP_PACKET0_GET_REG(header);
        break;
    case PACKET_TYPE3:
        pkt->opcode = CP_PACKET3_GET_OPCODE(header);
        break;
    case PACKET_TYPE2:
        pkt->count = -1;
        break;
    default:
        DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
        return -EINVAL;
    }
    if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
        DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
        return -EINVAL;
    }
    return 0;
}
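
/*
 * Note on the bounds checks above: CP packet headers keep the packet type
 * in bits 31:30 and the payload size minus one in bits 29:16 (extracted by
 * CP_PACKET_GET_TYPE()/CP_PACKET_GET_COUNT()), so a packet occupies
 * pkt->count + 2 dwords in the ib including its header.
 */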
/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @parser:	parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation
 * and compute the GPU offset using the provided start.
 */
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                          struct radeon_cs_reloc **cs_reloc)
{
    struct radeon_cs_chunk *relocs_chunk;
    struct radeon_cs_packet p3reloc;
    unsigned idx;
    int r;

    if (p->chunk_relocs_idx == -1) {
        DRM_ERROR("No relocation chunk !\n");
        return -EINVAL;
    }
    *cs_reloc = NULL;
    relocs_chunk = &p->chunks[p->chunk_relocs_idx];
    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
    if (r) {
        return r;
    }
    p->idx += p3reloc.count + 2;
    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
        DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                  p3reloc.idx);
        return -EINVAL;
    }
    idx = radeon_get_ib_value(p, p3reloc.idx + 1);
    if (idx >= relocs_chunk->length_dw) {
        DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                  idx, relocs_chunk->length_dw);
        return -EINVAL;
    }
    /* FIXME: we assume reloc size is 4 dwords */
    *cs_reloc = p->relocs_ptr[(idx / 4)];
    return 0;
}
/**
 * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is a packet3 nop for reloc
 * @parser:	parser structure holding parsing context.
 *
 * Check if the next packet is a relocation packet3 NOP; returns 1 if it
 * is, 0 otherwise.
 */
static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
    struct radeon_cs_packet p3reloc;
    int r;

    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
    if (r) {
        return 0;
    }
    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
        return 0;
    }
    return 1;
}
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the
 * wait in that case.
 */
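/*
 * Dword layout of that sequence as consumed below (a sketch; h_idx points
 * at the PACKET0 header):
 *   h_idx + 0     PACKET0(VLINE_START_END, 0)
 *   h_idx + 1     start/end scanline value
 *   h_idx + 2..8  PACKET3(WAIT_REG_MEM, 5) polling the vline status reg
 *   h_idx + 9     PACKET3(NOP, 0) header carrying the reloc
 *   h_idx + 10    crtc_id of the reloc
 */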
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
    struct drm_mode_object *obj;
    struct drm_crtc *crtc;
    struct radeon_crtc *radeon_crtc;
    struct radeon_cs_packet p3reloc, wait_reg_mem;
    int crtc_id;
    int r;
    uint32_t header, h_idx, reg, wait_reg_mem_info;
    volatile uint32_t *ib;

    ib = p->ib->ptr;

    /* parse the WAIT_REG_MEM */
    r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
    if (r)
        return r;

    /* check it's a WAIT_REG_MEM */
    if (wait_reg_mem.type != PACKET_TYPE3 ||
        wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
        DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
        return -EINVAL;
    }

    wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
    /* bit 4 is reg (0) or mem (1) */
    if (wait_reg_mem_info & 0x10) {
        DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
        return -EINVAL;
    }
    /* waiting for value to be equal */
    if ((wait_reg_mem_info & 0x7) != 0x3) {
        DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
        return -EINVAL;
    }
    if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
        DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
        return -EINVAL;
    }

    if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
        DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
        return -EINVAL;
    }

    /* jump over the NOP */
    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
    if (r)
        return r;

    h_idx = p->idx - 2;
    p->idx += wait_reg_mem.count + 2;
    p->idx += p3reloc.count + 2;

    header = radeon_get_ib_value(p, h_idx);
    crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
    reg = CP_PACKET0_GET_REG(header);
    obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
    if (!obj) {
        DRM_ERROR("cannot find crtc %d\n", crtc_id);
        r = -EINVAL;
        goto out;
    }
    crtc = obj_to_crtc(obj);
    radeon_crtc = to_radeon_crtc(crtc);
    crtc_id = radeon_crtc->crtc_id;

    if (!crtc->enabled) {
        /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
        ib[h_idx + 2] = PACKET2(0);
        ib[h_idx + 3] = PACKET2(0);
        ib[h_idx + 4] = PACKET2(0);
        ib[h_idx + 5] = PACKET2(0);
        ib[h_idx + 6] = PACKET2(0);
        ib[h_idx + 7] = PACKET2(0);
        ib[h_idx + 8] = PACKET2(0);
    } else {
        switch (reg) {
        case EVERGREEN_VLINE_START_END:
            header &= ~R600_CP_PACKET0_REG_MASK;
            header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
            ib[h_idx] = header;
            ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
            break;
        default:
            DRM_ERROR("unknown crtc reloc\n");
            r = -EINVAL;
            goto out;
        }
    }
out:
    return r;
}
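
/* PACKET0 writes are otherwise forbidden: only the VLINE wait sequence
 * handled above is recognized and rewritten. */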
static int evergreen_packet0_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt,
                                   unsigned idx, unsigned reg)
{
    int r;

    switch (reg) {
    case EVERGREEN_VLINE_START_END:
        r = evergreen_cs_packet_parse_vline(p);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            return r;
        }
        break;
    default:
        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
               reg, idx);
        return -EINVAL;
    }
    return 0;
}
static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
                                      struct radeon_cs_packet *pkt)
{
    unsigned reg, i;
    unsigned idx;
    int r;

    idx = pkt->idx + 1;
    reg = pkt->reg;
    for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
        r = evergreen_packet0_check(p, pkt, idx, reg);
        if (r) {
            return r;
        }
    }
    return 0;
}
/**
 * evergreen_cs_check_reg() - check if a register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
    struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
    struct radeon_cs_reloc *reloc;
    u32 last_reg;
    u32 m, i, tmp, *ib;
    int r;

    if (p->rdev->family >= CHIP_CAYMAN)
        last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
    else
        last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

    i = (reg >> 7);
    if (i >= last_reg) {
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    m = 1 << ((reg >> 2) & 31);
    if (p->rdev->family >= CHIP_CAYMAN) {
        if (!(cayman_reg_safe_bm[i] & m))
            return 0;
    } else {
        if (!(evergreen_reg_safe_bm[i] & m))
            return 0;
    }
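    /* Each u32 of the *_reg_safe_bm bitmaps covers 32 dword registers
     * (128 bytes of register space): reg >> 7 selected the word above and
     * (reg >> 2) & 31 the bit. A clear bit means the register is safe
     * as-is; a set bit means it needs the special handling below. */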
    ib = p->ib->ptr;
    switch (reg) {
    /* force following reg to 0 in an attempt to disable out buffer
     * which will need us to better understand how it works to perform
     * security check on it (Jerome)
     */
    case SQ_ESGS_RING_SIZE:
    case SQ_GSVS_RING_SIZE:
    case SQ_ESTMP_RING_SIZE:
    case SQ_GSTMP_RING_SIZE:
    case SQ_HSTMP_RING_SIZE:
    case SQ_LSTMP_RING_SIZE:
    case SQ_PSTMP_RING_SIZE:
    case SQ_VSTMP_RING_SIZE:
    case SQ_ESGS_RING_ITEMSIZE:
    case SQ_ESTMP_RING_ITEMSIZE:
    case SQ_GSTMP_RING_ITEMSIZE:
    case SQ_GSVS_RING_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE:
    case SQ_GS_VERT_ITEMSIZE_1:
    case SQ_GS_VERT_ITEMSIZE_2:
    case SQ_GS_VERT_ITEMSIZE_3:
    case SQ_GSVS_RING_OFFSET_1:
    case SQ_GSVS_RING_OFFSET_2:
    case SQ_GSVS_RING_OFFSET_3:
    case SQ_HSTMP_RING_ITEMSIZE:
    case SQ_LSTMP_RING_ITEMSIZE:
    case SQ_PSTMP_RING_ITEMSIZE:
    case SQ_VSTMP_RING_ITEMSIZE:
    case VGT_TF_RING_SIZE:
        /* get value to populate the IB, don't remove */
        /* tmp = radeon_get_ib_value(p, idx);
           ib[idx] = 0; */
        break;
    case SQ_ESGS_RING_BASE:
    case SQ_GSVS_RING_BASE:
    case SQ_ESTMP_RING_BASE:
    case SQ_GSTMP_RING_BASE:
    case SQ_HSTMP_RING_BASE:
    case SQ_LSTMP_RING_BASE:
    case SQ_PSTMP_RING_BASE:
    case SQ_VSTMP_RING_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    case DB_DEPTH_CONTROL:
        track->db_depth_control = radeon_get_ib_value(p, idx);
        break;
    case CAYMAN_DB_EQAA:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case CAYMAN_DB_DEPTH_INFO:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        break;
    case DB_Z_INFO:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_info = radeon_get_ib_value(p, idx);
        ib[idx] &= ~Z_ARRAY_MODE(0xf);
        track->db_z_info &= ~Z_ARRAY_MODE(0xf);
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
            ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
            track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
        } else {
            ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
            track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
        }
        break;
    case DB_STENCIL_INFO:
        track->db_s_info = radeon_get_ib_value(p, idx);
        break;
    case DB_DEPTH_VIEW:
        track->db_depth_view = radeon_get_ib_value(p, idx);
        break;
    case DB_DEPTH_SIZE:
        track->db_depth_size = radeon_get_ib_value(p, idx);
        track->db_depth_size_idx = idx;
        break;
    case DB_Z_READ_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_read_bo = reloc->robj;
        break;
    case DB_Z_WRITE_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_z_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_z_write_bo = reloc->robj;
        break;
    case DB_STENCIL_READ_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_read_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_read_bo = reloc->robj;
        break;
    case DB_STENCIL_WRITE_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        track->db_s_write_offset = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->db_s_write_bo = reloc->robj;
        break;
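    /* stream out state is only recorded here so that
     * evergreen_cs_track_check() can reject command streams that try to
     * enable it (see above) */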
    case VGT_STRMOUT_CONFIG:
        track->vgt_strmout_config = radeon_get_ib_value(p, idx);
        break;
    case VGT_STRMOUT_BUFFER_CONFIG:
        track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
        break;
    case CB_TARGET_MASK:
        track->cb_target_mask = radeon_get_ib_value(p, idx);
        break;
    case CB_SHADER_MASK:
        track->cb_shader_mask = radeon_get_ib_value(p, idx);
        break;
    case PA_SC_AA_CONFIG:
        if (p->rdev->family >= CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
    case CAYMAN_PA_SC_AA_CONFIG:
        if (p->rdev->family < CHIP_CAYMAN) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
        track->nsamples = 1 << tmp;
        break;
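    /* CB registers for buffers 0-7 are spaced 0x3c apart while buffers
     * 8-11 use a 0x1c stride from the CB_COLOR8_* registers, hence the
     * two index formulas in the cases below */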
    case CB_COLOR0_VIEW:
    case CB_COLOR1_VIEW:
    case CB_COLOR2_VIEW:
    case CB_COLOR3_VIEW:
    case CB_COLOR4_VIEW:
    case CB_COLOR5_VIEW:
    case CB_COLOR6_VIEW:
    case CB_COLOR7_VIEW:
        tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR8_VIEW:
    case CB_COLOR9_VIEW:
    case CB_COLOR10_VIEW:
    case CB_COLOR11_VIEW:
        tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
        track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_INFO:
    case CB_COLOR1_INFO:
    case CB_COLOR2_INFO:
    case CB_COLOR3_INFO:
    case CB_COLOR4_INFO:
    case CB_COLOR5_INFO:
    case CB_COLOR6_INFO:
    case CB_COLOR7_INFO:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - CB_COLOR0_INFO) / 0x3c;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
            ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
        } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
            ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
        }
        break;
    case CB_COLOR8_INFO:
    case CB_COLOR9_INFO:
    case CB_COLOR10_INFO:
    case CB_COLOR11_INFO:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
        track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
            ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
        } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
            ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
            track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
        }
        break;
    case CB_COLOR0_PITCH:
    case CB_COLOR1_PITCH:
    case CB_COLOR2_PITCH:
    case CB_COLOR3_PITCH:
    case CB_COLOR4_PITCH:
    case CB_COLOR5_PITCH:
    case CB_COLOR6_PITCH:
    case CB_COLOR7_PITCH:
        tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_pitch_idx[tmp] = idx;
        break;
    case CB_COLOR8_PITCH:
    case CB_COLOR9_PITCH:
    case CB_COLOR10_PITCH:
    case CB_COLOR11_PITCH:
        tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
        track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_pitch_idx[tmp] = idx;
        break;
    case CB_COLOR0_SLICE:
    case CB_COLOR1_SLICE:
    case CB_COLOR2_SLICE:
    case CB_COLOR3_SLICE:
    case CB_COLOR4_SLICE:
    case CB_COLOR5_SLICE:
    case CB_COLOR6_SLICE:
    case CB_COLOR7_SLICE:
        tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        break;
    case CB_COLOR8_SLICE:
    case CB_COLOR9_SLICE:
    case CB_COLOR10_SLICE:
    case CB_COLOR11_SLICE:
        tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
        track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_slice_idx[tmp] = idx;
        break;
    case CB_COLOR0_ATTRIB:
    case CB_COLOR1_ATTRIB:
    case CB_COLOR2_ATTRIB:
    case CB_COLOR3_ATTRIB:
    case CB_COLOR4_ATTRIB:
    case CB_COLOR5_ATTRIB:
    case CB_COLOR6_ATTRIB:
    case CB_COLOR7_ATTRIB:
    case CB_COLOR8_ATTRIB:
    case CB_COLOR9_ATTRIB:
    case CB_COLOR10_ATTRIB:
    case CB_COLOR11_ATTRIB:
        break;
    case CB_COLOR0_DIM:
    case CB_COLOR1_DIM:
    case CB_COLOR2_DIM:
    case CB_COLOR3_DIM:
    case CB_COLOR4_DIM:
    case CB_COLOR5_DIM:
    case CB_COLOR6_DIM:
    case CB_COLOR7_DIM:
        tmp = (reg - CB_COLOR0_DIM) / 0x3c;
        track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_dim_idx[tmp] = idx;
        break;
    case CB_COLOR8_DIM:
    case CB_COLOR9_DIM:
    case CB_COLOR10_DIM:
    case CB_COLOR11_DIM:
        tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
        track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
        track->cb_color_dim_idx[tmp] = idx;
        break;
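    /* FMASK/CMASK surfaces only exist for the first eight colour buffers,
     * which is also why the tracker arrays for them are sized [8] */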
    case CB_COLOR0_FMASK:
    case CB_COLOR1_FMASK:
    case CB_COLOR2_FMASK:
    case CB_COLOR3_FMASK:
    case CB_COLOR4_FMASK:
    case CB_COLOR5_FMASK:
    case CB_COLOR6_FMASK:
    case CB_COLOR7_FMASK:
        tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_fmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_CMASK:
    case CB_COLOR1_CMASK:
    case CB_COLOR2_CMASK:
    case CB_COLOR3_CMASK:
    case CB_COLOR4_CMASK:
    case CB_COLOR5_CMASK:
    case CB_COLOR6_CMASK:
    case CB_COLOR7_CMASK:
        tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_cmask_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR0_FMASK_SLICE:
    case CB_COLOR1_FMASK_SLICE:
    case CB_COLOR2_FMASK_SLICE:
    case CB_COLOR3_FMASK_SLICE:
    case CB_COLOR4_FMASK_SLICE:
    case CB_COLOR5_FMASK_SLICE:
    case CB_COLOR6_FMASK_SLICE:
    case CB_COLOR7_FMASK_SLICE:
        tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
        track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_CMASK_SLICE:
    case CB_COLOR1_CMASK_SLICE:
    case CB_COLOR2_CMASK_SLICE:
    case CB_COLOR3_CMASK_SLICE:
    case CB_COLOR4_CMASK_SLICE:
    case CB_COLOR5_CMASK_SLICE:
    case CB_COLOR6_CMASK_SLICE:
    case CB_COLOR7_CMASK_SLICE:
        tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
        track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
        break;
    case CB_COLOR0_BASE:
    case CB_COLOR1_BASE:
    case CB_COLOR2_BASE:
    case CB_COLOR3_BASE:
    case CB_COLOR4_BASE:
    case CB_COLOR5_BASE:
    case CB_COLOR6_BASE:
    case CB_COLOR7_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = (reg - CB_COLOR0_BASE) / 0x3c;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_base_last[tmp] = ib[idx];
        track->cb_color_bo[tmp] = reloc->robj;
        break;
    case CB_COLOR8_BASE:
    case CB_COLOR9_BASE:
    case CB_COLOR10_BASE:
    case CB_COLOR11_BASE:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
        track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        track->cb_color_base_last[tmp] = ib[idx];
        track->cb_color_bo[tmp] = reloc->robj;
        break;
    case CB_IMMED0_BASE:
    case CB_IMMED1_BASE:
    case CB_IMMED2_BASE:
    case CB_IMMED3_BASE:
    case CB_IMMED4_BASE:
    case CB_IMMED5_BASE:
    case CB_IMMED6_BASE:
    case CB_IMMED7_BASE:
    case CB_IMMED8_BASE:
    case CB_IMMED9_BASE:
    case CB_IMMED10_BASE:
    case CB_IMMED11_BASE:
    case DB_HTILE_DATA_BASE:
    case SQ_PGM_START_FS:
    case SQ_PGM_START_ES:
    case SQ_PGM_START_VS:
    case SQ_PGM_START_GS:
    case SQ_PGM_START_PS:
    case SQ_PGM_START_HS:
    case SQ_PGM_START_LS:
    case SQ_CONST_MEM_BASE:
    case SQ_ALU_CONST_CACHE_GS_0:
    case SQ_ALU_CONST_CACHE_GS_1:
    case SQ_ALU_CONST_CACHE_GS_2:
    case SQ_ALU_CONST_CACHE_GS_3:
    case SQ_ALU_CONST_CACHE_GS_4:
    case SQ_ALU_CONST_CACHE_GS_5:
    case SQ_ALU_CONST_CACHE_GS_6:
    case SQ_ALU_CONST_CACHE_GS_7:
    case SQ_ALU_CONST_CACHE_GS_8:
    case SQ_ALU_CONST_CACHE_GS_9:
    case SQ_ALU_CONST_CACHE_GS_10:
    case SQ_ALU_CONST_CACHE_GS_11:
    case SQ_ALU_CONST_CACHE_GS_12:
    case SQ_ALU_CONST_CACHE_GS_13:
    case SQ_ALU_CONST_CACHE_GS_14:
    case SQ_ALU_CONST_CACHE_GS_15:
    case SQ_ALU_CONST_CACHE_PS_0:
    case SQ_ALU_CONST_CACHE_PS_1:
    case SQ_ALU_CONST_CACHE_PS_2:
    case SQ_ALU_CONST_CACHE_PS_3:
    case SQ_ALU_CONST_CACHE_PS_4:
    case SQ_ALU_CONST_CACHE_PS_5:
    case SQ_ALU_CONST_CACHE_PS_6:
    case SQ_ALU_CONST_CACHE_PS_7:
    case SQ_ALU_CONST_CACHE_PS_8:
    case SQ_ALU_CONST_CACHE_PS_9:
    case SQ_ALU_CONST_CACHE_PS_10:
    case SQ_ALU_CONST_CACHE_PS_11:
    case SQ_ALU_CONST_CACHE_PS_12:
    case SQ_ALU_CONST_CACHE_PS_13:
    case SQ_ALU_CONST_CACHE_PS_14:
    case SQ_ALU_CONST_CACHE_PS_15:
    case SQ_ALU_CONST_CACHE_VS_0:
    case SQ_ALU_CONST_CACHE_VS_1:
    case SQ_ALU_CONST_CACHE_VS_2:
    case SQ_ALU_CONST_CACHE_VS_3:
    case SQ_ALU_CONST_CACHE_VS_4:
    case SQ_ALU_CONST_CACHE_VS_5:
    case SQ_ALU_CONST_CACHE_VS_6:
    case SQ_ALU_CONST_CACHE_VS_7:
    case SQ_ALU_CONST_CACHE_VS_8:
    case SQ_ALU_CONST_CACHE_VS_9:
    case SQ_ALU_CONST_CACHE_VS_10:
    case SQ_ALU_CONST_CACHE_VS_11:
    case SQ_ALU_CONST_CACHE_VS_12:
    case SQ_ALU_CONST_CACHE_VS_13:
    case SQ_ALU_CONST_CACHE_VS_14:
    case SQ_ALU_CONST_CACHE_VS_15:
    case SQ_ALU_CONST_CACHE_HS_0:
    case SQ_ALU_CONST_CACHE_HS_1:
    case SQ_ALU_CONST_CACHE_HS_2:
    case SQ_ALU_CONST_CACHE_HS_3:
    case SQ_ALU_CONST_CACHE_HS_4:
    case SQ_ALU_CONST_CACHE_HS_5:
    case SQ_ALU_CONST_CACHE_HS_6:
    case SQ_ALU_CONST_CACHE_HS_7:
    case SQ_ALU_CONST_CACHE_HS_8:
    case SQ_ALU_CONST_CACHE_HS_9:
    case SQ_ALU_CONST_CACHE_HS_10:
    case SQ_ALU_CONST_CACHE_HS_11:
    case SQ_ALU_CONST_CACHE_HS_12:
    case SQ_ALU_CONST_CACHE_HS_13:
    case SQ_ALU_CONST_CACHE_HS_14:
    case SQ_ALU_CONST_CACHE_HS_15:
    case SQ_ALU_CONST_CACHE_LS_0:
    case SQ_ALU_CONST_CACHE_LS_1:
    case SQ_ALU_CONST_CACHE_LS_2:
    case SQ_ALU_CONST_CACHE_LS_3:
    case SQ_ALU_CONST_CACHE_LS_4:
    case SQ_ALU_CONST_CACHE_LS_5:
    case SQ_ALU_CONST_CACHE_LS_6:
    case SQ_ALU_CONST_CACHE_LS_7:
    case SQ_ALU_CONST_CACHE_LS_8:
    case SQ_ALU_CONST_CACHE_LS_9:
    case SQ_ALU_CONST_CACHE_LS_10:
    case SQ_ALU_CONST_CACHE_LS_11:
    case SQ_ALU_CONST_CACHE_LS_12:
    case SQ_ALU_CONST_CACHE_LS_13:
    case SQ_ALU_CONST_CACHE_LS_14:
    case SQ_ALU_CONST_CACHE_LS_15:
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            dev_warn(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
            return -EINVAL;
        }
        ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        break;
    default:
        dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
        return -EINVAL;
    }
    return 0;
}
/**
 * evergreen_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
                                                   struct radeon_bo *texture,
                                                   struct radeon_bo *mipmap)
{
    /* XXX fill in */
    return 0;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                   struct radeon_cs_packet *pkt)
{
    struct radeon_cs_reloc *reloc;
    struct evergreen_cs_track *track;
    volatile u32 *ib;
    unsigned idx;
    unsigned i;
    unsigned start_reg, end_reg, reg;
    int r;
    u32 idx_value;

    track = (struct evergreen_cs_track *)p->track;
    ib = p->ib->ptr;
    idx = pkt->idx + 1;
    idx_value = radeon_get_ib_value(p, idx);

    switch (pkt->opcode) {
    case PACKET3_SET_PREDICATION:
    {
        int pred_op;
        int tmp;
        if (pkt->count != 1) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        tmp = radeon_get_ib_value(p, idx + 1);
        pred_op = (tmp >> 16) & 0x7;

        /* for the clear predicate operation */
        if (pred_op == 0)
            return 0;

        if (pred_op > 2) {
            DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
            return -EINVAL;
        }

        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad SET PREDICATION\n");
            return -EINVAL;
        }

        ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
    }
    break;
    case PACKET3_CONTEXT_CONTROL:
        if (pkt->count != 1) {
            DRM_ERROR("bad CONTEXT_CONTROL\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_TYPE:
    case PACKET3_NUM_INSTANCES:
    case PACKET3_CLEAR_STATE:
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case CAYMAN_PACKET3_DEALLOC_STATE:
        if (p->rdev->family < CHIP_CAYMAN) {
            DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
            return -EINVAL;
        }
        if (pkt->count) {
            DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
            return -EINVAL;
        }
        break;
    case PACKET3_INDEX_BASE:
        if (pkt->count != 1) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad INDEX_BASE\n");
            return -EINVAL;
        }
        ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX:
        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX\n");
            return -EINVAL;
        }
        ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_2:
        if (pkt->count != 4) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad DRAW_INDEX_2\n");
            return -EINVAL;
        }
        ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_AUTO:
        if (pkt->count != 1) {
            DRM_ERROR("bad DRAW_INDEX_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_MULTI_AUTO:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_IMMD:
        if (pkt->count < 2) {
            DRM_ERROR("bad DRAW_INDEX_IMMD\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET:
        if (pkt->count != 2) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_DRAW_INDEX_OFFSET_2:
        if (pkt->count != 3) {
            DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
            return -EINVAL;
        }
        r = evergreen_cs_track_check(p);
        if (r) {
            dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
            return r;
        }
        break;
    case PACKET3_WAIT_REG_MEM:
        if (pkt->count != 5) {
            DRM_ERROR("bad WAIT_REG_MEM\n");
            return -EINVAL;
        }
        /* bit 4 is reg (0) or mem (1) */
        if (idx_value & 0x10) {
            r = evergreen_cs_packet_next_reloc(p, &reloc);
            if (r) {
                DRM_ERROR("bad WAIT_REG_MEM\n");
                return -EINVAL;
            }
            ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
            ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        }
        break;
    case PACKET3_SURFACE_SYNC:
        if (pkt->count != 3) {
            DRM_ERROR("bad SURFACE_SYNC\n");
            return -EINVAL;
        }
        /* 0xffffffff/0x0 is flush all cache flag */
        if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
            radeon_get_ib_value(p, idx + 2) != 0) {
            r = evergreen_cs_packet_next_reloc(p, &reloc);
            if (r) {
                DRM_ERROR("bad SURFACE_SYNC\n");
                return -EINVAL;
            }
            ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
        }
        break;
    case PACKET3_EVENT_WRITE:
        if (pkt->count != 2 && pkt->count != 0) {
            DRM_ERROR("bad EVENT_WRITE\n");
            return -EINVAL;
        }
        if (pkt->count) {
            r = evergreen_cs_packet_next_reloc(p, &reloc);
            if (r) {
                DRM_ERROR("bad EVENT_WRITE\n");
                return -EINVAL;
            }
            ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
            ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        }
        break;
    case PACKET3_EVENT_WRITE_EOP:
        if (pkt->count != 4) {
            DRM_ERROR("bad EVENT_WRITE_EOP\n");
            return -EINVAL;
        }
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad EVENT_WRITE_EOP\n");
            return -EINVAL;
        }
        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        break;
    case PACKET3_EVENT_WRITE_EOS:
        if (pkt->count != 3) {
            DRM_ERROR("bad EVENT_WRITE_EOS\n");
            return -EINVAL;
        }
        r = evergreen_cs_packet_next_reloc(p, &reloc);
        if (r) {
            DRM_ERROR("bad EVENT_WRITE_EOS\n");
            return -EINVAL;
        }
        ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
        ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
        break;
    case PACKET3_SET_CONFIG_REG:
        start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
            (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
            (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
            DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
            return -EINVAL;
        }
        for (i = 0; i < pkt->count; i++) {
            reg = start_reg + (4 * i);
            r = evergreen_cs_check_reg(p, reg, idx+1+i);
            if (r)
                return r;
        }
        break;
    case PACKET3_SET_CONTEXT_REG:
        start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
            (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
            (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
            DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
            return -EINVAL;
        }
        for (i = 0; i < pkt->count; i++) {
            reg = start_reg + (4 * i);
            r = evergreen_cs_check_reg(p, reg, idx+1+i);
            if (r)
                return r;
        }
        break;
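    /* SET_RESOURCE writes 8-dword descriptors (texture or vertex buffer);
     * dword 7 carries the constant type used below to pick the right
     * validation path */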
    case PACKET3_SET_RESOURCE:
        if (pkt->count % 8) {
            DRM_ERROR("bad SET_RESOURCE\n");
            return -EINVAL;
        }
        start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_RESOURCE_START) ||
            (start_reg >= PACKET3_SET_RESOURCE_END) ||
            (end_reg >= PACKET3_SET_RESOURCE_END)) {
            DRM_ERROR("bad SET_RESOURCE\n");
            return -EINVAL;
        }
        for (i = 0; i < (pkt->count / 8); i++) {
            struct radeon_bo *texture, *mipmap;
            u32 size, offset;

            switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
            case SQ_TEX_VTX_VALID_TEXTURE:
                /* tex base */
                r = evergreen_cs_packet_next_reloc(p, &reloc);
                if (r) {
                    DRM_ERROR("bad SET_RESOURCE (tex)\n");
                    return -EINVAL;
                }
                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                    ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                    ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
                texture = reloc->robj;
                /* tex mip base */
                r = evergreen_cs_packet_next_reloc(p, &reloc);
                if (r) {
                    DRM_ERROR("bad SET_RESOURCE (tex)\n");
                    return -EINVAL;
                }
                ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                mipmap = reloc->robj;
                r = evergreen_check_texture_resource(p, idx+1+(i*8),
                        texture, mipmap);
                if (r)
                    return r;
                break;
            case SQ_TEX_VTX_VALID_BUFFER:
                /* vtx base */
                r = evergreen_cs_packet_next_reloc(p, &reloc);
                if (r) {
                    DRM_ERROR("bad SET_RESOURCE (vtx)\n");
                    return -EINVAL;
                }
                offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
                size = radeon_get_ib_value(p, idx+1+(i*8)+1);
                if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
                    /* force size to size of the buffer */
                    dev_warn(p->dev, "vbo resource seems too big for the bo\n");
                    ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
                }
                ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
                ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
                break;
            case SQ_TEX_VTX_INVALID_TEXTURE:
            case SQ_TEX_VTX_INVALID_BUFFER:
            default:
                DRM_ERROR("bad SET_RESOURCE\n");
                return -EINVAL;
            }
        }
        break;
    case PACKET3_SET_ALU_CONST:
        /* XXX fix me ALU const buffers only */
        break;
    case PACKET3_SET_BOOL_CONST:
        start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
            (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
            (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
            DRM_ERROR("bad SET_BOOL_CONST\n");
            return -EINVAL;
        }
        break;
    case PACKET3_SET_LOOP_CONST:
        start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
            (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
            (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
            DRM_ERROR("bad SET_LOOP_CONST\n");
            return -EINVAL;
        }
        break;
    case PACKET3_SET_CTL_CONST:
        start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
            (start_reg >= PACKET3_SET_CTL_CONST_END) ||
            (end_reg >= PACKET3_SET_CTL_CONST_END)) {
            DRM_ERROR("bad SET_CTL_CONST\n");
            return -EINVAL;
        }
        break;
    case PACKET3_SET_SAMPLER:
        if (pkt->count % 3) {
            DRM_ERROR("bad SET_SAMPLER\n");
            return -EINVAL;
        }
        start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
        end_reg = 4 * pkt->count + start_reg - 4;
        if ((start_reg < PACKET3_SET_SAMPLER_START) ||
            (start_reg >= PACKET3_SET_SAMPLER_END) ||
            (end_reg >= PACKET3_SET_SAMPLER_END)) {
            DRM_ERROR("bad SET_SAMPLER\n");
            return -EINVAL;
        }
        break;
    case PACKET3_NOP:
        break;
    default:
        DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
        return -EINVAL;
    }
    return 0;
}
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
    struct radeon_cs_packet pkt;
    struct evergreen_cs_track *track;
    int r;

    if (p->track == NULL) {
        /* initialize tracker, we are in kms */
        track = kzalloc(sizeof(*track), GFP_KERNEL);
        if (track == NULL)
            return -ENOMEM;
        evergreen_cs_track_init(track);
        track->npipes = p->rdev->config.evergreen.tiling_npipes;
        track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
        track->group_size = p->rdev->config.evergreen.tiling_group_size;
        p->track = track;
    }
    do {
        r = evergreen_cs_packet_parse(p, &pkt, p->idx);
        if (r) {
            kfree(p->track);
            p->track = NULL;
            return r;
        }
        p->idx += pkt.count + 2;
        switch (pkt.type) {
        case PACKET_TYPE0:
            r = evergreen_cs_parse_packet0(p, &pkt);
            break;
        case PACKET_TYPE2:
            break;
        case PACKET_TYPE3:
            r = evergreen_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n", pkt.type);
            kfree(p->track);
            p->track = NULL;
            return -EINVAL;
        }
        if (r) {
            kfree(p->track);
            p->track = NULL;
            return r;
        }
    } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
    for (r = 0; r < p->ib->length_dw; r++) {
        printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
        mdelay(1);
    }
#endif
    kfree(p->track);
    p->track = NULL;
    return 0;
}