/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
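
/*
 * Relocation lookups are dispatched through a function pointer: the _mm
 * variant, which resolves buffer objects through the parser's reloc table,
 * is the default; r600_cs_legacy_init() at the end of this file switches
 * the legacy path over to the _nomm variant.
 */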
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
 */
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
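	/* a packet's payload is pkt->count + 1 dwords, so header plus payload
	 * must still fit inside the IB chunk */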
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * r600_cs_packet_next_reloc_mm() - parse next packet, which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
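	/* the relocation NOP occupies one header dword plus p3reloc.count + 1
	 * payload dwords; step the parser past it */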
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
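	/* idx is the dword offset of the reloc entry inside the relocation
	 * chunk, so with the 4-dword entries assumed above, idx / 4 selects the
	 * matching entry in the relocs_ptr table */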
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet, which should be a reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
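	/* without a memory manager there is no buffer object to look up: reuse
	 * the single reloc slot and build the 64-bit GPU offset directly from
	 * the reloc entry, dword 3 giving the high 32 bits and dword 0 the low */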
	*cs_reloc = &p->relocs[0];
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits.
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nulls out the wait in that case.
 */
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}
	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}
	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);

	h_idx = p->idx;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;
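	/* h_idx marks the saved PACKET0 header of the VLINE sequence; the
	 * crtc_id travels in the payload of the relocation NOP that follows the
	 * WAIT_REG_MEM packet (see the layout described above) */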
	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);

	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
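		/* the packets were emitted against CRTC 0; rewrite the PACKET0
		 * register and the polled status register to their D2 (second
		 * CRTC) equivalents */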
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			DRM_ERROR("unknown crtc reloc\n");
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
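	/* a type-0 packet writes pkt->count + 1 consecutive registers, one
	 * payload dword each, so validate every register/dword pair */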
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	unsigned start_reg, end_reg, reg;
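	/* each supported opcode is checked for its expected dword count and,
	 * for packets that reference buffer objects, the relocated GPU address
	 * is patched into the IB */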
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad DRAW_INDEX\n");
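		/* patch the index buffer address into the packet: low 32 bits in
		 * the first dword, the remaining high bits (8-bit field) in the
		 * second */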
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				DRM_ERROR("bad WAIT_REG_MEM\n");
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
				DRM_ERROR("bad SURFACE_SYNC\n");
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
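			/* the sync base address is programmed in 256-byte units,
			 * hence the >> 8 */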
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad EVENT_WRITE\n");
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
		r = r600_cs_packet_next_reloc(p, &reloc);
			DRM_ERROR("bad EVENT_WRITE\n");
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
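	/* For the SET_*_REG/CONST packets below, idx_value is a dword index
	 * relative to the block's base, so << 2 turns it into a register byte
	 * offset; the packet then writes pkt->count consecutive dwords, all of
	 * which must stay inside the block's register range. */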
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
				/* use PACKET3_SURFACE_SYNC */
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			case SQ_PGM_START_FS:
			case SQ_PGM_START_ES:
			case SQ_PGM_START_VS:
			case SQ_PGM_START_GS:
			case SQ_PGM_START_PS:
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_CONTEXT_REG "
						  "0x%04X\n", reg);
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			case VGT_DMA_BASE_HI:
				/* These should be handled by DRAW_INDEX packet 3 */
			case VGT_STRMOUT_BASE_OFFSET_0:
			case VGT_STRMOUT_BASE_OFFSET_1:
			case VGT_STRMOUT_BASE_OFFSET_2:
			case VGT_STRMOUT_BASE_OFFSET_3:
			case VGT_STRMOUT_BASE_OFFSET_HI_0:
			case VGT_STRMOUT_BASE_OFFSET_HI_1:
			case VGT_STRMOUT_BASE_OFFSET_HI_2:
			case VGT_STRMOUT_BASE_OFFSET_HI_3:
			case VGT_STRMOUT_BUFFER_BASE_0:
			case VGT_STRMOUT_BUFFER_BASE_1:
			case VGT_STRMOUT_BUFFER_BASE_2:
			case VGT_STRMOUT_BUFFER_BASE_3:
			case VGT_STRMOUT_BUFFER_OFFSET_0:
			case VGT_STRMOUT_BUFFER_OFFSET_1:
			case VGT_STRMOUT_BUFFER_OFFSET_2:
			case VGT_STRMOUT_BUFFER_OFFSET_3:
				/* These should be handled by STRMOUT_BUFFER packet 3 */
				DRM_ERROR("bad context reg: 0x%08x\n", reg);
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
		for (i = 0; i < (pkt->count / 7); i++) {
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
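				/* a texture resource references two buffers, so two
				 * relocation packets are consumed and both base
				 * addresses are patched */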
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
			case SQ_TEX_VTX_VALID_BUFFER:
				r = r600_cs_packet_next_reloc(p, &reloc);
					DRM_ERROR("bad SET_RESOURCE\n");
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
				DRM_ERROR("bad SET_RESOURCE\n");
	case PACKET3_SET_ALU_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
			DRM_ERROR("bad SET_ALU_CONST\n");
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
		DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		p->idx += pkt.count + 2;
		r = r600_cs_parse_packet0(p, &pkt);
		r = r600_packet3_check(p, &pkt);
		DRM_ERROR("Unknown packet type %d !\n", pkt.type);
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
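	/* the legacy path needs only a single reloc slot: the nomm relocation
	 * parser above always fills p->relocs[0] */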
	p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the memory
 * used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.family = family;
	parser.ib = &fake_ib;
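	/* fake_ib stands in for a driver-owned radeon_ib so the shared parsing
	 * code can run on this legacy path; the caller passes in the ib buffer
	 * and gets its dword length back through *l */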
	r = radeon_cs_parser_init(&parser, data);
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
	r = r600_cs_parser_relocs_legacy(&parser);
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
	r = radeon_cs_finish_pages(&parser);
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
	r600_cs_parser_fini(&parser, r);
void r600_cs_legacy_init(void)
{
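	/* legacy submission has no kernel memory manager backing the command
	 * stream, so relocations are parsed with the nomm variant */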
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}