/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
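
/*
 * radeon_cs_parser_relocs() - resolve the relocation chunk into BO objects
 *
 * Walks the RELOCS chunk, looks up each GEM handle, de-duplicates handles
 * that appear more than once, and adds every buffer object to the parser's
 * validated list so it can be reserved and validated before the IB runs.
 */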
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	unsigned i, j;
	bool duplicate;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			p->relocs[i].gobj = drm_gem_object_lookup(ddev,
								  p->filp,
								  r->handle);
			if (p->relocs[i].gobj == NULL) {
				DRM_ERROR("gem object lookup failed 0x%x\n",
					  r->handle);
				return -ENOENT;
			}
			p->relocs_ptr[i] = &p->relocs[i];
			p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
			p->relocs[i].lobj.bo = p->relocs[i].robj;
			p->relocs[i].lobj.wdomain = r->write_domain;
			p->relocs[i].lobj.rdomain = r->read_domains;
			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
			p->relocs[i].handle = r->handle;
			p->relocs[i].flags = r->flags;
			radeon_bo_list_add_object(&p->relocs[i].lobj,
						  &p->validated);
		} else
			p->relocs[i].handle = 0;
	}
	return radeon_bo_list_validate(&p->validated);
}
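
/*
 * radeon_cs_get_ring() - map a userspace ring id to a hardware ring index
 *
 * Translates the RADEON_CS_RING_* value (and priority) supplied in the
 * FLAGS chunk into one of the driver's ring indices, depending on the GPU
 * family: compute CS are routed to the separate compute rings only on SI
 * (CHIP_TAHITI) and later, otherwise they share the GFX ring, and DMA CS
 * require an R600 class part or newer.
 */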
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_R600) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	}
	return 0;
}
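
/*
 * radeon_cs_sync_to() - make the command stream wait on a fence
 *
 * Records @fence in the IB's per-ring sync_to[] slot, keeping whichever
 * fence is later if one is already pending for that ring. A NULL fence is
 * ignored.
 */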
static void radeon_cs_sync_to(struct radeon_cs_parser *p,
			      struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	other = p->ib.sync_to[fence->ring];
	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
}
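
/*
 * radeon_cs_sync_rings() - sync the IB against every relocated BO
 *
 * For each buffer object referenced by the relocation list, make the IB
 * wait on the BO's current TTM sync object (its last fence), so work on
 * other rings completes before this command stream touches the buffer.
 */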
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		if (!p->relocs[i].robj)
			continue;

		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
	}
}
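
/*
 * radeon_cs_parser_init() - copy and sanity check the CS chunks
 *
 * The ioctl argument is a struct drm_radeon_cs that points at an array of
 * chunk pointers; each chunk carries an id, a length in dwords and a user
 * pointer to its data. Roughly:
 *
 *	drm_radeon_cs.chunks --> [ chunk_ptr0, chunk_ptr1, ... ]
 *	each chunk_ptr       --> { chunk_id, length_dw, chunk_data }
 *	chunk_id             =   RADEON_CHUNK_ID_IB / _RELOCS / _CONST_IB / _FLAGS
 *
 * The RELOCS and FLAGS chunks are copied into kernel memory here; the IB
 * chunk itself is either copied later in one go (VM path) or paged in on
 * demand through radeon_cs_update_pages() (non-VM path).
 */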
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
			size = p->chunks[i].length_dw * sizeof(uint32_t);
			p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
			if (p->chunks[i].kdata == NULL) {
				return -ENOMEM;
			}
			if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
					       p->chunks[i].user_ptr, size)) {
				return -EFAULT;
			}
			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
				p->cs_flags = p->chunks[i].kdata[0];
				if (p->chunks[i].length_dw > 1)
					ring = p->chunks[i].kdata[1];
				if (p->chunks[i].length_dw > 2)
					priority = (s32)p->chunks[i].kdata[2];
			}
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		/* we only support VM on SI+ */
		if ((p->rdev->family >= CHIP_TAHITI) &&
		    ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
			DRM_ERROR("VM required on SI+!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;
	}

	/* deal with non-vm */
	if ((p->chunk_ib_idx != -1) &&
	    ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
	    (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
		if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
			DRM_ERROR("cs IB too big: %d\n",
				  p->chunks[p->chunk_ib_idx].length_dw);
			return -EINVAL;
		}
		if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
			p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
			    p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
				kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
				kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
				p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
				p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
				return -ENOMEM;
			}
		}
		p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
		p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
		p->chunks[p->chunk_ib_idx].last_copied_page = -1;
		p->chunks[p->chunk_ib_idx].last_page_index =
			((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
	}

	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, the reservations on the validated buffers are backed
 * off; otherwise the buffers are fenced. In both cases the memory used by
 * the parsing context is freed.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	if (!error) {
		ttm_eu_fence_buffer_objects(&parser->validated,
					    parser->ib.fence);
	} else {
		ttm_eu_backoff_reservation(&parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if ((parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
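
/*
 * radeon_cs_ib_chunk() - submit a command stream on the non-VM path
 *
 * Pulls the IB chunk into a driver IB, runs the family specific
 * radeon_cs_parse() checker over it, syncs against the relocated BOs and
 * schedules the IB on the selected ring. Skipped when the CS uses a VM.
 */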
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached).
	 */
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  NULL, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	r = radeon_cs_finish_pages(parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}
	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = parser->rdev;
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
	if (r) {
		return r;
	}
	list_for_each_entry(lobj, &parser->validated, tv.head) {
		bo = lobj->bo;
		r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
		if (r) {
			return r;
		}
	}
	return 0;
}
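
/*
 * radeon_cs_ib_vm_chunk() - submit a command stream on the VM path
 *
 * Copies the optional CONST_IB chunk (SI and later) and the IB chunk into
 * driver IBs, lets the ring specific parser check them, then, under the VM
 * manager and per-VM locks, allocates page tables, updates PTEs for the
 * validated BOs, syncs and schedules the IB(s) and fences the VM.
 */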
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
		r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
				  vm, ib_chunk->length_dw * 4);
		if (r) {
			DRM_ERROR("Failed to get const ib !\n");
			return r;
		}
		parser->const_ib.is_const_ib = true;
		parser->const_ib.length_dw = ib_chunk->length_dw;
		/* Copy the packet into the IB */
		if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
				       ib_chunk->length_dw * 4)) {
			return -EFAULT;
		}
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
	if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
		DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
		return -EINVAL;
	}
	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	/* Copy the packet into the IB */
	if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
			       ib_chunk->length_dw * 4)) {
		return -EFAULT;
	}
	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	mutex_lock(&rdev->vm_manager.lock);
	mutex_lock(&vm->mutex);
	r = radeon_vm_alloc_pt(rdev, vm);
	if (r) {
		goto out;
	}
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_cs_sync_to(parser, vm->fence);
	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
	}

	if (!r) {
		radeon_vm_fence(rdev, vm, parser->ib.fence);
	}

out:
	radeon_vm_add_to_lru(rdev, vm);
	mutex_unlock(&vm->mutex);
	mutex_unlock(&rdev->vm_manager.lock);
	return r;
}
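
/*
 * radeon_cs_handle_lockup() - translate a GPU lockup into a retryable error
 *
 * When a submission failed because the GPU is locked up (-EDEADLK), try a
 * GPU reset; if the reset succeeds, report -EAGAIN so userspace can
 * resubmit the command stream.
 */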
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
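
/*
 * radeon_cs_ioctl() - entry point for the DRM_RADEON_CS ioctl
 *
 * Takes the shared exclusive_lock for reading, initializes a parser on the
 * stack from the userspace chunks, resolves relocations, and then hands
 * the command stream to either the non-VM or the VM submission path before
 * cleaning up the parser state.
 */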
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_parser_relocs(&parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
		radeon_cs_parser_fini(&parser, r);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}
	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
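
/*
 * radeon_cs_finish_pages() - copy the remaining IB pages from userspace
 *
 * Copies every page after last_copied_page up to and including the last
 * page of the IB chunk into the IB, trimming the final copy to the real
 * length of the chunk.
 */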
int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;

	for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
		if (i == ibc->last_page_index) {
			size = (ibc->length_dw * 4) % PAGE_SIZE;
			if (size == 0)
				size = PAGE_SIZE;
		}

		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       size))
			return -EFAULT;
	}
	return 0;
}
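
/*
 * radeon_cs_update_pages() - page in the IB chunk up to @pg_idx
 *
 * Copies the skipped pages straight into the IB, then copies page @pg_idx
 * either directly into the IB (non-AGP) or into one of the two bounce
 * pages (AGP), which is then flushed to the IB with memcpy(). Returns the
 * index of the kpage now holding @pg_idx; on a failed copy from userspace
 * it sets p->parser_error and returns 0.
 */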
static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
	int new_page;
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	int i;
	int size = PAGE_SIZE;
	bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
		false : true;

	for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
		if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
				       ibc->user_ptr + (i * PAGE_SIZE),
				       PAGE_SIZE)) {
			p->parser_error = -EFAULT;
			return 0;
		}
	}

	if (pg_idx == ibc->last_page_index) {
		size = (ibc->length_dw * 4) % PAGE_SIZE;
		if (size == 0)
			size = PAGE_SIZE;
	}

	new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
	if (copy1)
		ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));

	if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
			       ibc->user_ptr + (pg_idx * PAGE_SIZE),
			       size)) {
		p->parser_error = -EFAULT;
		return 0;
	}

	/* copy to IB for non single case */
	if (!copy1)
		memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);

	ibc->last_copied_page = pg_idx;
	ibc->kpage_idx[new_page] = pg_idx;

	return new_page;
}
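
/*
 * radeon_get_ib_value() - fetch dword @idx from the IB chunk
 *
 * Fast path: return the value from one of the two cached kpages if the
 * page containing @idx is already resident; otherwise page it in through
 * radeon_cs_update_pages() first.
 */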
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}