/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}

	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);
	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}
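
/*
 * Generic command verifier callbacks: vmw_cmd_invalid rejects commands
 * that user-space must not issue directly (it returns a non-zero value in
 * all cases, -EINVAL for unprivileged callers), while vmw_cmd_ok accepts
 * a command without further checking.
 */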

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo, NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, will be updated with the resource validation node.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}
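
/**
 * vmw_cmd_set_render_target_check - Validate an SVGA_3D_CMD_SETRENDERTARGET
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the embedded context id and puts the render target surface on the
 * resource validate list.
 */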
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, NULL);
	return ret;
}
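
/**
 * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Puts both the source and the destination surface on the resource
 * validate list.
 */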
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
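
/**
 * vmw_cmd_stretch_blt_check - Validate an SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Puts both the source and the destination surface on the resource
 * validate list.
 */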
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}
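
/**
 * vmw_cmd_blt_surf_screen_check - Validate an
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Only allowed for kernel submissions; puts the source image surface on
 * the resource validate list.
 */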
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}
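
/**
 * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Only allowed for kernel submissions; puts the presented surface on the
 * resource validate list.
 */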
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
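
/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Translates the guest pointer, validates the host surface and lets the
 * kms code snoop cursor DMA operations.
 */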
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
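
/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Bounds-checks the vertex declaration and index range arrays against the
 * command size and validates every surface they reference.
 */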
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
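
/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates every surface bound through an SVGA3D_TS_BIND_TEXTURE state.
 */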
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
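
/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the guest pointer that backs the GMRFB definition.
 */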
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}
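
/**
 * vmw_cmd_check_not_3d - Verify a command from the 2D (SVGA FIFO) range.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: Maximum available size on input, actual command size on output.
 *
 * Only a small set of 2D commands is accepted, and only for kernel
 * submissions.
 */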
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);
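
/*
 * Per-command verifier dispatch table, indexed by SVGA3D command id
 * relative to SVGA_3D_CMD_BASE. Commands mapped to vmw_cmd_invalid may
 * not be submitted by user-space.
 */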
#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};
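
/**
 * vmw_cmd_check - Verify a single command in the command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: Maximum available size on input, actual command size on output.
 *
 * Dispatches 2D commands to vmw_cmd_check_not_3d and 3D commands to the
 * verifier registered in vmw_cmd_funcs.
 */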
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
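
/**
 * vmw_cmd_check_all - Verify all commands in the command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the stream command by command, calling vmw_cmd_check on each one.
 */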
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
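
/**
 * vmw_apply_relocations - Patch guest pointers with validated buffer
 * locations.
 *
 * @sw_context: The software context used for this command submission.
 *
 * For each relocation recorded during command checking, writes the final
 * VRAM offset or GMR id of the validated buffer into the command stream.
 */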
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		kfree(val);
	}
}
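
/**
 * vmw_clear_validations - Drop buffer object references and hash entries
 * set up for validation.
 *
 * @sw_context: The software context used for this command submission.
 */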
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
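
/**
 * vmw_validate_single_buffer - Validate the placement of a single buffer
 * object.
 *
 * @dev_priv: Pointer to a device private struct.
 * @bo: The buffer object to validate.
 *
 * Pinned buffers are left untouched; other buffers are placed in VRAM or
 * a GMR, falling back to evicting VRAM contents if necessary.
 */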
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * The user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
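
/**
 * vmw_execbuf_process - Check, fix up and execute a command stream.
 *
 * @file_priv: Pointer to the drm file of the caller.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Kernel pointer to the command stream, or NULL to copy
 * the stream from @user_commands into the bounce buffer.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: Lag threshold to throttle against, or 0 for no throttling.
 * @user_fence_rep: User-space address to copy fence information to, or NULL.
 * @out_fence: If non-NULL, receives a reference to the submission fence.
 *
 * Verifies the command stream, reserves and validates all resources and
 * buffers it references, copies the commands to the fifo and fences the
 * submission.
 */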
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_err:
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
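
/**
 * vmw_execbuf_ioctl - User-space entry point of the execbuf ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to the ioctl argument (struct drm_vmw_execbuf_arg).
 * @file_priv: Pointer to the drm file of the caller.
 *
 * Checks the argument version, takes the read lock and hands the command
 * stream to vmw_execbuf_process().
 */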
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}