/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

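/*
 * Verify that the context id referenced by a command is valid for this
 * client, caching the last successfully checked id in the software context.
 */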
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                return 0;

        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use context %u\n",
                          (unsigned) cmd->cid);
                return ret;
        }

        sw_context->last_cid = cmd->cid;
        sw_context->cid_valid = true;

        return 0;
}

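/*
 * Look up a surface id on behalf of the client and, on success, rewrite the
 * command stream entry with the device surface id. The last translation is
 * cached in the software context.
 */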
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             uint32_t *sid)
{
        if (*sid == SVGA3D_INVALID_ID)
                return 0;

        if (unlikely((!sw_context->sid_valid ||
                      *sid != sw_context->last_sid))) {
                int real_id;
                int ret = vmw_surface_check(dev_priv, sw_context->tfile,
                                            *sid, &real_id);

                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find or use surface 0x%08x "
                                  "address 0x%08lx\n",
                                  (unsigned int) *sid,
                                  (unsigned long) sid);
                        return ret;
                }

                sw_context->last_sid = *sid;
                sw_context->sid_valid = true;
                *sid = real_id;
                sw_context->sid_translation = real_id;
        } else
                *sid = sw_context->sid_translation;

        return 0;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
        return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

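/*
 * Translate a guest pointer: look up the DMA buffer backing its GMR id,
 * queue a relocation for the pointer and add the buffer object to the list
 * of buffers to validate. On success a reference to the buffer is returned
 * in @vmw_bo_p.
 */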
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
                DRM_ERROR("Max number of DMA buffers per submission"
                          " exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc->index = cur_validate_node;
        if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
                val_buf = &sw_context->val_bufs[cur_validate_node];
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->new_sync_obj_arg = (void *) dev_priv;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }
        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

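/*
 * Verify a SURFACE_DMA command: translate the guest pointer, look up the
 * host surface and patch the command stream with the device surface id
 * before the command reaches the device.
 */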
static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        bo = &vmw_bo->base;
        ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                             cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }

        /*
         * Patch command stream with device SID.
         */
        cmd->dma.host.sid = srf->res.id;
        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
        /*
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */
        vmw_surface_unreference(&srf);

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

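/*
 * Verify a DRAW_PRIMITIVES command: bounds-check the vertex declaration and
 * index range arrays against the command size and check every surface id
 * they reference.
 */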
static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &decl->array.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &range->indexArray.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

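/*
 * Verify a SETTEXTURESTATE command: walk the variable-length array of
 * texture states and check the surface id of every BIND_TEXTURE entry.
 */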
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
                ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &cur_state->value);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func

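/*
 * Per-command verifier functions, indexed by SVGA3D command id relative to
 * SVGA_3D_CMD_BASE. Commands that are not allowed in the command stream map
 * to vmw_cmd_invalid.
 */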
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
};

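/*
 * Verify a single command: determine its size, bounds-check it against the
 * remaining buffer and dispatch it to the matching verifier above.
 */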
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = ((uint32_t *)buf)[0];
        if (cmd_id == SVGA_CMD_UPDATE) {
                *size = 5 << 2;
                return 0;
        }

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_err;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}

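/*
 * Walk the whole command buffer, verifying one command at a time until the
 * buffer is exhausted or a command fails verification.
 */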
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf, uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

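/*
 * Patch the queued relocations with the final buffer placement: buffers
 * placed in VRAM are addressed through the framebuffer GMR, others through
 * their own GMR id.
 */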
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
                if (bo->mem.mem_type == TTM_PL_VRAM) {
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                } else
                        reloc->location->gmrId = bo->mem.start;
        }
        vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry, *next;

        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 head) {
                list_del(&entry->head);
                vmw_dmabuf_validate_clear(entry->bo);
                ttm_bo_unref(&entry->bo);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        /*
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */
        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /*
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */
        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}

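/*
 * DRM_VMW_EXECBUF ioctl: copy the user-space command buffer into the FIFO,
 * verify and patch it, validate all referenced buffers, optionally throttle,
 * commit the commands and return fence information to user space.
 */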
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct drm_vmw_fence_rep fence_rep;
        struct drm_vmw_fence_rep __user *user_fence_rep;
        int ret;
        void *user_cmd;
        void *cmd;
        uint32_t sequence;
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_master *vmaster = vmw_master(file_priv->master);

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_no_cmd_mutex;
        }

        cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        user_cmd = (void __user *)(unsigned long)arg->commands;
        ret = copy_from_user(cmd, user_cmd, arg->command_size);

        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                DRM_ERROR("Failed copying commands.\n");
                goto out_commit;
        }

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cid_valid = false;
        sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;

        INIT_LIST_HEAD(&sw_context->validate_nodes);

        ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
        if (unlikely(ret != 0))
                goto out_err;
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
                                     dev_priv->val_seq++);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        vmw_apply_relocations(sw_context);

        if (arg->throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
                                   arg->throttle_us);

                if (unlikely(ret != 0))
                        goto out_err;
        }

        vmw_fifo_commit(dev_priv, arg->command_size);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);

        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *)(unsigned long) sequence);
        vmw_clear_validations(sw_context);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync.
         */
        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;

        user_fence_rep = (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;

        /*
         * copy_to_user errors will be detected by user space not
         * seeing fence_rep::error filled in.
         */
        ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

        vmw_kms_cursor_post_execbuf(dev_priv);
        ttm_read_unlock(&vmaster->lock);
        return 0;
out_err:
        vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
        vmw_clear_validations(sw_context);
out_commit:
        vmw_fifo_commit(dev_priv, 0);
out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}