/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
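/*
 * Worked example: with HZ == 250 this evaluates to 250 / 60 == 4 jiffies,
 * i.e. a repost interval of roughly 16 ms (~60 Hz).  The ternary guard
 * only matters for HZ < 60, where the integer division would otherwise
 * yield a zero delay.
 */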
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
 * Display Unit Cursor functions
 */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
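/*
 * Layout note: the reservation above is one 32-bit command id plus the
 * SVGAFifoCmdDefineAlphaCursor header, immediately followed by
 * width * height 32-bit ARGB texels copied to &cmd[1] (the first byte
 * past the header struct).  That is why cmd_size is
 * sizeof(*cmd) + image_size.
 */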
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
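/*
 * Minimal usage sketch, mirroring the calls made elsewhere in this file:
 *
 *	vmw_cursor_update_position(dev_priv, false, 0, 0);	hide
 *	vmw_cursor_update_position(dev_priv, true, x, y);	show at (x, y)
 *
 * Bumping SVGA_FIFO_CURSOR_COUNT is what signals the host that a new
 * cursor state is ready to be picked up.
 */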
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n",
					  ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}
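/*
 * Reference ownership in vmw_du_crtc_cursor_set(): the lookup helpers
 * return with one reference held, and that reference is handed straight
 * to du->cursor_surface / du->cursor_dmabuf.  It is dropped again in the
 * takedown path on the next cursor_set call, or in
 * vmw_display_unit_cleanup().
 */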
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy box */
		DRM_ERROR("Can't snoop dma request for cursor!\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't call vmw_cursor_update_image() from here, since execbuf
	 * has already reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
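/*
 * The checks above deliberately accept only a full, page-aligned 64x64
 * upload: snooper.image is a fixed 64*64*4-byte shadow copy, so partial
 * boxes, other pitches or multiple copy boxes would need real blitting
 * logic that this snooper does not implement.
 */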
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}
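/*
 * Together, vmw_kms_cursor_snoop() and vmw_kms_cursor_post_execbuf()
 * form a simple generation counter: every snooped upload bumps
 * snooper.age, and once execbuf has finished, any CRTC whose cursor_age
 * lags behind re-sends the shadowed image through the fifo.
 */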
/*
 * Generic framebuffer code
 */
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
/*
 * Surface framebuffer code
 */
#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
	struct list_head head;
	struct drm_master *master;
};
/**
 * vmw_kms_idle_workqueues - Flush workqueues on this master
 *
 * @vmaster: Pointer identifying the master, for the surfaces of which
 * we idle the dirty work queues.
 *
 * This function should be called with the ttm lock held in exclusive mode
 * to idle all dirty work queues before the fifo is taken down.
 *
 * The work task may actually requeue itself, but after the flush returns we're
 * sure that there's nothing to present, since the ttm lock is held in
 * exclusive mode, so the fifo will never get used.
 */
void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
{
	struct vmw_framebuffer_surface *entry;

	mutex_lock(&vmaster->fb_surf_mutex);
	list_for_each_entry(entry, &vmaster->fb_surf, head) {
		if (cancel_delayed_work_sync(&entry->d_work))
			(void) entry->d_work.work.func(&entry->d_work.work);

		(void) cancel_delayed_work_sync(&entry->d_work);
	}
	mutex_unlock(&vmaster->fb_surf_mutex);
}
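/*
 * The cancel/run/cancel sequence above is intentional: a pending work
 * item is cancelled and then executed synchronously so the final present
 * is not lost, and since the handler may requeue itself, a second
 * cancel_delayed_work_sync() clears that requeued instance as well.
 */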
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);

	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);

	cancel_delayed_work_sync(&vfbs->d_work);
	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);

	kfree(vfbs);
}
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	/*
	 * Strictly we should take the ttm_lock in read mode before accessing
	 * the fifo, to make sure the fifo is present and up. However,
	 * instead we flush all workqueues under the ttm lock in exclusive mode
	 * before taking down the fifo.
	 */
	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/*
	 * Will not re-add if already pending.
	 */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}
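/*
 * This callback, together with the present_fs flag, throttles
 * full-screen presents: dirty reports only set the flag and (re)arm the
 * delayed work, so at most one SVGA_3D_CMD_PRESENT per
 * VMWGFX_PRESENT_RATE tick is emitted no matter how often the
 * framebuffer is touched.
 */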
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/*
			 * No work pending; force an immediate present.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		ttm_read_unlock(&vmaster->lock);
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) +
			       (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		ttm_read_unlock(&vmaster->lock);
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) +
				       num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) +
			(num_clips - 1) * sizeof(cmd->cr));
	ttm_read_unlock(&vmaster->lock);
	return 0;
}
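/*
 * Sizing note for the reserve/commit pair above: the command struct
 * already embeds one SVGA3dCopyRect, so only (num_clips - 1) additional
 * rects are reserved, while the header's size field counts the body plus
 * all num_clips rects.
 */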
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
					   struct drm_file *file_priv,
					   struct vmw_surface *surface,
					   struct vmw_framebuffer **out,
					   const struct drm_mode_fb_cmd
					   *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	enum SVGA3dSurfaceFormat format;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Sanity checks.
	 */
	if (unlikely(surface->mip_levels[0] != 1 ||
		     surface->num_sizes != 1 ||
		     surface->sizes[0].width < mode_cmd->width ||
		     surface->sizes[0].height < mode_cmd->height ||
		     surface->sizes[0].depth != 1)) {
		DRM_ERROR("Incompatible surface dimensions "
			  "for requested mode.\n");
		return -EINVAL;
	}

	switch (mode_cmd->depth) {
	case 32:
		format = SVGA3D_A8R8G8B8;
		break;
	case 24:
		format = SVGA3D_X8R8G8B8;
		break;
	case 16:
		format = SVGA3D_R5G6B5;
		break;
	case 15:
		format = SVGA3D_A1R5G5B5;
		break;
	case 8:
		format = SVGA3D_LUMINANCE8;
		break;
	default:
		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
		return -EINVAL;
	}

	if (unlikely(format != surface->format)) {
		DRM_ERROR("Invalid surface format for requested mode.\n");
		return -EINVAL;
	}

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		goto out_err3;
	}

	/* XXX get the first 3 from the surface info */
	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbs->base.base.pitch = mode_cmd->pitch;
	vfbs->base.base.depth = mode_cmd->depth;
	vfbs->base.base.width = mode_cmd->width;
	vfbs->base.base.height = mode_cmd->height;
	vfbs->base.pin = &vmw_surface_dmabuf_pin;
	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
	vfbs->surface = surface;
	vfbs->master = drm_master_get(file_priv->master);
	mutex_init(&vfbs->work_lock);

	mutex_lock(&vmaster->fb_surf_mutex);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	list_add_tail(&vfbs->head, &vmaster->fb_surf);
	mutex_unlock(&vmaster->fb_surf_mutex);

	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}
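/*
 * Note that the switch in vmw_kms_new_framebuffer_surface() keys on the
 * DRM depth, not bpp: a 32 bpp mode with depth 24 maps to
 * SVGA3D_X8R8G8B8 (alpha bits ignored), while depth 32 means the alpha
 * bits are meaningful and maps to SVGA3D_A8R8G8B8.
 */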
/*
 * Dmabuf framebuffer code
 */
#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		ttm_read_unlock(&vmaster->lock);
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
	ttm_read_unlock(&vmaster->lock);

	return 0;
}
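/*
 * With DRM_MODE_FB_DIRTY_ANNOTATE_COPY, userspace passes clip rects in
 * pairs; halving num_clips and stepping by two visits only the
 * destination rect of each pair, which is all SVGA_CMD_UPDATE needs.
 */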
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
/*
 * We need to reserve the start of vram, because the host might scribble
 * over it at mode changes.
 */
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);
	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
	int ret;

	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
	if (unlikely(vfbs->buffer == NULL))
		return -ENOMEM;

	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);
	if (unlikely(ret != 0))
		vfbs->buffer = NULL;

	return ret;
}
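/*
 * A hedged note on the placement used above: going by its name,
 * vmw_vram_ne_placement is a VRAM placement with "ne" standing for
 * no-evict, so the buffer allocated here stays resident while it backs
 * the scanout surface; the exact placement flags are defined elsewhere
 * in the driver.
 */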
/*
 * See vmw_surface_dmabuf_pin.
 */
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct ttm_buffer_object *bo;
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);

	if (unlikely(vfbs->buffer == NULL))
		return 0;

	bo = &vfbs->buffer->base;
	ttm_bo_unref(&bo);
	vfbs->buffer = NULL;

	return 0;
}
/*
 * Pin the dmabuffer to the start of vram.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (!vfbd->buffer) {
		WARN_ON(!vfbd->buffer);
		return 0;
	}

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
					  struct vmw_dma_buffer *dmabuf,
					  struct vmw_framebuffer **out,
					  const struct drm_mode_fb_cmd
					  *mode_cmd)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	unsigned int requested_size;
	int ret;

	requested_size = mode_cmd->height * mode_cmd->pitch;
	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
		DRM_ERROR("Screen buffer object size is too small "
			  "for requested mode.\n");
		return -EINVAL;
	}

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		goto out_err3;
	}

	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
	vfbd->base.base.pitch = mode_cmd->pitch;
	vfbd->base.base.depth = mode_cmd->depth;
	vfbd->base.base.width = mode_cmd->width;
	vfbd->base.base.height = mode_cmd->height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}
/*
 * Generic Kernel modesetting functions
 */
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	u64 required_size;
	int ret;

	/*
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */
	required_size = mode_cmd->pitch * mode_cmd->height;
	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * End conditioned code.
	 */

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
					      &vfb, mode_cmd);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return ERR_PTR(-ENOENT);
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd);

	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return ERR_PTR(ret);
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return ERR_PTR(-EINVAL);
}
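/*
 * Worked example for the VRAM check in vmw_kms_fb_create(): a 1280x1024
 * mode at 32 bpp has a pitch of 1280 * 4 = 5120 bytes, so required_size
 * is 5120 * 1024 = 5 MiB, which must fit within dev_priv->vram_size.
 */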
static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	ret = vmw_kms_init_legacy_display_system(dev_priv);

	return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs say we should take the lock before calling this function,
	 * but since it destroys encoders, and our destructor calls
	 * drm_encoder_cleanup, which takes the lock, we would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
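/*
 * Usage sketch with hypothetical values: programming a 1024x768, 32 bpp
 * mode with a packed pitch would be
 *
 *	vmw_kms_write_svga(dev_priv, 1024, 768, 1024 * 4, 32, 24);
 *
 * where depth 24 is what the host typically reports for 32 bpp
 * X8R8G8B8 scanout.
 */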
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */

			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
}
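/*
 * The u64 casts above matter: pitch and height are 32-bit, and their
 * product can exceed 32 bits for large virtual desktops, so the
 * comparison is done in 64-bit arithmetic.  For example a 32768-byte
 * pitch at height 8192 yields 268435456 bytes (256 MiB) without
 * overflow, where a 32-bit product of bigger values could wrap.
 */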
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}
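/*
 * Returning a constant 0 here appears deliberate: the legacy display
 * unit has no real vblank counter to report, so the DRM core simply
 * sees a counter that never advances.
 */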