1 /**************************************************************************
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
/*
 * Minimum delay between deferred "present" flushes, in jiffies.
 * Targets ~60 Hz; clamps to 1 jiffy when HZ < 60 so the delay is never 0.
 */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
/*
 * Forward declarations: pin/unpin callbacks used by surface framebuffers;
 * defined further down in this file.
 */
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
36 void vmw_display_unit_cleanup(struct vmw_display_unit
*du
)
38 if (du
->cursor_surface
)
39 vmw_surface_unreference(&du
->cursor_surface
);
40 if (du
->cursor_dmabuf
)
41 vmw_dmabuf_unreference(&du
->cursor_dmabuf
);
42 drm_crtc_cleanup(&du
->crtc
);
43 drm_encoder_cleanup(&du
->encoder
);
44 drm_connector_cleanup(&du
->connector
);
48 * Display Unit Cursor functions
51 int vmw_cursor_update_image(struct vmw_private
*dev_priv
,
52 u32
*image
, u32 width
, u32 height
,
53 u32 hotspotX
, u32 hotspotY
)
57 SVGAFifoCmdDefineAlphaCursor cursor
;
59 u32 image_size
= width
* height
* 4;
60 u32 cmd_size
= sizeof(*cmd
) + image_size
;
65 cmd
= vmw_fifo_reserve(dev_priv
, cmd_size
);
66 if (unlikely(cmd
== NULL
)) {
67 DRM_ERROR("Fifo reserve failed.\n");
71 memset(cmd
, 0, sizeof(*cmd
));
73 memcpy(&cmd
[1], image
, image_size
);
75 cmd
->cmd
= cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR
);
76 cmd
->cursor
.id
= cpu_to_le32(0);
77 cmd
->cursor
.width
= cpu_to_le32(width
);
78 cmd
->cursor
.height
= cpu_to_le32(height
);
79 cmd
->cursor
.hotspotX
= cpu_to_le32(hotspotX
);
80 cmd
->cursor
.hotspotY
= cpu_to_le32(hotspotY
);
82 vmw_fifo_commit(dev_priv
, cmd_size
);
87 void vmw_cursor_update_position(struct vmw_private
*dev_priv
,
88 bool show
, int x
, int y
)
90 __le32 __iomem
*fifo_mem
= dev_priv
->mmio_virt
;
93 iowrite32(show
? 1 : 0, fifo_mem
+ SVGA_FIFO_CURSOR_ON
);
94 iowrite32(x
, fifo_mem
+ SVGA_FIFO_CURSOR_X
);
95 iowrite32(y
, fifo_mem
+ SVGA_FIFO_CURSOR_Y
);
96 count
= ioread32(fifo_mem
+ SVGA_FIFO_CURSOR_COUNT
);
97 iowrite32(++count
, fifo_mem
+ SVGA_FIFO_CURSOR_COUNT
);
100 int vmw_du_crtc_cursor_set(struct drm_crtc
*crtc
, struct drm_file
*file_priv
,
101 uint32_t handle
, uint32_t width
, uint32_t height
)
103 struct vmw_private
*dev_priv
= vmw_priv(crtc
->dev
);
104 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
105 struct vmw_display_unit
*du
= vmw_crtc_to_du(crtc
);
106 struct vmw_surface
*surface
= NULL
;
107 struct vmw_dma_buffer
*dmabuf
= NULL
;
111 ret
= vmw_user_surface_lookup_handle(dev_priv
, tfile
,
114 if (!surface
->snooper
.image
) {
115 DRM_ERROR("surface not suitable for cursor\n");
119 ret
= vmw_user_dmabuf_lookup(tfile
,
122 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret
);
128 /* takedown old cursor */
129 if (du
->cursor_surface
) {
130 du
->cursor_surface
->snooper
.crtc
= NULL
;
131 vmw_surface_unreference(&du
->cursor_surface
);
133 if (du
->cursor_dmabuf
)
134 vmw_dmabuf_unreference(&du
->cursor_dmabuf
);
136 /* setup new image */
138 /* vmw_user_surface_lookup takes one reference */
139 du
->cursor_surface
= surface
;
141 du
->cursor_surface
->snooper
.crtc
= crtc
;
142 du
->cursor_age
= du
->cursor_surface
->snooper
.age
;
143 vmw_cursor_update_image(dev_priv
, surface
->snooper
.image
,
144 64, 64, du
->hotspot_x
, du
->hotspot_y
);
146 struct ttm_bo_kmap_obj map
;
147 unsigned long kmap_offset
;
148 unsigned long kmap_num
;
152 /* vmw_user_surface_lookup takes one reference */
153 du
->cursor_dmabuf
= dmabuf
;
156 kmap_num
= (64*64*4) >> PAGE_SHIFT
;
158 ret
= ttm_bo_reserve(&dmabuf
->base
, true, false, false, 0);
159 if (unlikely(ret
!= 0)) {
160 DRM_ERROR("reserve failed\n");
164 ret
= ttm_bo_kmap(&dmabuf
->base
, kmap_offset
, kmap_num
, &map
);
165 if (unlikely(ret
!= 0))
168 virtual = ttm_kmap_obj_virtual(&map
, &dummy
);
169 vmw_cursor_update_image(dev_priv
, virtual, 64, 64,
170 du
->hotspot_x
, du
->hotspot_y
);
174 ttm_bo_unreserve(&dmabuf
->base
);
177 vmw_cursor_update_position(dev_priv
, false, 0, 0);
181 vmw_cursor_update_position(dev_priv
, true, du
->cursor_x
, du
->cursor_y
);
186 int vmw_du_crtc_cursor_move(struct drm_crtc
*crtc
, int x
, int y
)
188 struct vmw_private
*dev_priv
= vmw_priv(crtc
->dev
);
189 struct vmw_display_unit
*du
= vmw_crtc_to_du(crtc
);
190 bool shown
= du
->cursor_surface
|| du
->cursor_dmabuf
? true : false;
192 du
->cursor_x
= x
+ crtc
->x
;
193 du
->cursor_y
= y
+ crtc
->y
;
195 vmw_cursor_update_position(dev_priv
, shown
,
196 du
->cursor_x
, du
->cursor_y
);
201 void vmw_kms_cursor_snoop(struct vmw_surface
*srf
,
202 struct ttm_object_file
*tfile
,
203 struct ttm_buffer_object
*bo
,
204 SVGA3dCmdHeader
*header
)
206 struct ttm_bo_kmap_obj map
;
207 unsigned long kmap_offset
;
208 unsigned long kmap_num
;
214 SVGA3dCmdHeader header
;
215 SVGA3dCmdSurfaceDMA dma
;
219 cmd
= container_of(header
, struct vmw_dma_cmd
, header
);
221 /* No snooper installed */
222 if (!srf
->snooper
.image
)
225 if (cmd
->dma
.host
.face
!= 0 || cmd
->dma
.host
.mipmap
!= 0) {
226 DRM_ERROR("face and mipmap for cursors should never != 0\n");
230 if (cmd
->header
.size
< 64) {
231 DRM_ERROR("at least one full copy box must be given\n");
235 box
= (SVGA3dCopyBox
*)&cmd
[1];
236 box_count
= (cmd
->header
.size
- sizeof(SVGA3dCmdSurfaceDMA
)) /
237 sizeof(SVGA3dCopyBox
);
239 if (cmd
->dma
.guest
.pitch
!= (64 * 4) ||
240 cmd
->dma
.guest
.ptr
.offset
% PAGE_SIZE
||
241 box
->x
!= 0 || box
->y
!= 0 || box
->z
!= 0 ||
242 box
->srcx
!= 0 || box
->srcy
!= 0 || box
->srcz
!= 0 ||
243 box
->w
!= 64 || box
->h
!= 64 || box
->d
!= 1 ||
245 /* TODO handle none page aligned offsets */
246 /* TODO handle partial uploads and pitch != 256 */
247 /* TODO handle more then one copy (size != 64) */
248 DRM_ERROR("lazy programer, cant handle wierd stuff\n");
252 kmap_offset
= cmd
->dma
.guest
.ptr
.offset
>> PAGE_SHIFT
;
253 kmap_num
= (64*64*4) >> PAGE_SHIFT
;
255 ret
= ttm_bo_reserve(bo
, true, false, false, 0);
256 if (unlikely(ret
!= 0)) {
257 DRM_ERROR("reserve failed\n");
261 ret
= ttm_bo_kmap(bo
, kmap_offset
, kmap_num
, &map
);
262 if (unlikely(ret
!= 0))
265 virtual = ttm_kmap_obj_virtual(&map
, &dummy
);
267 memcpy(srf
->snooper
.image
, virtual, 64*64*4);
270 /* we can't call this function from this function since execbuf has
271 * reserved fifo space.
273 * if (srf->snooper.crtc)
274 * vmw_ldu_crtc_cursor_update_image(dev_priv,
275 * srf->snooper.image, 64, 64,
276 * du->hotspot_x, du->hotspot_y);
281 ttm_bo_unreserve(bo
);
284 void vmw_kms_cursor_post_execbuf(struct vmw_private
*dev_priv
)
286 struct drm_device
*dev
= dev_priv
->dev
;
287 struct vmw_display_unit
*du
;
288 struct drm_crtc
*crtc
;
290 mutex_lock(&dev
->mode_config
.mutex
);
292 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
293 du
= vmw_crtc_to_du(crtc
);
294 if (!du
->cursor_surface
||
295 du
->cursor_age
== du
->cursor_surface
->snooper
.age
)
298 du
->cursor_age
= du
->cursor_surface
->snooper
.age
;
299 vmw_cursor_update_image(dev_priv
,
300 du
->cursor_surface
->snooper
.image
,
301 64, 64, du
->hotspot_x
, du
->hotspot_y
);
304 mutex_unlock(&dev
->mode_config
.mutex
);
308 * Generic framebuffer code
/*
 * vmw_framebuffer_create_handle - DRM create_handle hook.
 *
 * This driver does not expose a GEM-style handle for its framebuffers;
 * report a handle of 0 and succeed.
 *
 * NOTE(review): the body of this function was not visible in the mangled
 * source; this minimal implementation matches the hook contract but
 * should be confirmed against the original.
 */
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}
322 * Surface framebuffer code
325 #define vmw_framebuffer_to_vfbs(x) \
326 container_of(x, struct vmw_framebuffer_surface, base.base)
328 struct vmw_framebuffer_surface
{
329 struct vmw_framebuffer base
;
330 struct vmw_surface
*surface
;
331 struct vmw_dma_buffer
*buffer
;
332 struct delayed_work d_work
;
333 struct mutex work_lock
;
337 void vmw_framebuffer_surface_destroy(struct drm_framebuffer
*framebuffer
)
339 struct vmw_framebuffer_surface
*vfb
=
340 vmw_framebuffer_to_vfbs(framebuffer
);
342 cancel_delayed_work_sync(&vfb
->d_work
);
343 drm_framebuffer_cleanup(framebuffer
);
344 vmw_surface_unreference(&vfb
->surface
);
349 static void vmw_framebuffer_present_fs_callback(struct work_struct
*work
)
351 struct delayed_work
*d_work
=
352 container_of(work
, struct delayed_work
, work
);
353 struct vmw_framebuffer_surface
*vfbs
=
354 container_of(d_work
, struct vmw_framebuffer_surface
, d_work
);
355 struct vmw_surface
*surf
= vfbs
->surface
;
356 struct drm_framebuffer
*framebuffer
= &vfbs
->base
.base
;
357 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
360 SVGA3dCmdHeader header
;
361 SVGA3dCmdPresent body
;
365 mutex_lock(&vfbs
->work_lock
);
366 if (!vfbs
->present_fs
)
369 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
370 if (unlikely(cmd
== NULL
))
373 cmd
->header
.id
= cpu_to_le32(SVGA_3D_CMD_PRESENT
);
374 cmd
->header
.size
= cpu_to_le32(sizeof(cmd
->body
) + sizeof(cmd
->cr
));
375 cmd
->body
.sid
= cpu_to_le32(surf
->res
.id
);
376 cmd
->cr
.x
= cpu_to_le32(0);
377 cmd
->cr
.y
= cpu_to_le32(0);
378 cmd
->cr
.srcx
= cmd
->cr
.x
;
379 cmd
->cr
.srcy
= cmd
->cr
.y
;
380 cmd
->cr
.w
= cpu_to_le32(framebuffer
->width
);
381 cmd
->cr
.h
= cpu_to_le32(framebuffer
->height
);
382 vfbs
->present_fs
= false;
383 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
386 * Will not re-add if already pending.
388 schedule_delayed_work(&vfbs
->d_work
, VMWGFX_PRESENT_RATE
);
390 mutex_unlock(&vfbs
->work_lock
);
394 int vmw_framebuffer_surface_dirty(struct drm_framebuffer
*framebuffer
,
395 unsigned flags
, unsigned color
,
396 struct drm_clip_rect
*clips
,
399 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
400 struct vmw_framebuffer_surface
*vfbs
=
401 vmw_framebuffer_to_vfbs(framebuffer
);
402 struct vmw_surface
*surf
= vfbs
->surface
;
403 struct drm_clip_rect norect
;
408 SVGA3dCmdHeader header
;
409 SVGA3dCmdPresent body
;
414 !(dev_priv
->fifo
.capabilities
&
415 SVGA_FIFO_CAP_SCREEN_OBJECT
)) {
418 mutex_lock(&vfbs
->work_lock
);
419 vfbs
->present_fs
= true;
420 ret
= schedule_delayed_work(&vfbs
->d_work
, VMWGFX_PRESENT_RATE
);
421 mutex_unlock(&vfbs
->work_lock
);
424 * No work pending, Force immediate present.
426 vmw_framebuffer_present_fs_callback(&vfbs
->d_work
.work
);
434 norect
.x1
= norect
.y1
= 0;
435 norect
.x2
= framebuffer
->width
;
436 norect
.y2
= framebuffer
->height
;
437 } else if (flags
& DRM_MODE_FB_DIRTY_ANNOTATE_COPY
) {
439 inc
= 2; /* skip source rects */
442 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
) + (num_clips
- 1) * sizeof(cmd
->cr
));
443 if (unlikely(cmd
== NULL
)) {
444 DRM_ERROR("Fifo reserve failed.\n");
448 memset(cmd
, 0, sizeof(*cmd
));
450 cmd
->header
.id
= cpu_to_le32(SVGA_3D_CMD_PRESENT
);
451 cmd
->header
.size
= cpu_to_le32(sizeof(cmd
->body
) + num_clips
* sizeof(cmd
->cr
));
452 cmd
->body
.sid
= cpu_to_le32(surf
->res
.id
);
454 for (i
= 0, cr
= &cmd
->cr
; i
< num_clips
; i
++, cr
++, clips
+= inc
) {
455 cr
->x
= cpu_to_le16(clips
->x1
);
456 cr
->y
= cpu_to_le16(clips
->y1
);
459 cr
->w
= cpu_to_le16(clips
->x2
- clips
->x1
);
460 cr
->h
= cpu_to_le16(clips
->y2
- clips
->y1
);
463 vmw_fifo_commit(dev_priv
, sizeof(*cmd
) + (num_clips
- 1) * sizeof(cmd
->cr
));
468 static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs
= {
469 .destroy
= vmw_framebuffer_surface_destroy
,
470 .dirty
= vmw_framebuffer_surface_dirty
,
471 .create_handle
= vmw_framebuffer_create_handle
,
474 int vmw_kms_new_framebuffer_surface(struct vmw_private
*dev_priv
,
475 struct vmw_surface
*surface
,
476 struct vmw_framebuffer
**out
,
477 unsigned width
, unsigned height
)
480 struct drm_device
*dev
= dev_priv
->dev
;
481 struct vmw_framebuffer_surface
*vfbs
;
484 vfbs
= kzalloc(sizeof(*vfbs
), GFP_KERNEL
);
490 ret
= drm_framebuffer_init(dev
, &vfbs
->base
.base
,
491 &vmw_framebuffer_surface_funcs
);
495 if (!vmw_surface_reference(surface
)) {
496 DRM_ERROR("failed to reference surface %p\n", surface
);
500 vfbs
->base
.base
.bits_per_pixel
= 32;
501 vfbs
->base
.base
.pitch
= width
* 32 / 4;
502 vfbs
->base
.base
.depth
= 24;
503 vfbs
->base
.base
.width
= width
;
504 vfbs
->base
.base
.height
= height
;
505 vfbs
->base
.pin
= &vmw_surface_dmabuf_pin
;
506 vfbs
->base
.unpin
= &vmw_surface_dmabuf_unpin
;
507 vfbs
->surface
= surface
;
508 mutex_init(&vfbs
->work_lock
);
509 INIT_DELAYED_WORK(&vfbs
->d_work
, &vmw_framebuffer_present_fs_callback
);
515 drm_framebuffer_cleanup(&vfbs
->base
.base
);
523 * Dmabuf framebuffer code
526 #define vmw_framebuffer_to_vfbd(x) \
527 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
529 struct vmw_framebuffer_dmabuf
{
530 struct vmw_framebuffer base
;
531 struct vmw_dma_buffer
*buffer
;
534 void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer
*framebuffer
)
536 struct vmw_framebuffer_dmabuf
*vfbd
=
537 vmw_framebuffer_to_vfbd(framebuffer
);
539 drm_framebuffer_cleanup(framebuffer
);
540 vmw_dmabuf_unreference(&vfbd
->buffer
);
545 int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer
*framebuffer
,
546 unsigned flags
, unsigned color
,
547 struct drm_clip_rect
*clips
,
550 struct vmw_private
*dev_priv
= vmw_priv(framebuffer
->dev
);
551 struct drm_clip_rect norect
;
554 SVGAFifoCmdUpdate body
;
556 int i
, increment
= 1;
561 norect
.x1
= norect
.y1
= 0;
562 norect
.x2
= framebuffer
->width
;
563 norect
.y2
= framebuffer
->height
;
564 } else if (flags
& DRM_MODE_FB_DIRTY_ANNOTATE_COPY
) {
569 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
) * num_clips
);
570 if (unlikely(cmd
== NULL
)) {
571 DRM_ERROR("Fifo reserve failed.\n");
575 for (i
= 0; i
< num_clips
; i
++, clips
+= increment
) {
576 cmd
[i
].header
= cpu_to_le32(SVGA_CMD_UPDATE
);
577 cmd
[i
].body
.x
= cpu_to_le32(clips
->x1
);
578 cmd
[i
].body
.y
= cpu_to_le32(clips
->y1
);
579 cmd
[i
].body
.width
= cpu_to_le32(clips
->x2
- clips
->x1
);
580 cmd
[i
].body
.height
= cpu_to_le32(clips
->y2
- clips
->y1
);
583 vmw_fifo_commit(dev_priv
, sizeof(*cmd
) * num_clips
);
588 static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs
= {
589 .destroy
= vmw_framebuffer_dmabuf_destroy
,
590 .dirty
= vmw_framebuffer_dmabuf_dirty
,
591 .create_handle
= vmw_framebuffer_create_handle
,
594 static int vmw_surface_dmabuf_pin(struct vmw_framebuffer
*vfb
)
596 struct vmw_private
*dev_priv
= vmw_priv(vfb
->base
.dev
);
597 struct vmw_framebuffer_surface
*vfbs
=
598 vmw_framebuffer_to_vfbs(&vfb
->base
);
599 unsigned long size
= vfbs
->base
.base
.pitch
* vfbs
->base
.base
.height
;
602 vfbs
->buffer
= kzalloc(sizeof(*vfbs
->buffer
), GFP_KERNEL
);
603 if (unlikely(vfbs
->buffer
== NULL
))
606 vmw_overlay_pause_all(dev_priv
);
607 ret
= vmw_dmabuf_init(dev_priv
, vfbs
->buffer
, size
,
608 &vmw_vram_ne_placement
,
609 false, &vmw_dmabuf_bo_free
);
610 vmw_overlay_resume_all(dev_priv
);
615 static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer
*vfb
)
617 struct ttm_buffer_object
*bo
;
618 struct vmw_framebuffer_surface
*vfbs
=
619 vmw_framebuffer_to_vfbs(&vfb
->base
);
621 bo
= &vfbs
->buffer
->base
;
628 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer
*vfb
)
630 struct vmw_private
*dev_priv
= vmw_priv(vfb
->base
.dev
);
631 struct vmw_framebuffer_dmabuf
*vfbd
=
632 vmw_framebuffer_to_vfbd(&vfb
->base
);
636 vmw_overlay_pause_all(dev_priv
);
638 ret
= vmw_dmabuf_to_start_of_vram(dev_priv
, vfbd
->buffer
);
640 vmw_overlay_resume_all(dev_priv
);
647 static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer
*vfb
)
649 struct vmw_private
*dev_priv
= vmw_priv(vfb
->base
.dev
);
650 struct vmw_framebuffer_dmabuf
*vfbd
=
651 vmw_framebuffer_to_vfbd(&vfb
->base
);
654 WARN_ON(!vfbd
->buffer
);
658 return vmw_dmabuf_from_vram(dev_priv
, vfbd
->buffer
);
661 int vmw_kms_new_framebuffer_dmabuf(struct vmw_private
*dev_priv
,
662 struct vmw_dma_buffer
*dmabuf
,
663 struct vmw_framebuffer
**out
,
664 unsigned width
, unsigned height
)
667 struct drm_device
*dev
= dev_priv
->dev
;
668 struct vmw_framebuffer_dmabuf
*vfbd
;
671 vfbd
= kzalloc(sizeof(*vfbd
), GFP_KERNEL
);
677 ret
= drm_framebuffer_init(dev
, &vfbd
->base
.base
,
678 &vmw_framebuffer_dmabuf_funcs
);
682 if (!vmw_dmabuf_reference(dmabuf
)) {
683 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf
);
687 vfbd
->base
.base
.bits_per_pixel
= 32;
688 vfbd
->base
.base
.pitch
= width
* vfbd
->base
.base
.bits_per_pixel
/ 8;
689 vfbd
->base
.base
.depth
= 24;
690 vfbd
->base
.base
.width
= width
;
691 vfbd
->base
.base
.height
= height
;
692 vfbd
->base
.pin
= vmw_framebuffer_dmabuf_pin
;
693 vfbd
->base
.unpin
= vmw_framebuffer_dmabuf_unpin
;
694 vfbd
->buffer
= dmabuf
;
700 drm_framebuffer_cleanup(&vfbd
->base
.base
);
708 * Generic Kernel modesetting functions
711 static struct drm_framebuffer
*vmw_kms_fb_create(struct drm_device
*dev
,
712 struct drm_file
*file_priv
,
713 struct drm_mode_fb_cmd
*mode_cmd
)
715 struct vmw_private
*dev_priv
= vmw_priv(dev
);
716 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
717 struct vmw_framebuffer
*vfb
= NULL
;
718 struct vmw_surface
*surface
= NULL
;
719 struct vmw_dma_buffer
*bo
= NULL
;
722 ret
= vmw_user_surface_lookup_handle(dev_priv
, tfile
,
723 mode_cmd
->handle
, &surface
);
727 if (!surface
->scanout
)
728 goto err_not_scanout
;
730 ret
= vmw_kms_new_framebuffer_surface(dev_priv
, surface
, &vfb
,
731 mode_cmd
->width
, mode_cmd
->height
);
733 /* vmw_user_surface_lookup takes one ref so does new_fb */
734 vmw_surface_unreference(&surface
);
737 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret
);
743 DRM_INFO("%s: trying buffer\n", __func__
);
745 ret
= vmw_user_dmabuf_lookup(tfile
, mode_cmd
->handle
, &bo
);
747 DRM_ERROR("failed to find buffer: %i\n", ret
);
748 return ERR_PTR(-ENOENT
);
751 ret
= vmw_kms_new_framebuffer_dmabuf(dev_priv
, bo
, &vfb
,
752 mode_cmd
->width
, mode_cmd
->height
);
754 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
755 vmw_dmabuf_unreference(&bo
);
758 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret
);
765 DRM_ERROR("surface not marked as scanout\n");
766 /* vmw_user_surface_lookup takes one ref */
767 vmw_surface_unreference(&surface
);
769 return ERR_PTR(-EINVAL
);
772 static struct drm_mode_config_funcs vmw_kms_funcs
= {
773 .fb_create
= vmw_kms_fb_create
,
776 int vmw_kms_init(struct vmw_private
*dev_priv
)
778 struct drm_device
*dev
= dev_priv
->dev
;
781 drm_mode_config_init(dev
);
782 dev
->mode_config
.funcs
= &vmw_kms_funcs
;
783 dev
->mode_config
.min_width
= 1;
784 dev
->mode_config
.min_height
= 1;
785 /* assumed largest fb size */
786 dev
->mode_config
.max_width
= 8192;
787 dev
->mode_config
.max_height
= 8192;
789 ret
= vmw_kms_init_legacy_display_system(dev_priv
);
794 int vmw_kms_close(struct vmw_private
*dev_priv
)
797 * Docs says we should take the lock before calling this function
798 * but since it destroys encoders and our destructor calls
799 * drm_encoder_cleanup which takes the lock we deadlock.
801 drm_mode_config_cleanup(dev_priv
->dev
);
802 vmw_kms_close_legacy_display_system(dev_priv
);
806 int vmw_kms_cursor_bypass_ioctl(struct drm_device
*dev
, void *data
,
807 struct drm_file
*file_priv
)
809 struct drm_vmw_cursor_bypass_arg
*arg
= data
;
810 struct vmw_display_unit
*du
;
811 struct drm_mode_object
*obj
;
812 struct drm_crtc
*crtc
;
816 mutex_lock(&dev
->mode_config
.mutex
);
817 if (arg
->flags
& DRM_VMW_CURSOR_BYPASS_ALL
) {
819 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
820 du
= vmw_crtc_to_du(crtc
);
821 du
->hotspot_x
= arg
->xhot
;
822 du
->hotspot_y
= arg
->yhot
;
825 mutex_unlock(&dev
->mode_config
.mutex
);
829 obj
= drm_mode_object_find(dev
, arg
->crtc_id
, DRM_MODE_OBJECT_CRTC
);
835 crtc
= obj_to_crtc(obj
);
836 du
= vmw_crtc_to_du(crtc
);
838 du
->hotspot_x
= arg
->xhot
;
839 du
->hotspot_y
= arg
->yhot
;
842 mutex_unlock(&dev
->mode_config
.mutex
);
847 void vmw_kms_write_svga(struct vmw_private
*vmw_priv
,
848 unsigned width
, unsigned height
, unsigned pitch
,
849 unsigned bbp
, unsigned depth
)
851 if (vmw_priv
->capabilities
& SVGA_CAP_PITCHLOCK
)
852 vmw_write(vmw_priv
, SVGA_REG_PITCHLOCK
, pitch
);
853 else if (vmw_fifo_have_pitchlock(vmw_priv
))
854 iowrite32(pitch
, vmw_priv
->mmio_virt
+ SVGA_FIFO_PITCHLOCK
);
855 vmw_write(vmw_priv
, SVGA_REG_WIDTH
, width
);
856 vmw_write(vmw_priv
, SVGA_REG_HEIGHT
, height
);
857 vmw_write(vmw_priv
, SVGA_REG_BITS_PER_PIXEL
, bbp
);
858 vmw_write(vmw_priv
, SVGA_REG_DEPTH
, depth
);
859 vmw_write(vmw_priv
, SVGA_REG_RED_MASK
, 0x00ff0000);
860 vmw_write(vmw_priv
, SVGA_REG_GREEN_MASK
, 0x0000ff00);
861 vmw_write(vmw_priv
, SVGA_REG_BLUE_MASK
, 0x000000ff);
864 int vmw_kms_save_vga(struct vmw_private
*vmw_priv
)
866 struct vmw_vga_topology_state
*save
;
869 vmw_priv
->vga_width
= vmw_read(vmw_priv
, SVGA_REG_WIDTH
);
870 vmw_priv
->vga_height
= vmw_read(vmw_priv
, SVGA_REG_HEIGHT
);
871 vmw_priv
->vga_depth
= vmw_read(vmw_priv
, SVGA_REG_DEPTH
);
872 vmw_priv
->vga_bpp
= vmw_read(vmw_priv
, SVGA_REG_BITS_PER_PIXEL
);
873 vmw_priv
->vga_pseudo
= vmw_read(vmw_priv
, SVGA_REG_PSEUDOCOLOR
);
874 vmw_priv
->vga_red_mask
= vmw_read(vmw_priv
, SVGA_REG_RED_MASK
);
875 vmw_priv
->vga_blue_mask
= vmw_read(vmw_priv
, SVGA_REG_BLUE_MASK
);
876 vmw_priv
->vga_green_mask
= vmw_read(vmw_priv
, SVGA_REG_GREEN_MASK
);
877 if (vmw_priv
->capabilities
& SVGA_CAP_PITCHLOCK
)
878 vmw_priv
->vga_pitchlock
=
879 vmw_read(vmw_priv
, SVGA_REG_PITCHLOCK
);
880 else if (vmw_fifo_have_pitchlock(vmw_priv
))
881 vmw_priv
->vga_pitchlock
= ioread32(vmw_priv
->mmio_virt
+
882 SVGA_FIFO_PITCHLOCK
);
884 if (!(vmw_priv
->capabilities
& SVGA_CAP_DISPLAY_TOPOLOGY
))
887 vmw_priv
->num_displays
= vmw_read(vmw_priv
,
888 SVGA_REG_NUM_GUEST_DISPLAYS
);
890 for (i
= 0; i
< vmw_priv
->num_displays
; ++i
) {
891 save
= &vmw_priv
->vga_save
[i
];
892 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, i
);
893 save
->primary
= vmw_read(vmw_priv
, SVGA_REG_DISPLAY_IS_PRIMARY
);
894 save
->pos_x
= vmw_read(vmw_priv
, SVGA_REG_DISPLAY_POSITION_X
);
895 save
->pos_y
= vmw_read(vmw_priv
, SVGA_REG_DISPLAY_POSITION_Y
);
896 save
->width
= vmw_read(vmw_priv
, SVGA_REG_DISPLAY_WIDTH
);
897 save
->height
= vmw_read(vmw_priv
, SVGA_REG_DISPLAY_HEIGHT
);
898 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, SVGA_ID_INVALID
);
899 if (i
== 0 && vmw_priv
->num_displays
== 1 &&
900 save
->width
== 0 && save
->height
== 0) {
903 * It should be fairly safe to assume that these
904 * values are uninitialized.
907 save
->width
= vmw_priv
->vga_width
- save
->pos_x
;
908 save
->height
= vmw_priv
->vga_height
- save
->pos_y
;
915 int vmw_kms_restore_vga(struct vmw_private
*vmw_priv
)
917 struct vmw_vga_topology_state
*save
;
920 vmw_write(vmw_priv
, SVGA_REG_WIDTH
, vmw_priv
->vga_width
);
921 vmw_write(vmw_priv
, SVGA_REG_HEIGHT
, vmw_priv
->vga_height
);
922 vmw_write(vmw_priv
, SVGA_REG_DEPTH
, vmw_priv
->vga_depth
);
923 vmw_write(vmw_priv
, SVGA_REG_BITS_PER_PIXEL
, vmw_priv
->vga_bpp
);
924 vmw_write(vmw_priv
, SVGA_REG_PSEUDOCOLOR
, vmw_priv
->vga_pseudo
);
925 vmw_write(vmw_priv
, SVGA_REG_RED_MASK
, vmw_priv
->vga_red_mask
);
926 vmw_write(vmw_priv
, SVGA_REG_GREEN_MASK
, vmw_priv
->vga_green_mask
);
927 vmw_write(vmw_priv
, SVGA_REG_BLUE_MASK
, vmw_priv
->vga_blue_mask
);
928 if (vmw_priv
->capabilities
& SVGA_CAP_PITCHLOCK
)
929 vmw_write(vmw_priv
, SVGA_REG_PITCHLOCK
,
930 vmw_priv
->vga_pitchlock
);
931 else if (vmw_fifo_have_pitchlock(vmw_priv
))
932 iowrite32(vmw_priv
->vga_pitchlock
,
933 vmw_priv
->mmio_virt
+ SVGA_FIFO_PITCHLOCK
);
935 if (!(vmw_priv
->capabilities
& SVGA_CAP_DISPLAY_TOPOLOGY
))
938 for (i
= 0; i
< vmw_priv
->num_displays
; ++i
) {
939 save
= &vmw_priv
->vga_save
[i
];
940 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, i
);
941 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_IS_PRIMARY
, save
->primary
);
942 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_POSITION_X
, save
->pos_x
);
943 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_POSITION_Y
, save
->pos_y
);
944 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_WIDTH
, save
->width
);
945 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_HEIGHT
, save
->height
);
946 vmw_write(vmw_priv
, SVGA_REG_DISPLAY_ID
, SVGA_ID_INVALID
);
952 int vmw_kms_update_layout_ioctl(struct drm_device
*dev
, void *data
,
953 struct drm_file
*file_priv
)
955 struct vmw_private
*dev_priv
= vmw_priv(dev
);
956 struct drm_vmw_update_layout_arg
*arg
=
957 (struct drm_vmw_update_layout_arg
*)data
;
958 struct vmw_master
*vmaster
= vmw_master(file_priv
->master
);
959 void __user
*user_rects
;
960 struct drm_vmw_rect
*rects
;
964 ret
= ttm_read_lock(&vmaster
->lock
, true);
965 if (unlikely(ret
!= 0))
968 if (!arg
->num_outputs
) {
969 struct drm_vmw_rect def_rect
= {0, 0, 800, 600};
970 vmw_kms_ldu_update_layout(dev_priv
, 1, &def_rect
);
974 rects_size
= arg
->num_outputs
* sizeof(struct drm_vmw_rect
);
975 rects
= kzalloc(rects_size
, GFP_KERNEL
);
976 if (unlikely(!rects
)) {
981 user_rects
= (void __user
*)(unsigned long)arg
->rects
;
982 ret
= copy_from_user(rects
, user_rects
, rects_size
);
983 if (unlikely(ret
!= 0)) {
984 DRM_ERROR("Failed to get rects.\n");
989 vmw_kms_ldu_update_layout(dev_priv
, arg
->num_outputs
, rects
);
994 ttm_read_unlock(&vmaster
->lock
);
998 u32
vmw_get_vblank_counter(struct drm_device
*dev
, int crtc
)