/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
		 struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
		 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
		 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
		 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
		 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE				\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
		 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF					\
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
		 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
		 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
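
/*
 * Example (not part of the original source): with the macro above, the
 * VMW_GET_PARAM entry in the table below expands roughly to
 *
 *	[DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *		{DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *		 vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * i.e. the table is indexed by the driver-private ioctl number, and each
 * entry also records the fully encoded ioctl command so that
 * vmw_unlocked_ioctl() can verify the command passed in by user space.
 */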

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT, vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED)
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static int enable_fbdev;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
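
/*
 * Usage note (not part of the original source): the parameter is read when
 * a device is loaded, e.g.
 *
 *	modprobe vmwgfx enable_fbdev=1
 *
 * and is exposed read/write for root under
 * /sys/module/vmwgfx/parameters/enable_fbdev (permissions 0600 above).
 */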

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  Guest memory regions.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
			bool unhide_svga)
{
	int ret = 0;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			--dev_priv->num_3d_resources;
	} else if (unhide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) &
			  ~SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	mutex_unlock(&dev_priv->release_mutex);
	return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
			 bool hide_svga)
{
	int32_t n3d;

	mutex_lock(&dev_priv->release_mutex);
	if (unlikely(--dev_priv->num_3d_resources == 0))
		vmw_release_device(dev_priv);
	else if (hide_svga) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  vmw_read(dev_priv, SVGA_REG_ENABLE) |
			  SVGA_REG_ENABLE_HIDE);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	n3d = (int32_t) dev_priv->num_3d_resources;
	mutex_unlock(&dev_priv->release_mutex);

	BUG_ON(n3d < 0);
}
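
/*
 * Sketch (not from the original file) of how these two helpers are meant
 * to be paired by callers that need the device in SVGA mode:
 *
 *	ret = vmw_3d_resource_inc(dev_priv, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... use the FIFO / 3D resources ...
 *	vmw_3d_resource_dec(dev_priv, false);
 *
 * The fbdev layer and the current master each hold such a reference, so the
 * FIFO is only released, and the device dropped back to VGA, when the last
 * reference goes away.
 */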

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_sequence = (uint32_t) -100;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		mutex_unlock(&dev_priv->hw_mutex);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
			 (unsigned)dev_priv->memory_size);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
	    (dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}
	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);
	if (dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, false);
		if (unlikely(ret != 0))
			goto out_no_fifo;
		vmw_kms_save_vga(dev_priv);
		vmw_fb_init(dev_priv);
		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
			 "Detected device 3D availability.\n" :
			 "Detected no device 3D availability.\n");
	} else {
		DRM_INFO("Delayed 3D detection since we're not "
			 "running the device in SVGA mode yet.\n");
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_irq:
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.cmd_bounce)
		vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->enable_fb) {
		vmw_fb_close(dev_priv);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, false);
	}
	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
		    &vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd_drv != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
	}

	return drm_ioctl(filp, cmd, arg);
}
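
/*
 * Illustration only (not from the original source): user space reaches the
 * table above through the regular ioctl path, e.g.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	ret = ioctl(drm_fd, DRM_IOCTL_VMW_GET_PARAM, &arg);
 *
 * The check above looks up the table entry by the driver-private ioctl
 * number and rejects the call with -EINVAL if the fully encoded command
 * (size and direction bits included) does not match what the driver
 * registered.
 */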

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
	INIT_LIST_HEAD(&vmaster->fb_surf);
	mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (!dev_priv->enable_fb) {
		ret = vmw_3d_resource_inc(dev_priv, true);
		if (unlikely(ret != 0))
			return ret;
		vmw_kms_save_vga(dev_priv);
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
		mutex_unlock(&dev_priv->hw_mutex);
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	if (!dev_priv->enable_fb) {
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_idle_workqueues(vmaster);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (!dev_priv->enable_fb) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
		mutex_unlock(&dev_priv->hw_mutex);
		vmw_kms_restore_vga(dev_priv);
		vmw_3d_resource_dec(dev_priv, true);
	}

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);
	struct vmw_master *vmaster = dev_priv->active_master;

	switch (val) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ttm_suspend_lock(&vmaster->lock);

		/**
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		ttm_bo_swapout_all(&dev_priv->bdev);

		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		ttm_suspend_unlock(&vmaster->lock);

		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->num_3d_resources != 0) {
		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");
		return -EBUSY;
	}

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Release 3d reference held by fbdev and potentially
	 * stop fifo.
	 */
	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_3d_resource_dec(dev_priv, true);

	if (dev_priv->num_3d_resources != 0) {

		DRM_INFO("Can't suspend or hibernate "
			 "while 3D resources are active.\n");

		if (dev_priv->enable_fb)
			vmw_3d_resource_inc(dev_priv, true);
		dev_priv->suspended = false;
		return -EBUSY;
	}

	return 0;
}

static void vmw_pm_complete(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	/**
	 * Reclaim 3d reference held by fbdev and potentially
	 * start fifo.
	 */
	if (dev_priv->enable_fb)
		vmw_3d_resource_inc(dev_priv, false);

	dev_priv->suspended = false;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.prepare = vmw_pm_prepare,
	.complete = vmw_pm_complete,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.reclaim_buffers_locked = NULL,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		 .owner = THIS_MODULE,
		 .open = drm_open,
		 .release = drm_release,
		 .unlocked_ioctl = vmw_unlocked_ioctl,
		 .mmap = vmw_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		 .compat_ioctl = drm_compat_ioctl,
#endif
		 .llseek = noop_llseek,
	},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");