/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)
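/*
 * VMW_DIRTY_DELAY is expressed in jiffies: HZ / 30 is one thirtieth of
 * a second, so accumulated dirty regions are flushed to the device at
 * most ~30 times per second.
 */
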
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};

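/*
 * vmw_fb_setcolreg() backs the truecolor visual set up in vmw_fb_init():
 * fbcon does not program a hardware palette here; instead it asks the
 * driver to pack each of the first 16 color registers into a ready-made
 * pixel value in pseudo_palette, which the cfb_* drawing helpers then
 * read directly.
 */
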
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}

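/*
 * Only 32 bits per pixel is accepted; whether that is treated as depth
 * 24 (xRGB) or depth 32 (ARGB) is inferred from whether the caller
 * asked for an alpha (transp) channel.
 */
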
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Can not handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}

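/*
 * vmw_fb_set_par() pushes the mode to the device. With display
 * topology support, guest display 0 is reprogrammed by selecting it
 * via SVGA_REG_DISPLAY_ID, writing its position and size, and then
 * writing SVGA_ID_INVALID to finish the update.
 */
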
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
			   info->fix.line_length,
			   par->bpp, par->depth);
	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This is really helpful since if this fails the user
	 * can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}

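/*
 * Panning takes effect through vmw_fb_set_par() above, which programs
 * SVGA_REG_DISPLAY_POSITION_X/Y from var->xoffset/yoffset, and no
 * device state is touched for blanking here, so both hooks below only
 * need to exist and report success.
 */
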
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

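/*
 * Flushing is a two step process: the dirty rows are first copied from
 * the vmalloc'd shadow buffer (info->screen_base) into the kmapped VRAM
 * buffer with iowrite32(), and an SVGA_CMD_UPDATE command is then queued
 * on the FIFO so the host knows which rectangle of VRAM to repaint.
 */
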
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}

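/*
 * vmw_fb_dirty_mark() grows the pending dirty rectangle under the dirty
 * lock. An empty rectangle is encoded as x1 == x2, and only the
 * transition from empty to non-empty schedules the deferred work, so
 * repeated marks within VMW_DIRTY_DELAY are batched into a single flush.
 */
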
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}

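/*
 * vmw_deferred_io() is the fb_deferred_io callback for mmap writes: it
 * takes the list of touched pages, converts the lowest and highest
 * written byte offsets into a scanline range, marks those lines dirty
 * across the full width, and flushes immediately.
 */
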
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};

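/*
 * The drawing hooks render through the generic cfb_* helpers, which
 * draw into the shadow buffer, and then mark the touched rectangle
 * dirty so it eventually gets copied to VRAM and updated on the host.
 */
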
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};

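/*
 * The fbdev buffer has to be scanned out from the very start of VRAM
 * (see the SVGA_REG_FB_OFFSET check in vmw_fb_set_par()), so the
 * "no evict" VRAM placement is clamped with an lpfn just large enough
 * to hold the buffer, forcing it into the first pages of VRAM.
 */
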
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}

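/*
 * vmw_fb_init() sizes the framebuffer from the device limits (capped
 * at 2048x2048), allocates the shadow buffer and the VRAM buffer
 * object, fills in the fixed and variable screeninfo, and registers
 * the framebuffer with deferred IO enabled.
 */
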
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	initial_width = 800;
	initial_height = 600;

	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(fb_width, initial_width);
	initial_height = min(fb_height, initial_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	info->pixmap.size = 0;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}

int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}

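/*
 * The two helpers below move the fbdev buffer in and out of VRAM:
 * vmw_dmabuf_from_vram() evicts it to system memory, while
 * vmw_dmabuf_to_start_of_vram() pins it back at offset 0, evicting
 * whatever currently occupies the start of VRAM if necessary.
 */
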
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
			 struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
	ttm_bo_unreserve(bo);

	return ret;
}

int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *vmw_bo)
{
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = bo->num_pages;

	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/* Evict the buffer to system memory first if it already sits in
	 * the range we want, so the validate below can place it at the
	 * start of VRAM. */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.mm_node->start < bo->num_pages)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
				       false, false);

	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);

	/* Could probably bug on */
	WARN_ON(bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmw_priv->active_master->lock);

	return ret;
}

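/*
 * vmw_fb_off() and vmw_fb_on() switch the fbdev path off and on again:
 * fb_off stops dirty flushing, waits for pending deferred work, unmaps
 * the buffer and lets it leave VRAM; fb_on pins and maps it again,
 * re-enables flushing and repaints the whole screen.
 */
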
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_scheduled_work();

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);

	return 0;
}

int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}