1 /**************************************************************************
2 * Copyright (c) 2007, Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 **************************************************************************/
/* Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com> */
21 #include "ttm/ttm_placement.h"
22 #include "ttm/ttm_execbuf_util.h"
23 #include "psb_ttm_fence_api.h"
27 #define DRM_MEM_TTM 26
29 struct drm_psb_ttm_backend
{
30 struct ttm_backend base
;
32 unsigned int desired_tile_stride
;
33 unsigned int hw_tile_stride
;
36 unsigned long num_pages
;
/*
 * MSVDX/TOPAZ GPU virtual space looks like this
 * (We currently use only one MMU context).
 * PSB_MEM_MMU_START: from 0x00000000~0xe000000, for generic buffers
 * TTM_PL_CI: from 0xe0000000+half GTT space, for camera/video buffer sharing
 * TTM_PL_RAR: from TTM_PL_CI+CI size, for RAR/video buffer sharing
 * TTM_PL_TT: from TTM_PL_RAR+RAR size, for buffers need to mapping into GTT
 */
47 static int psb_init_mem_type(struct ttm_bo_device
*bdev
, uint32_t type
,
48 struct ttm_mem_type_manager
*man
)
51 struct drm_psb_private
*dev_priv
=
52 container_of(bdev
, struct drm_psb_private
, bdev
);
53 struct psb_gtt
*pg
= dev_priv
->pg
;
57 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
;
58 man
->available_caching
= TTM_PL_FLAG_CACHED
|
59 TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_WC
;
60 man
->default_caching
= TTM_PL_FLAG_CACHED
;
63 man
->func
= &ttm_bo_manager_func
;
64 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
|
66 man
->gpu_offset
= PSB_MEM_MMU_START
;
67 man
->available_caching
= TTM_PL_FLAG_CACHED
|
68 TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_WC
;
69 man
->default_caching
= TTM_PL_FLAG_WC
;
72 man
->func
= &ttm_bo_manager_func
;
73 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
|
74 TTM_MEMTYPE_FLAG_FIXED
;
75 man
->gpu_offset
= pg
->mmu_gatt_start
+ (pg
->ci_start
);
76 man
->available_caching
= TTM_PL_FLAG_UNCACHED
;
77 man
->default_caching
= TTM_PL_FLAG_UNCACHED
;
79 case TTM_PL_RAR
: /* Unmappable RAR memory */
80 man
->func
= &ttm_bo_manager_func
;
81 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
|
82 TTM_MEMTYPE_FLAG_FIXED
;
83 man
->available_caching
= TTM_PL_FLAG_UNCACHED
;
84 man
->default_caching
= TTM_PL_FLAG_UNCACHED
;
85 man
->gpu_offset
= pg
->mmu_gatt_start
+ (pg
->rar_start
);
87 case TTM_PL_TT
: /* Mappable GATT memory */
88 man
->func
= &ttm_bo_manager_func
;
89 #ifdef PSB_WORKING_HOST_MMU_ACCESS
90 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
;
92 man
->flags
= TTM_MEMTYPE_FLAG_MAPPABLE
|
95 man
->available_caching
= TTM_PL_FLAG_CACHED
|
96 TTM_PL_FLAG_UNCACHED
| TTM_PL_FLAG_WC
;
97 man
->default_caching
= TTM_PL_FLAG_WC
;
98 man
->gpu_offset
= pg
->mmu_gatt_start
+
99 (pg
->rar_start
+ dev_priv
->rar_region_size
);
102 DRM_ERROR("Unsupported memory type %u\n", (unsigned) type
);
109 static void psb_evict_mask(struct ttm_buffer_object
*bo
,
110 struct ttm_placement
*placement
)
112 static uint32_t cur_placement
;
114 cur_placement
= bo
->mem
.placement
& ~TTM_PL_MASK_MEM
;
115 cur_placement
|= TTM_PL_FLAG_SYSTEM
;
119 placement
->num_placement
= 1;
120 placement
->placement
= &cur_placement
;
121 placement
->num_busy_placement
= 0;
122 placement
->busy_placement
= NULL
;
124 /* all buffers evicted to system memory */
125 /* return cur_placement | TTM_PL_FLAG_SYSTEM; */
128 static int psb_invalidate_caches(struct ttm_bo_device
*bdev
,
134 static int psb_move_blit(struct ttm_buffer_object
*bo
,
135 bool evict
, bool no_wait
,
136 struct ttm_mem_reg
*new_mem
)
/*
 * Flip destination ttm into GATT,
 * then blit and subsequently move out again.
 */
147 static int psb_move_flip(struct ttm_buffer_object
*bo
,
148 bool evict
, bool interruptible
, bool no_wait
,
149 struct ttm_mem_reg
*new_mem
)
151 /*struct ttm_bo_device *bdev = bo->bdev;*/
152 struct ttm_mem_reg tmp_mem
;
154 struct ttm_placement placement
;
155 uint32_t flags
= TTM_PL_FLAG_TT
;
158 tmp_mem
.mm_node
= NULL
;
162 placement
.num_placement
= 1;
163 placement
.placement
= &flags
;
164 placement
.num_busy_placement
= 0; /* FIXME */
165 placement
.busy_placement
= NULL
;
167 ret
= ttm_bo_mem_space(bo
, &placement
, &tmp_mem
, interruptible
,
171 ret
= ttm_tt_bind(bo
->ttm
, &tmp_mem
);
174 ret
= psb_move_blit(bo
, true, no_wait
, &tmp_mem
);
178 ret
= ttm_bo_move_ttm(bo
, evict
, false, no_wait
, new_mem
);
180 if (tmp_mem
.mm_node
) {
181 drm_mm_put_block(tmp_mem
.mm_node
);
182 tmp_mem
.mm_node
= NULL
;
187 static int psb_move(struct ttm_buffer_object
*bo
,
188 bool evict
, bool interruptible
, bool no_wait_reserve
,
189 bool no_wait
, struct ttm_mem_reg
*new_mem
)
191 struct ttm_mem_reg
*old_mem
= &bo
->mem
;
193 if ((old_mem
->mem_type
== TTM_PL_RAR
) ||
194 (new_mem
->mem_type
== TTM_PL_RAR
)) {
195 if (old_mem
->mm_node
) {
196 spin_lock(&bo
->glob
->lru_lock
);
197 drm_mm_put_block(old_mem
->mm_node
);
198 spin_unlock(&bo
->glob
->lru_lock
);
200 old_mem
->mm_node
= NULL
;
202 } else if (old_mem
->mem_type
== TTM_PL_SYSTEM
) {
203 return ttm_bo_move_memcpy(bo
, evict
, false, no_wait
, new_mem
);
204 } else if (new_mem
->mem_type
== TTM_PL_SYSTEM
) {
205 int ret
= psb_move_flip(bo
, evict
, interruptible
,
207 if (unlikely(ret
!= 0)) {
208 if (ret
== -ERESTART
)
211 return ttm_bo_move_memcpy(bo
, evict
, false,
215 if (psb_move_blit(bo
, evict
, no_wait
, new_mem
))
216 return ttm_bo_move_memcpy(bo
, evict
, false, no_wait
,
222 static int drm_psb_tbe_populate(struct ttm_backend
*backend
,
223 unsigned long num_pages
,
225 struct page
*dummy_read_page
,
226 dma_addr_t
*dma_addrs
)
228 struct drm_psb_ttm_backend
*psb_be
=
229 container_of(backend
, struct drm_psb_ttm_backend
, base
);
231 psb_be
->pages
= pages
;
235 static int drm_psb_tbe_unbind(struct ttm_backend
*backend
)
237 struct ttm_bo_device
*bdev
= backend
->bdev
;
238 struct drm_psb_private
*dev_priv
=
239 container_of(bdev
, struct drm_psb_private
, bdev
);
240 struct drm_psb_ttm_backend
*psb_be
=
241 container_of(backend
, struct drm_psb_ttm_backend
, base
);
242 struct psb_mmu_pd
*pd
= psb_mmu_get_default_pd(dev_priv
->mmu
);
243 /* struct ttm_mem_type_manager *man = &bdev->man[psb_be->mem_type]; */
245 if (psb_be
->mem_type
== TTM_PL_TT
) {
246 uint32_t gatt_p_offset
=
247 (psb_be
->offset
- dev_priv
->pg
->mmu_gatt_start
)
250 (void) psb_gtt_remove_pages(dev_priv
->pg
, gatt_p_offset
,
252 psb_be
->desired_tile_stride
,
253 psb_be
->hw_tile_stride
, 0);
256 psb_mmu_remove_pages(pd
, psb_be
->offset
,
258 psb_be
->desired_tile_stride
,
259 psb_be
->hw_tile_stride
);
264 static int drm_psb_tbe_bind(struct ttm_backend
*backend
,
265 struct ttm_mem_reg
*bo_mem
)
267 struct ttm_bo_device
*bdev
= backend
->bdev
;
268 struct drm_psb_private
*dev_priv
=
269 container_of(bdev
, struct drm_psb_private
, bdev
);
270 struct drm_psb_ttm_backend
*psb_be
=
271 container_of(backend
, struct drm_psb_ttm_backend
, base
);
272 struct psb_mmu_pd
*pd
= psb_mmu_get_default_pd(dev_priv
->mmu
);
273 struct ttm_mem_type_manager
*man
= &bdev
->man
[bo_mem
->mem_type
];
274 struct drm_mm_node
*mm_node
= bo_mem
->mm_node
;
278 psb_be
->mem_type
= bo_mem
->mem_type
;
279 psb_be
->num_pages
= bo_mem
->num_pages
;
280 psb_be
->desired_tile_stride
= 0;
281 psb_be
->hw_tile_stride
= 0;
282 psb_be
->offset
= (mm_node
->start
<< PAGE_SHIFT
) +
287 placement
& TTM_PL_FLAG_CACHED
) ? PSB_MMU_CACHED_MEMORY
: 0;
289 if (psb_be
->mem_type
== TTM_PL_TT
) {
290 uint32_t gatt_p_offset
=
291 (psb_be
->offset
- dev_priv
->pg
->mmu_gatt_start
)
294 ret
= psb_gtt_insert_pages(dev_priv
->pg
, psb_be
->pages
,
297 psb_be
->desired_tile_stride
,
298 psb_be
->hw_tile_stride
, type
);
301 ret
= psb_mmu_insert_pages(pd
, psb_be
->pages
,
302 psb_be
->offset
, psb_be
->num_pages
,
303 psb_be
->desired_tile_stride
,
304 psb_be
->hw_tile_stride
, type
);
310 drm_psb_tbe_unbind(backend
);
315 static void drm_psb_tbe_clear(struct ttm_backend
*backend
)
317 struct drm_psb_ttm_backend
*psb_be
=
318 container_of(backend
, struct drm_psb_ttm_backend
, base
);
320 psb_be
->pages
= NULL
;
324 static void drm_psb_tbe_destroy(struct ttm_backend
*backend
)
326 struct drm_psb_ttm_backend
*psb_be
=
327 container_of(backend
, struct drm_psb_ttm_backend
, base
);
333 static struct ttm_backend_func psb_ttm_backend
= {
334 .populate
= drm_psb_tbe_populate
,
335 .clear
= drm_psb_tbe_clear
,
336 .bind
= drm_psb_tbe_bind
,
337 .unbind
= drm_psb_tbe_unbind
,
338 .destroy
= drm_psb_tbe_destroy
,
341 static struct ttm_backend
*drm_psb_tbe_init(struct ttm_bo_device
*bdev
)
343 struct drm_psb_ttm_backend
*psb_be
;
345 psb_be
= kzalloc(sizeof(*psb_be
), GFP_KERNEL
);
348 psb_be
->pages
= NULL
;
349 psb_be
->base
.func
= &psb_ttm_backend
;
350 psb_be
->base
.bdev
= bdev
;
351 return &psb_be
->base
;
354 static int psb_ttm_io_mem_reserve(struct ttm_bo_device
*bdev
,
355 struct ttm_mem_reg
*mem
)
357 struct ttm_mem_type_manager
*man
= &bdev
->man
[mem
->mem_type
];
358 struct drm_psb_private
*dev_priv
=
359 container_of(bdev
, struct drm_psb_private
, bdev
);
360 struct psb_gtt
*pg
= dev_priv
->pg
;
361 struct drm_mm_node
*mm_node
= mem
->mm_node
;
363 mem
->bus
.addr
= NULL
;
365 mem
->bus
.size
= mem
->num_pages
<< PAGE_SHIFT
;
367 mem
->bus
.is_iomem
= false;
368 if (!(man
->flags
& TTM_MEMTYPE_FLAG_MAPPABLE
))
370 switch (mem
->mem_type
) {
375 mem
->bus
.offset
= mm_node
->start
<< PAGE_SHIFT
;
376 mem
->bus
.base
= pg
->gatt_start
;
377 mem
->bus
.is_iomem
= false;
378 /* Don't know whether it is IO_MEM, this flag
379 used in vm_fault handle */
381 case DRM_PSB_MEM_MMU
:
382 mem
->bus
.offset
= mm_node
->start
<< PAGE_SHIFT
;
383 mem
->bus
.base
= 0x00000000;
386 mem
->bus
.offset
= mm_node
->start
<< PAGE_SHIFT
;
387 mem
->bus
.base
= dev_priv
->ci_region_start
;;
388 mem
->bus
.is_iomem
= true;
391 mem
->bus
.offset
= mm_node
->start
<< PAGE_SHIFT
;
392 mem
->bus
.base
= dev_priv
->rar_region_start
;;
393 mem
->bus
.is_iomem
= true;
/*
 * psb_ttm_io_mem_free - release bus-address info reserved above.
 *
 * NOTE(review): the body was lost in extraction; nothing is reserved in
 * psb_ttm_io_mem_reserve() that needs releasing, so this is presumably
 * an empty stub — confirm against the original file.
 */
static void psb_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}
407 * Use this memory type priority if no eviction is needed.
410 static uint32_t psb_mem_prios[] = {
419 * Use this memory type priority if need to evict.
422 static uint32_t psb_busy_prios[] = {
430 struct ttm_bo_driver psb_ttm_bo_driver
= {
432 .mem_type_prio = psb_mem_prios,
433 .mem_busy_prio = psb_busy_prios,
434 .num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
435 .num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
437 .create_ttm_backend_entry
= &drm_psb_tbe_init
,
438 .invalidate_caches
= &psb_invalidate_caches
,
439 .init_mem_type
= &psb_init_mem_type
,
440 .evict_flags
= &psb_evict_mask
,
442 .verify_access
= &psb_verify_access
,
443 .sync_obj_signaled
= &ttm_fence_sync_obj_signaled
,
444 .sync_obj_wait
= &ttm_fence_sync_obj_wait
,
445 .sync_obj_flush
= &ttm_fence_sync_obj_flush
,
446 .sync_obj_unref
= &ttm_fence_sync_obj_unref
,
447 .sync_obj_ref
= &ttm_fence_sync_obj_ref
,
448 .io_mem_reserve
= &psb_ttm_io_mem_reserve
,
449 .io_mem_free
= &psb_ttm_io_mem_free