/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/radeon_ttm.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <uapi_drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
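
/*
 * Walk back from a TTM bo_device to the radeon_device that embeds it,
 * via the intermediate radeon_mman wrapper.
 */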
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}
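
/*
 * TTM keeps two reference-counted global objects (memory accounting and
 * the BO subsystem) shared by all drivers; the helpers below take and
 * drop this device's references to them.
 */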
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
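
/*
 * Describe each TTM memory pool (system, GTT, VRAM) to the core:
 * which manager runs it, where it lives in the GPU address space,
 * and the caching attributes CPU mappings may use.
 */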
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
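
/*
 * Pick the placement an evicted buffer should move to: VRAM buffers are
 * pushed out to GTT while the GFX ring is up (CPU domain otherwise);
 * everything else falls back to system memory.
 */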
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}
static int radeon_verify_access(struct ttm_buffer_object *bo)
{
	return 0;
}
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}
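
/*
 * Copy a buffer between placements with the GPU's copy ring and fence
 * the transfer.
 */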
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}
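
/*
 * VRAM <-> system moves bounce through a temporary GTT placement so the
 * blit path above can still handle the VRAM half of the trip.
 */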
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
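
/*
 * Top-level move hook: fast-path moves that need no data copy, prefer
 * the GPU blit when a copy ring is available, and fall back to a CPU
 * memcpy otherwise.
 */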
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}
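
/*
 * Set up (and tear down, below) the bus address window TTM uses to
 * CPU-map a buffer: nothing for system pages, the AGP aperture for GTT
 * on AGP, and the PCI aperture for visible VRAM.
 */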
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
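
/*
 * TTM sync-object hooks; these simply wrap TTM's opaque void pointers
 * around radeon fences.
 */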
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}
static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}
static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->agpdev,
					 size, page_flags, dummy_read_page);
	}
#endif /* DUMBBELL_WIP */

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
#ifdef DUMBBELL_WIP
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
#endif /* DUMBBELL_WIP */

	if (ttm->state != tt_unpopulated)
		return 0;

	/*
	 * Maybe unneeded on FreeBSD.
	 */
#ifdef DUMBBELL_WIP
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
#endif /* DUMBBELL_WIP */

	rdev = radeon_get_rdev(ttm->bdev);
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif /* DUMBBELL_WIP */

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS((struct vm_page *)ttm->pages[i]);
#ifdef DUMBBELL_WIP
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
#endif /* DUMBBELL_WIP */
	}
	return 0;
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif /* DUMBBELL_WIP */

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			gtt->ttm.dma_address[i] = 0;
#ifdef DUMBBELL_WIP
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
#endif /* DUMBBELL_WIP */
		}
	}

	ttm_pool_unpopulate(ttm);
}
static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};
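
/*
 * Bring up the TTM backing for this device: global references, the BO
 * device itself, the VRAM and GTT pools, the pinned "stolen" VGA memory
 * buffer, and the debugfs files.
 */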
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r, r2;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No others user of address space so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	rdev->ddev->drm_ttm_bdev = &rdev->mman.bdev;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		r2 = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (likely(r2 == 0)) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		r2 = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (likely(r2 == 0)) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	return 0;
}
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}
/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}
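
/*
 * CPU mmap support: hand the VMA to TTM, then interpose our own fault
 * handler so faults are serialized against memory-clock changes.
 */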
#ifdef DUMBBELL_WIP
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
	r = ttm_vm_ops->fault(vma, vmf);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return -EINVAL;
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}

	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}

	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}

	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}
#endif /* DUMBBELL_WIP */
#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};
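
/*
 * debugfs files that expose the whole of VRAM and the GART as ordinary
 * readable files.
 */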
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}
static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};
static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}
static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}
static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
	return 0;
#endif
}
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}