/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}
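
/*
 * TTM destroy callback: drop the BO's memory accounting, unlink it from the
 * GEM object list, release its surface register and free the radeon_bo.
 */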

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
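
/* A TTM BO belongs to this driver iff its destroy callback is ours. */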

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}
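
/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list used by
 * ttm_bo_validate(), picking caching flags from the BO's GTT_UC/GTT_WC hints.
 */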

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512kb was measured as the most optimal number.
	 */
	if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
	      (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
	    rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
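
/*
 * Allocate a new radeon_bo: initialize the GEM object, pick the TTM BO type,
 * sanitize the caching flags and hand the object to ttm_bo_init().
 */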

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup2(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~RADEON_GEM_GTT_WC;

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	lockmgr(&rdev->pm.mclk_lock, LK_SHARED);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	lockmgr(&rdev->pm.mclk_lock, LK_RELEASE);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);
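
/*
 * Map the whole BO into kernel address space with ttm_bo_kmap() and cache the
 * resulting pointer in bo->kptr.
 */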

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	radeon_bo_check_tiling(bo, 0, 0);

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
	ttm_bo_reference(&bo->tbo);

void radeon_bo_unref(struct radeon_bo **bo)
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;
	struct radeon_bo *rbo;

	if ((rbo = *bo) == NULL)
		return;
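
/*
 * Pin a BO into the given domain, optionally below max_offset, by marking all
 * of its placements TTM_PL_FLAG_NO_EVICT and revalidating it.
 */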

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			if (max_offset < (radeon_bo_gpu_offset(bo) - domain_start)) {
				DRM_ERROR("radeon_bo_pin_restricted: "
				    "max_offset(%ju) < "
				    "(radeon_bo_gpu_offset(%ju) - "
				    "domain_start(%ju))\n",
				    (uintmax_t)max_offset, (uintmax_t)radeon_bo_gpu_offset(bo),
				    (uintmax_t)domain_start);
			}
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
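
/*
 * Drop one pin reference; when the count reaches zero, clear
 * TTM_PL_FLAG_NO_EVICT from all placements and revalidate so the BO becomes
 * evictable again.
 */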

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
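
/*
 * Called at teardown: complain about and forcibly free any GEM objects that
 * userspace still holds.
 */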

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
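
/*
 * Reserve all BOs on the CS reloc list and validate them into their preferred
 * domains, limiting the amount of data moved per IB to the threshold above.
 */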

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}
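
/* Find (or steal) a free surface register for a tiled BO and program it. */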

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	KASSERT(radeon_bo_is_reserved(bo),
	    ("radeon_bo_get_surface_reg: radeon_bo is not reserved"));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}
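
/*
 * Validate the requested tiling parameters (bank width/height, macro tile
 * aspect, tile split) on Evergreen and later chips, then store them on the BO.
 */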

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
	struct radeon_device *rdev = bo->rdev;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;

		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
	KASSERT(radeon_bo_is_reserved(bo),
	    ("radeon_bo_get_tiling_flags: radeon_bo is not reserved"));

	*tiling_flags = bo->tiling_flags;

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	KASSERT((radeon_bo_is_reserved(bo) || force_drop),
	    ("radeon_bo_check_tiling: radeon_bo is not reserved && !force_drop"));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}
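
/*
 * TTM move notify callback: refresh tiling state, invalidate VM mappings and
 * update the per-domain memory usage counters.
 */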

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
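
/*
 * CPU fault callback: if the faulting BO lies outside the CPU-visible part of
 * VRAM, revalidate it into visible VRAM, falling back to GTT on -ENOMEM.
 */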

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}
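
/*
 * Wait for all pending work on a BO to finish, optionally returning the
 * memory type it currently lives in.
 */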

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	lockmgr(&bo->tbo.bdev->fence_lock, LK_EXCLUSIVE);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	lockmgr(&bo->tbo.bdev->fence_lock, LK_RELEASE);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_reserve - reserve bo
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}