/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "r600d.h"
#include "r600_blit_shaders.h"
#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
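
/*
 * Context registers are programmed with PACKET3_SET_CONTEXT_REG: a
 * header dword, the target register's dword index relative to the
 * context-register aperture, then the value.  The recurring
 * (REG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2 idiom below converts a
 * byte-addressed register offset into that relative dword index.
 */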

/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;

	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(rdev, 2 << 0);
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, cb_color_info);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);
}

static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);
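
	/*
	 * CP_COHER_SIZE counts 256-byte blocks, hence the round-up
	 * above; a size of 0xffffffff requests a full-range sync.
	 */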
	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(rdev, sync_type);
	radeon_ring_write(rdev, cp_coher_size);
	radeon_ring_write(rdev, mc_addr >> 8);
	radeon_ring_write(rdev, 10); /* poll interval */
}

/* emits 21dw + 1 surface sync = 26dw */
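/*
 * SQ_PGM_START_* registers hold 256-byte-aligned addresses, which is
 * why the shader GPU addresses below are written shifted right by 8
 * and why r600_blit_init() aligns each shader section to 256 bytes.
 */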
static void
set_shaders(struct radeon_device *rdev)
{
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 2);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}

/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	u32 sq_vtx_constant_word2;

	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0x460);
	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
	radeon_ring_write(rdev, 48 - 1);
	radeon_ring_write(rdev, sq_vtx_constant_word2);
	radeon_ring_write(rdev, 1 << 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}

static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}

static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);

	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}

static void
set_default_state(struct radeon_device *rdev)
{
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	u64 gpu_addr;
	int dwords;

	switch (rdev->family) {
	case CHIP_R600:
		num_ps_threads = 136;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_threads = 144;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_threads = 136;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_threads = 136;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_threads = 188;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_threads = 188;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_threads = 144;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = VC_ENABLE;

	sq_config |= (DX9_CONSTS |
		      ALU_INST_PREFER_VECTOR |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
				  NUM_VS_GPRS(num_vs_gprs) |
				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
				  NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
				   NUM_VS_THREADS(num_vs_threads) |
				   NUM_GS_THREADS(num_gs_threads) |
				   NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* emit an IB pointing at default state */
	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(rdev, dwords);

	/* SQ config */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_config);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
	radeon_ring_write(rdev, sq_thread_resource_mgmt);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
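
/*
 * i2f() builds the IEEE-754 single-precision bit pattern for a small
 * unsigned integer.  For example, i2f(4): fraction = 4 << 10 = 0x1000
 * is shifted left 11 times until bit 23 is set, the exponent becomes
 * 140 - 11 = 129, and the result is 129 << 23 = 0x40800000, the
 * encoding of 4.0f.
 */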
static inline uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127; */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^^15 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;	/* no need to shift anymore */
			else {
				fraction = fraction << 1; /* keep
							     shifting left until top bit = 1 */
				exponent = exponent - 1;
			}
		}
		result = exponent << 23 | (fraction & 0x7fffff); /* mask
								    off top bit; assumed 1 */
	}
	return result;
}

int r600_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	/* pin copy shader into vram if already initialized */
	if (rdev->r600_blit.shader_obj)
		goto done;

	mutex_init(&rdev->r600_blit.mutex);
	rdev->r600_blit.state_offset = 0;

	if (rdev->family >= CHIP_RV770)
		rdev->r600_blit.state_len = r7xx_default_size;
	else
		rdev->r600_blit.state_len = r6xx_default_size;

	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = PACKET2(0);
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	obj_size += r6xx_vs_size * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	obj_size += r6xx_ps_size * 4;
	obj_size = ALIGN(obj_size, 256);
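
	/*
	 * At this point obj_size describes a buffer laid out as three
	 * 256-byte-aligned sections: the default state (padded with
	 * PACKET2 NOPs up to a 16-dword multiple), then the vertex
	 * shader at vs_offset, then the pixel shader at ps_offset.
	 */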
	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
			     &rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("r600 failed to allocate shader\n");
		return r;
	}

	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}
	if (rdev->family >= CHIP_RV770)
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r7xx_default_state, rdev->r600_blit.state_len * 4);
	else
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r6xx_default_state, rdev->r600_blit.state_len * 4);
	if (num_packet2s)
		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
			    packet2s, num_packet2s * 4);
	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	if (r) {
		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
		return r;
	}
	return 0;
}

void r600_blit_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->r600_blit.shader_obj == NULL)
		return;
	/* If we can't reserve the bo, unref should be enough to destroy
	 * it when it becomes idle.
	 */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (!r) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
	radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;

	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}

void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
	int r;
	int ring_size, line_size;
	int max_size;
	/* loops of emits 64 + fence emit possible */
	int dwords_per_loop = 76, num_loops;

	r = r600_vb_ib_get(rdev);
	if (r)
		return r;

	/* set_render_target emits 2 extra dwords on rv6xx */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		dwords_per_loop += 2;

	/* 8 bpp vs 32 bpp for xfer unit */
	if (size_bytes & 3)
		line_size = 8192;
	else
		line_size = 8192 * 4;

	max_size = 8192 * line_size;

	/* major loops cover the max size transfer */
	num_loops = ((size_bytes + max_size) / max_size);
	/* minor loops cover the extra non aligned bits */
	num_loops += ((size_bytes % line_size) ? 1 : 0);
	/* calculate number of loops correctly */
	ring_size = num_loops * dwords_per_loop;
	/* set default + shaders */
	ring_size += 40; /* shaders + def state */
	ring_size += 10; /* fence emit for VB IB */
	ring_size += 5; /* done copy */
	ring_size += 10; /* fence emit for done copy */
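
	/*
	 * Example budget: an aligned transfer below max_size that is a
	 * whole number of lines needs num_loops = 1, so ring_size =
	 * 1 * 76 + 40 + 10 + 5 + 10 = 141 dwords (plus 2 per loop on
	 * rv6xx).
	 */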
	r = radeon_ring_lock(rdev, ring_size);
	if (r)
		return r;

	set_default_state(rdev); /* 14 */
	set_shaders(rdev); /* 26 */
	return 0;
}

void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes)
{
	int max_bytes;
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
		  size_bytes, rdev->r600_blit.vb_used);
	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;

			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
#if 0
				r600_vb_ib_put(rdev);
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
#endif
			}
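
			/*
			 * Each rect is three vertices of four floats
			 * (x, y, u, v): top-left, bottom-left,
			 * bottom-right.  That is 12 dwords (48 bytes)
			 * of vertex data per loop, which is what the
			 * vb_used + 48 check above guards.
			 */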
			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;

			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);

			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;

			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
#if 0
				if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
					r600_nomm_put_vb(dev);
					r600_nomm_get_vb(dev);
					if (!dev_priv->blit_vb)
						return;
					vb = r600_nomm_get_vb_ptr(dev);
				}
#endif
			}

			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;

			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);

			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			/* 78 ring dwords per loop */
			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}