1 /*****************************************************************************
2 * copy.c: Fast YV12/NV12 copy
3 *****************************************************************************
4 * Copyright (C) 2010 Laurent Aimar
7 * Authors: Laurent Aimar <fenrir _AT_ videolan _DOT_ org>
8 * Victorien Le Couviour--Tuffet <victorien.lecouviour.tuffet@gmail.com>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU Lesser General Public License as published by
12 * the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this program; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
23 *****************************************************************************/
33 #include <vlc_common.h>
34 #include <vlc_picture.h>
/* Forward declaration: generic (non-SSE) plane copy with optional 16-bit
 * lane shift; defined at the bottom of this file. */
static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned height, int bitshift);
/* Argument sanity checks shared by the public Copy420_* entry points.
 * NOTE(review): continuation lines were lost in extraction; reconstructed
 * from how the callers use ASSERT_2PLANES/ASSERT_3PLANES — confirm against
 * upstream. */
#define ASSERT_PLANE(i) assert(src[i]); \
    assert(src_pitch[i])

#define ASSERT_2PLANES \
    assert(dst); \
    ASSERT_PLANE(0); \
    ASSERT_PLANE(1); \
    assert(height)

#define ASSERT_3PLANES ASSERT_2PLANES; \
    ASSERT_PLANE(2)
55 int CopyInitCache(copy_cache_t
*cache
, unsigned width
)
57 #ifdef CAN_COMPILE_SSE2
58 cache
->size
= __MAX((width
+ 0x3f) & ~ 0x3f, 16384);
59 cache
->buffer
= aligned_alloc(64, cache
->size
);
63 (void) cache
; (void) width
;
68 void CopyCleanCache(copy_cache_t
*cache
)
70 #ifdef CAN_COMPILE_SSE2
71 aligned_free(cache
->buffer
);
79 #ifdef CAN_COMPILE_SSE2
80 /* Copy 16/64 bytes from srcp to dstp loading data with the SSE>=2 instruction
81 * load and storing data with the SSE>=2 instruction store.
84 #define COPY16_SHIFTR(x) \
86 #define COPY16_SHIFTL(x) \
89 #define COPY16_S(dstp, srcp, load, store, shiftstr) \
91 load " 0(%[src]), %%xmm1\n" \
93 store " %%xmm1, 0(%[dst])\n" \
94 : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1")
96 #define COPY16(dstp, srcp, load, store) COPY16_S(dstp, srcp, load, store, "")
98 #define COPY64_SHIFTR(x) \
99 "psrlw "x", %%xmm1\n" \
100 "psrlw "x", %%xmm2\n" \
101 "psrlw "x", %%xmm3\n" \
102 "psrlw "x", %%xmm4\n"
103 #define COPY64_SHIFTL(x) \
104 "psllw "x", %%xmm1\n" \
105 "psllw "x", %%xmm2\n" \
106 "psllw "x", %%xmm3\n" \
107 "psllw "x", %%xmm4\n"
109 #define COPY64_S(dstp, srcp, load, store, shiftstr) \
111 load " 0(%[src]), %%xmm1\n" \
112 load " 16(%[src]), %%xmm2\n" \
113 load " 32(%[src]), %%xmm3\n" \
114 load " 48(%[src]), %%xmm4\n" \
116 store " %%xmm1, 0(%[dst])\n" \
117 store " %%xmm2, 16(%[dst])\n" \
118 store " %%xmm3, 32(%[dst])\n" \
119 store " %%xmm4, 48(%[dst])\n" \
120 : : [dst]"r"(dstp), [src]"r"(srcp) : "memory", "xmm1", "xmm2", "xmm3", "xmm4")
122 #define COPY64(dstp, srcp, load, store) \
123 COPY64_S(dstp, srcp, load, store, "")
125 #ifdef COPY_TEST_NOOPTIM
126 # undef vlc_CPU_SSE4_1
127 # define vlc_CPU_SSE4_1() (0)
129 # define vlc_CPU_SSE3() (0)
130 # undef vlc_CPU_SSSE3
131 # define vlc_CPU_SSSE3() (0)
133 # define vlc_CPU_SSE2() (0)
136 /* Optimized copy from "Uncacheable Speculative Write Combining" memory
137 * as used by some video surface.
138 * XXX It is really efficient only when SSE4.1 is available.
141 static void CopyFromUswc(uint8_t *dst
, size_t dst_pitch
,
142 const uint8_t *src
, size_t src_pitch
,
143 unsigned width
, unsigned height
, int bitshift
)
145 assert(((intptr_t)dst
& 0x0f) == 0 && (dst_pitch
& 0x0f) == 0);
147 asm volatile ("mfence");
149 #define SSE_USWC_COPY(shiftstr16, shiftstr64) \
150 for (unsigned y = 0; y < height; y++) { \
151 const unsigned unaligned = (-(uintptr_t)src) & 0x0f; \
152 unsigned x = unaligned; \
153 if (vlc_CPU_SSE4_1()) { \
155 for (; x+63 < width; x += 64) \
156 COPY64_S(&dst[x], &src[x], "movntdqa", "movdqa", shiftstr64); \
158 COPY16_S(dst, src, "movdqu", "movdqa", shiftstr16); \
159 for (; x+63 < width; x += 64) \
160 COPY64_S(&dst[x], &src[x], "movntdqa", "movdqu", shiftstr64); \
164 for (; x+63 < width; x += 64) \
165 COPY64_S(&dst[x], &src[x], "movdqa", "movdqa", shiftstr64); \
167 COPY16_S(dst, src, "movdqu", "movdqa", shiftstr16); \
168 for (; x+63 < width; x += 64) \
169 COPY64_S(&dst[x], &src[x], "movdqa", "movdqu", shiftstr64); \
172 /* The following should not happen since buffers are generally well aligned */ \
174 CopyPlane(&dst[x], dst_pitch - x, &src[x], src_pitch - x, 1, bitshift); \
182 SSE_USWC_COPY("", "")
185 SSE_USWC_COPY(COPY16_SHIFTL("$6"), COPY64_SHIFTL("$6"))
188 SSE_USWC_COPY(COPY16_SHIFTR("$6"), COPY64_SHIFTR("$6"))
191 SSE_USWC_COPY(COPY16_SHIFTR("$2"), COPY64_SHIFTR("$2"))
194 SSE_USWC_COPY(COPY16_SHIFTL("$2"), COPY64_SHIFTL("$2"))
197 SSE_USWC_COPY(COPY16_SHIFTR("$4"), COPY64_SHIFTR("$4"))
200 SSE_USWC_COPY(COPY16_SHIFTL("$2"), COPY64_SHIFTL("$2"))
203 vlc_assert_unreachable();
207 asm volatile ("mfence");
/* Copy a rectangle from the (16-byte aligned) cache buffer to the
 * destination, using non-temporal stores when dst is aligned so the copied
 * pixels do not pollute the data cache. */
static void Copy2d(uint8_t *dst, size_t dst_pitch,
                   const uint8_t *src, size_t src_pitch,
                   unsigned width, unsigned height)
{
    assert(((intptr_t)src & 0x0f) == 0 && (src_pitch & 0x0f) == 0);

    for (unsigned y = 0; y < height; y++) {
        unsigned x = 0;

        bool unaligned = ((intptr_t)dst & 0x0f) != 0;
        if (!unaligned) {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movntdq");
        } else {
            for (; x+63 < width; x += 64)
                COPY64(&dst[x], &src[x], "movdqa", "movdqu");
        }

        /* Scalar tail for the last (width % 64) bytes. */
        for (; x < width; x++)
            dst[x] = src[x];

        src += src_pitch;
        dst += dst_pitch;
    }
}
239 SSE_InterleaveUV(uint8_t *dst
, size_t dst_pitch
,
240 uint8_t *srcu
, size_t srcu_pitch
,
241 uint8_t *srcv
, size_t srcv_pitch
,
242 unsigned int width
, unsigned int height
, uint8_t pixel_size
)
244 assert(!((intptr_t)srcu
& 0xf) && !(srcu_pitch
& 0x0f) &&
245 !((intptr_t)srcv
& 0xf) && !(srcv_pitch
& 0x0f));
247 static const uint8_t shuffle_8
[] = { 0, 8,
255 static const uint8_t shuffle_16
[] = { 0, 1, 8, 9,
259 const uint8_t *shuffle
= pixel_size
== 1 ? shuffle_8
: shuffle_16
;
261 for (unsigned int y
= 0; y
< height
; ++y
)
266 "movhpd 0x00(%[src2]), %%xmm0\n" \
267 "movlpd 0x00(%[src1]), %%xmm0\n" \
269 "movhpd 0x08(%[src2]), %%xmm1\n" \
270 "movlpd 0x08(%[src1]), %%xmm1\n" \
272 "movhpd 0x10(%[src2]), %%xmm2\n" \
273 "movlpd 0x10(%[src1]), %%xmm2\n" \
275 "movhpd 0x18(%[src2]), %%xmm3\n" \
276 "movlpd 0x18(%[src1]), %%xmm3\n"
279 "movdqu %%xmm0, 0x00(%[dst])\n" \
280 "movdqu %%xmm1, 0x10(%[dst])\n" \
281 "movdqu %%xmm2, 0x20(%[dst])\n" \
282 "movdqu %%xmm3, 0x30(%[dst])\n"
284 #ifdef CAN_COMPILE_SSSE3
286 for (x
= 0; x
< (width
& ~31); x
+= 32)
289 "movdqu (%[shuffle]), %%xmm7\n"
291 "pshufb %%xmm7, %%xmm0\n"
292 "pshufb %%xmm7, %%xmm1\n"
293 "pshufb %%xmm7, %%xmm2\n"
294 "pshufb %%xmm7, %%xmm3\n"
296 : : [dst
]"r"(dst
+2*x
),
297 [src1
]"r"(srcu
+x
), [src2
]"r"(srcv
+x
),
298 [shuffle
]"r"(shuffle
)
299 : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7"
305 assert(pixel_size
== 1);
306 for (x
= 0; x
< (width
& ~31); x
+= 32)
310 "movhlps %%xmm0, %%xmm4\n"
311 "punpcklbw %%xmm4, %%xmm0\n"
313 "movhlps %%xmm1, %%xmm4\n"
314 "punpcklbw %%xmm4, %%xmm1\n"
316 "movhlps %%xmm2, %%xmm4\n"
317 "punpcklbw %%xmm4, %%xmm2\n"
319 "movhlps %%xmm3, %%xmm4\n"
320 "punpcklbw %%xmm4, %%xmm3\n"
322 : : [dst
]"r"(dst
+2*x
),
323 [src1
]"r"(srcu
+x
), [src2
]"r"(srcv
+x
)
325 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm7"
333 for (; x
< width
; x
++) {
334 dst
[2*x
+0] = srcu
[x
];
335 dst
[2*x
+1] = srcv
[x
];
340 for (; x
< width
; x
+= 2) {
341 dst
[2*x
+0] = srcu
[x
];
342 dst
[2*x
+1] = srcu
[x
+ 1];
343 dst
[2*x
+2] = srcv
[x
];
344 dst
[2*x
+3] = srcv
[x
+ 1];
354 static void SSE_SplitUV(uint8_t *dstu
, size_t dstu_pitch
,
355 uint8_t *dstv
, size_t dstv_pitch
,
356 const uint8_t *src
, size_t src_pitch
,
357 unsigned width
, unsigned height
, uint8_t pixel_size
)
359 assert(pixel_size
== 1 || pixel_size
== 2);
360 assert(((intptr_t)src
& 0xf) == 0 && (src_pitch
& 0x0f) == 0);
363 "movdqa 0(%[src]), %%xmm0\n" \
364 "movdqa 16(%[src]), %%xmm1\n" \
365 "movdqa 32(%[src]), %%xmm2\n" \
366 "movdqa 48(%[src]), %%xmm3\n"
369 "movq %%xmm0, 0(%[dst1])\n" \
370 "movq %%xmm1, 8(%[dst1])\n" \
371 "movhpd %%xmm0, 0(%[dst2])\n" \
372 "movhpd %%xmm1, 8(%[dst2])\n" \
373 "movq %%xmm2, 16(%[dst1])\n" \
374 "movq %%xmm3, 24(%[dst1])\n" \
375 "movhpd %%xmm2, 16(%[dst2])\n" \
376 "movhpd %%xmm3, 24(%[dst2])\n"
378 #ifdef CAN_COMPILE_SSSE3
381 static const uint8_t shuffle_8
[] = { 0, 2, 4, 6, 8, 10, 12, 14,
382 1, 3, 5, 7, 9, 11, 13, 15 };
383 static const uint8_t shuffle_16
[] = { 0, 1, 4, 5, 8, 9, 12, 13,
384 2, 3, 6, 7, 10, 11, 14, 15 };
385 const uint8_t *shuffle
= pixel_size
== 1 ? shuffle_8
: shuffle_16
;
386 for (unsigned y
= 0; y
< height
; y
++) {
388 for (; x
< (width
& ~31); x
+= 32) {
390 "movdqu (%[shuffle]), %%xmm7\n"
392 "pshufb %%xmm7, %%xmm0\n"
393 "pshufb %%xmm7, %%xmm1\n"
394 "pshufb %%xmm7, %%xmm2\n"
395 "pshufb %%xmm7, %%xmm3\n"
397 : : [dst1
]"r"(&dstu
[x
]), [dst2
]"r"(&dstv
[x
]), [src
]"r"(&src
[2*x
]), [shuffle
]"r"(shuffle
) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm7");
401 for (; x
< width
; x
++) {
402 dstu
[x
] = src
[2*x
+0];
403 dstv
[x
] = src
[2*x
+1];
408 for (; x
< width
; x
+= 2) {
409 dstu
[x
] = src
[2*x
+0];
410 dstu
[x
+1] = src
[2*x
+1];
411 dstv
[x
] = src
[2*x
+2];
412 dstv
[x
+1] = src
[2*x
+3];
422 assert(pixel_size
== 1);
423 static const uint8_t mask
[] = { 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
424 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 };
426 for (unsigned y
= 0; y
< height
; y
++)
429 for (; x
< (width
& ~31); x
+= 32) {
431 "movdqu (%[mask]), %%xmm7\n"
433 "movdqa %%xmm0, %%xmm4\n"
434 "movdqa %%xmm1, %%xmm5\n"
435 "movdqa %%xmm2, %%xmm6\n"
438 "pand %%xmm7, %%xmm4\n"
439 "pand %%xmm7, %%xmm5\n"
440 "pand %%xmm7, %%xmm6\n"
441 "packuswb %%xmm4, %%xmm0\n"
442 "packuswb %%xmm5, %%xmm1\n"
443 "pand %%xmm3, %%xmm7\n"
446 "packuswb %%xmm6, %%xmm2\n"
447 "packuswb %%xmm7, %%xmm3\n"
449 : : [dst2
]"r"(&dstu
[x
]), [dst1
]"r"(&dstv
[x
]), [src
]"r"(&src
[2*x
]), [mask
]"r"(mask
) : "memory", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7");
451 for (; x
< width
; x
++) {
452 dstu
[x
] = src
[2*x
+0];
453 dstv
[x
] = src
[2*x
+1];
/* Copy a whole plane out of USWC memory, batching hstep lines at a time
 * through the cache buffer (USWC reads are only fast via streaming loads). */
static void SSE_CopyPlane(uint8_t *dst, size_t dst_pitch,
                          const uint8_t *src, size_t src_pitch,
                          uint8_t *cache, size_t cache_size,
                          unsigned height, int bitshift)
{
    const size_t copy_pitch = __MIN(src_pitch, dst_pitch);
    const unsigned w16 = (copy_pitch+15) & ~15;          /* cache line pitch */
    const unsigned hstep = cache_size / w16;             /* lines per batch  */
    /* NOTE(review): width clamped against hstep (a line count) as in the
     * original text — looks unit-mismatched; confirm against upstream. */
    const unsigned cache_width = __MIN(src_pitch, hstep);

    /* If SSE4.1: CopyFromUswc is faster than memcpy */
    if (!vlc_CPU_SSE4_1() && bitshift == 0 && src_pitch == dst_pitch) {
        memcpy(dst, src, copy_pitch * height);
        return;
    }

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of line into our cache */
        CopyFromUswc(cache, w16, src, src_pitch, cache_width, hblock, bitshift);

        /* Copy from our cache to the destination */
        Copy2d(dst, dst_pitch, cache, w16, copy_pitch, hblock);

        src += src_pitch * hblock;
        dst += dst_pitch * hblock;
    }
}
/* Interleave two USWC chroma planes (U then V) into one semi-planar UV
 * destination plane, batching lines through the cache buffer: U lines go to
 * the first half of the cache, V lines to the second half, then
 * SSE_InterleaveUV merges them into dst. */
static void
SSE_InterleavePlanes(uint8_t *dst, size_t dst_pitch,
                     const uint8_t *srcu, size_t srcu_pitch,
                     const uint8_t *srcv, size_t srcv_pitch,
                     uint8_t *cache, size_t cache_size,
                     unsigned int height, uint8_t pixel_size, int bitshift)
{
    assert(srcu_pitch == srcv_pitch);
    size_t copy_pitch = __MIN(dst_pitch / 2, srcu_pitch);
    unsigned int const w16 = (srcu_pitch+15) & ~15;
    unsigned int const hstep = (cache_size) / (2*w16); /* cache holds U and V */
    /* NOTE(review): widths clamped against hstep (a line count) as in the
     * original text — looks unit-mismatched; confirm against upstream. */
    const unsigned cacheu_width = __MIN(srcu_pitch, hstep);
    const unsigned cachev_width = __MIN(srcv_pitch, hstep);

    for (unsigned int y = 0; y < height; y += hstep)
    {
        unsigned int const hblock = __MIN(hstep, height - y);

        /* Copy a bunch of line into our cache */
        CopyFromUswc(cache, w16, srcu, srcu_pitch, cacheu_width, hblock, bitshift);
        CopyFromUswc(cache+w16*hblock, w16, srcv, srcv_pitch,
                     cachev_width, hblock, bitshift);

        /* Copy from our cache to the destination */
        SSE_InterleaveUV(dst, dst_pitch, cache, w16,
                         cache + w16 * hblock, w16,
                         copy_pitch, hblock, pixel_size);

        srcu += hblock * srcu_pitch;
        srcv += hblock * srcv_pitch;
        dst  += hblock * dst_pitch;
    }
}
/* Split one USWC semi-planar UV plane into separate U and V planes,
 * batching lines through the cache buffer before de-interleaving. */
static void SSE_SplitPlanes(uint8_t *dstu, size_t dstu_pitch,
                            uint8_t *dstv, size_t dstv_pitch,
                            const uint8_t *src, size_t src_pitch,
                            uint8_t *cache, size_t cache_size,
                            unsigned height, uint8_t pixel_size, int bitshift)
{
    size_t copy_pitch = __MIN(__MIN(src_pitch / 2, dstu_pitch), dstv_pitch);
    const unsigned w16 = (src_pitch+15) & ~15;
    const unsigned hstep = cache_size / w16;
    /* NOTE(review): width clamped against hstep (a line count) as in the
     * original text — looks unit-mismatched; confirm against upstream. */
    const unsigned cache_width = __MIN(src_pitch, hstep);

    for (unsigned y = 0; y < height; y += hstep) {
        const unsigned hblock = __MIN(hstep, height - y);

        /* Copy a bunch of line into our cache */
        CopyFromUswc(cache, w16, src, src_pitch, cache_width, hblock, bitshift);

        /* Copy from our cache to the destination */
        SSE_SplitUV(dstu, dstu_pitch, dstv, dstv_pitch,
                    cache, w16, copy_pitch, hblock, pixel_size);

        src  += src_pitch * hblock;
        dstu += dstu_pitch * hblock;
        dstv += dstv_pitch * hblock;
    }
}
559 static void SSE_Copy420_P_to_P(picture_t
*dst
, const uint8_t *src
[static 3],
560 const size_t src_pitch
[static 3], unsigned height
,
561 const copy_cache_t
*cache
)
563 for (unsigned n
= 0; n
< 3; n
++) {
564 const unsigned d
= n
> 0 ? 2 : 1;
565 SSE_CopyPlane(dst
->p
[n
].p_pixels
, dst
->p
[n
].i_pitch
,
566 src
[n
], src_pitch
[n
],
567 cache
->buffer
, cache
->size
,
570 asm volatile ("emms");
574 static void SSE_Copy420_SP_to_SP(picture_t
*dst
, const uint8_t *src
[static 2],
575 const size_t src_pitch
[static 2], unsigned height
,
576 const copy_cache_t
*cache
)
578 SSE_CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
, src
[0], src_pitch
[0],
579 cache
->buffer
, cache
->size
, height
, 0);
580 SSE_CopyPlane(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
, src
[1], src_pitch
[1],
581 cache
->buffer
, cache
->size
, (height
+1) / 2, 0);
582 asm volatile ("emms");
586 SSE_Copy420_SP_to_P(picture_t
*dest
, const uint8_t *src
[static 2],
587 const size_t src_pitch
[static 2], unsigned int height
,
588 uint8_t pixel_size
, int bitshift
, const copy_cache_t
*cache
)
590 SSE_CopyPlane(dest
->p
[0].p_pixels
, dest
->p
[0].i_pitch
,
591 src
[0], src_pitch
[0], cache
->buffer
, cache
->size
, height
, bitshift
);
593 SSE_SplitPlanes(dest
->p
[1].p_pixels
, dest
->p
[1].i_pitch
,
594 dest
->p
[2].p_pixels
, dest
->p
[2].i_pitch
,
595 src
[1], src_pitch
[1], cache
->buffer
, cache
->size
,
596 (height
+1) / 2, pixel_size
, bitshift
);
597 asm volatile ("emms");
600 static void SSE_Copy420_P_to_SP(picture_t
*dst
, const uint8_t *src
[static 3],
601 const size_t src_pitch
[static 3],
602 unsigned height
, uint8_t pixel_size
,
603 int bitshift
, const copy_cache_t
*cache
)
605 SSE_CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
, src
[0], src_pitch
[0],
606 cache
->buffer
, cache
->size
, height
, bitshift
);
607 SSE_InterleavePlanes(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
,
608 src
[U_PLANE
], src_pitch
[U_PLANE
],
609 src
[V_PLANE
], src_pitch
[V_PLANE
],
610 cache
->buffer
, cache
->size
, (height
+1) / 2, pixel_size
, bitshift
);
611 asm volatile ("emms");
614 #endif /* CAN_COMPILE_SSE2 */
/**
 * Generic scalar plane copy.
 *
 * Copies min(src_pitch, dst_pitch) bytes per line for height lines.
 * bitshift == 0: plain copy (single memcpy when pitches match).
 * bitshift != 0: the plane is treated as 16-bit samples; bitshift > 0
 * shifts each sample right, bitshift < 0 shifts left by -bitshift.
 * NOTE(review): assumes dst/src are 2-byte aligned when bitshift != 0
 * (uint16_t access) — confirm for all callers.
 */
static void CopyPlane(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      unsigned height, int bitshift)
{
    const size_t copy_pitch = __MIN(src_pitch, dst_pitch);

    if (bitshift != 0)
    {
        for (unsigned y = 0; y < height; y++)
        {
            uint16_t *dst16 = (uint16_t *) dst;
            const uint16_t *src16 = (const uint16_t *) src;

            if (bitshift > 0)
                for (unsigned x = 0; x < (copy_pitch / 2); x++)
                    *dst16++ = (*src16++) >> (bitshift & 0xf);
            else
                for (unsigned x = 0; x < (copy_pitch / 2); x++)
                    *dst16++ = (*src16++) << ((-bitshift) & 0xf);
            src += src_pitch;
            dst += dst_pitch;
        }
    }
    else if (src_pitch == dst_pitch)
        memcpy(dst, src, copy_pitch * height);
    else
        for (unsigned y = 0; y < height; y++) {
            memcpy(dst, src, copy_pitch);
            src += src_pitch;
            dst += dst_pitch;
        }
}
648 void CopyPacked(picture_t
*dst
, const uint8_t *src
, const size_t src_pitch
,
649 unsigned height
, const copy_cache_t
*cache
)
652 assert(src
); assert(src_pitch
);
655 #ifdef CAN_COMPILE_SSE2
656 if (vlc_CPU_SSE4_1())
657 return SSE_CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
, src
, src_pitch
,
658 cache
->buffer
, cache
->size
, height
, 0);
662 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
, src
, src_pitch
,
666 void Copy420_SP_to_SP(picture_t
*dst
, const uint8_t *src
[static 2],
667 const size_t src_pitch
[static 2], unsigned height
,
668 const copy_cache_t
*cache
)
671 #ifdef CAN_COMPILE_SSE2
673 return SSE_Copy420_SP_to_SP(dst
, src
, src_pitch
, height
, cache
);
678 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
679 src
[0], src_pitch
[0], height
, 0);
680 CopyPlane(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
,
681 src
[1], src_pitch
[1], (height
+1)/2, 0);
/* Scalar de-interleave of a semi-planar UV plane into planar U and V,
 * generic over the sample type.  The *_SHIFTR/*_SHIFTL variants apply a
 * per-sample bit shift for bit-depth conversion. */
#define SPLIT_PLANES(type, pitch_den) do { \
    size_t copy_pitch = __MIN(__MIN(src_pitch / pitch_den, dstu_pitch), dstv_pitch); \
    for (unsigned y = 0; y < height; y++) { \
        for (unsigned x = 0; x < copy_pitch; x++) { \
            ((type *) dstu)[x] = ((const type *) src)[2*x+0]; \
            ((type *) dstv)[x] = ((const type *) src)[2*x+1]; \
        } \
        src += src_pitch; \
        dstu += dstu_pitch; \
        dstv += dstv_pitch; \
    } \
} while(0)

#define SPLIT_PLANES_SHIFTR(type, pitch_den, bitshift) do { \
    size_t copy_pitch = __MIN(__MIN(src_pitch / pitch_den, dstu_pitch), dstv_pitch); \
    for (unsigned y = 0; y < height; y++) { \
        for (unsigned x = 0; x < copy_pitch; x++) { \
            ((type *) dstu)[x] = (((const type *) src)[2*x+0]) >> (bitshift); \
            ((type *) dstv)[x] = (((const type *) src)[2*x+1]) >> (bitshift); \
        } \
        src += src_pitch; \
        dstu += dstu_pitch; \
        dstv += dstv_pitch; \
    } \
} while(0)

#define SPLIT_PLANES_SHIFTL(type, pitch_den, bitshift) do { \
    size_t copy_pitch = __MIN(__MIN(src_pitch / pitch_den, dstu_pitch), dstv_pitch); \
    for (unsigned y = 0; y < height; y++) { \
        for (unsigned x = 0; x < copy_pitch; x++) { \
            ((type *) dstu)[x] = (((const type *) src)[2*x+0]) << (bitshift); \
            ((type *) dstv)[x] = (((const type *) src)[2*x+1]) << (bitshift); \
        } \
        src += src_pitch; \
        dstu += dstu_pitch; \
        dstv += dstv_pitch; \
    } \
} while(0)
723 static void SplitPlanes(uint8_t *dstu
, size_t dstu_pitch
,
724 uint8_t *dstv
, size_t dstv_pitch
,
725 const uint8_t *src
, size_t src_pitch
, unsigned height
)
727 SPLIT_PLANES(uint8_t, 2);
730 static void SplitPlanes16(uint8_t *dstu
, size_t dstu_pitch
,
731 uint8_t *dstv
, size_t dstv_pitch
,
732 const uint8_t *src
, size_t src_pitch
, unsigned height
,
736 SPLIT_PLANES(uint16_t, 4);
737 else if (bitshift
> 0)
738 SPLIT_PLANES_SHIFTR(uint16_t, 4, bitshift
& 0xf);
740 SPLIT_PLANES_SHIFTL(uint16_t, 4, (-bitshift
) & 0xf);
743 void Copy420_SP_to_P(picture_t
*dst
, const uint8_t *src
[static 2],
744 const size_t src_pitch
[static 2], unsigned height
,
745 const copy_cache_t
*cache
)
748 #ifdef CAN_COMPILE_SSE2
750 return SSE_Copy420_SP_to_P(dst
, src
, src_pitch
, height
, 1, 0, cache
);
755 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
756 src
[0], src_pitch
[0], height
, 0);
757 SplitPlanes(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
,
758 dst
->p
[2].p_pixels
, dst
->p
[2].i_pitch
,
759 src
[1], src_pitch
[1], (height
+1)/2);
762 void Copy420_16_SP_to_P(picture_t
*dst
, const uint8_t *src
[static 2],
763 const size_t src_pitch
[static 2], unsigned height
,
764 int bitshift
, const copy_cache_t
*cache
)
767 assert(bitshift
>= -6 && bitshift
<= 6 && (bitshift
% 2 == 0));
769 #ifdef CAN_COMPILE_SSE3
771 return SSE_Copy420_SP_to_P(dst
, src
, src_pitch
, height
, 2, bitshift
, cache
);
776 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
777 src
[0], src_pitch
[0], height
, bitshift
);
778 SplitPlanes16(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
,
779 dst
->p
[2].p_pixels
, dst
->p
[2].i_pitch
,
780 src
[1], src_pitch
[1], (height
+1)/2, bitshift
);
/* Scalar interleave of planar U and V into one UV plane.  The caller must
 * define copy_lines, copy_pitch, dstUV, srcU, srcV and the i_extra_pitch_*
 * (per-line padding in elements) locals.  The *_SHIFTR/*_SHIFTL variants
 * additionally shift each sample for bit-depth conversion.
 * (Also fixes the misspelled macro parameter "bitshitf" -> "bitshift".) */
#define INTERLEAVE_UV() do { \
    for ( unsigned int line = 0; line < copy_lines; line++ ) { \
        for ( unsigned int col = 0; col < copy_pitch; col++ ) { \
            *dstUV++ = *srcU++; \
            *dstUV++ = *srcV++; \
        } \
        dstUV += i_extra_pitch_uv; \
        srcU += i_extra_pitch_u; \
        srcV += i_extra_pitch_v; \
    } \
} while(0)

#define INTERLEAVE_UV_SHIFTR(bitshift) do { \
    for ( unsigned int line = 0; line < copy_lines; line++ ) { \
        for ( unsigned int col = 0; col < copy_pitch; col++ ) { \
            *dstUV++ = (*srcU++) >> (bitshift); \
            *dstUV++ = (*srcV++) >> (bitshift); \
        } \
        dstUV += i_extra_pitch_uv; \
        srcU += i_extra_pitch_u; \
        srcV += i_extra_pitch_v; \
    } \
} while(0)

#define INTERLEAVE_UV_SHIFTL(bitshift) do { \
    for ( unsigned int line = 0; line < copy_lines; line++ ) { \
        for ( unsigned int col = 0; col < copy_pitch; col++ ) { \
            *dstUV++ = (*srcU++) << (bitshift); \
            *dstUV++ = (*srcV++) << (bitshift); \
        } \
        dstUV += i_extra_pitch_uv; \
        srcU += i_extra_pitch_u; \
        srcV += i_extra_pitch_v; \
    } \
} while(0)
819 void Copy420_P_to_SP(picture_t
*dst
, const uint8_t *src
[static 3],
820 const size_t src_pitch
[static 3], unsigned height
,
821 const copy_cache_t
*cache
)
824 #ifdef CAN_COMPILE_SSE2
826 return SSE_Copy420_P_to_SP(dst
, src
, src_pitch
, height
, 1, 0, cache
);
831 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
832 src
[0], src_pitch
[0], height
, 0);
834 const unsigned copy_lines
= (height
+1) / 2;
835 const unsigned copy_pitch
= __MIN(src_pitch
[1], dst
->p
[1].i_pitch
/ 2);
837 const int i_extra_pitch_uv
= dst
->p
[1].i_pitch
- 2 * copy_pitch
;
838 const int i_extra_pitch_u
= src_pitch
[U_PLANE
] - copy_pitch
;
839 const int i_extra_pitch_v
= src_pitch
[V_PLANE
] - copy_pitch
;
841 uint8_t *dstUV
= dst
->p
[1].p_pixels
;
842 const uint8_t *srcU
= src
[U_PLANE
];
843 const uint8_t *srcV
= src
[V_PLANE
];
847 void Copy420_16_P_to_SP(picture_t
*dst
, const uint8_t *src
[static 3],
848 const size_t src_pitch
[static 3], unsigned height
,
849 int bitshift
, const copy_cache_t
*cache
)
852 assert(bitshift
>= -6 && bitshift
<= 6 && (bitshift
% 2 == 0));
853 #ifdef CAN_COMPILE_SSE2
855 return SSE_Copy420_P_to_SP(dst
, src
, src_pitch
, height
, 2, bitshift
, cache
);
860 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
861 src
[0], src_pitch
[0], height
, bitshift
);
863 const unsigned copy_lines
= (height
+1) / 2;
864 const unsigned copy_pitch
= src_pitch
[1] / 2;
866 const int i_extra_pitch_uv
= dst
->p
[1].i_pitch
/ 2 - 2 * copy_pitch
;
867 const int i_extra_pitch_u
= src_pitch
[U_PLANE
] / 2 - copy_pitch
;
868 const int i_extra_pitch_v
= src_pitch
[V_PLANE
] / 2 - copy_pitch
;
870 uint16_t *dstUV
= (void*) dst
->p
[1].p_pixels
;
871 const uint16_t *srcU
= (const uint16_t *) src
[U_PLANE
];
872 const uint16_t *srcV
= (const uint16_t *) src
[V_PLANE
];
876 else if (bitshift
> 0)
877 INTERLEAVE_UV_SHIFTR(bitshift
& 0xf);
879 INTERLEAVE_UV_SHIFTL((-bitshift
) & 0xf);
882 void Copy420_P_to_P(picture_t
*dst
, const uint8_t *src
[static 3],
883 const size_t src_pitch
[static 3], unsigned height
,
884 const copy_cache_t
*cache
)
887 #ifdef CAN_COMPILE_SSE2
889 return SSE_Copy420_P_to_P(dst
, src
, src_pitch
, height
, cache
);
894 CopyPlane(dst
->p
[0].p_pixels
, dst
->p
[0].i_pitch
,
895 src
[0], src_pitch
[0], height
, 0);
896 CopyPlane(dst
->p
[1].p_pixels
, dst
->p
[1].i_pitch
,
897 src
[1], src_pitch
[1], (height
+1) / 2, 0);
898 CopyPlane(dst
->p
[2].p_pixels
, dst
->p
[2].i_pitch
,
899 src
[2], src_pitch
[2], (height
+1) / 2, 0);
902 int picture_UpdatePlanes(picture_t
*picture
, uint8_t *data
, unsigned pitch
)
904 /* fill in buffer info in first plane */
905 picture
->p
->p_pixels
= data
;
906 picture
->p
->i_pitch
= pitch
;
907 picture
->p
->i_lines
= picture
->format
.i_height
;
908 assert(picture
->p
->i_visible_pitch
<= picture
->p
->i_pitch
);
909 assert(picture
->p
->i_visible_lines
<= picture
->p
->i_lines
);
911 /* Fill chroma planes for biplanar YUV */
912 if (picture
->format
.i_chroma
== VLC_CODEC_NV12
||
913 picture
->format
.i_chroma
== VLC_CODEC_NV21
||
914 picture
->format
.i_chroma
== VLC_CODEC_P010
) {
916 for (int n
= 1; n
< picture
->i_planes
; n
++) {
917 const plane_t
*o
= &picture
->p
[n
-1];
918 plane_t
*p
= &picture
->p
[n
];
920 p
->p_pixels
= o
->p_pixels
+ o
->i_lines
* o
->i_pitch
;
922 p
->i_lines
= picture
->format
.i_height
;
923 assert(p
->i_visible_pitch
<= p
->i_pitch
);
924 assert(p
->i_visible_lines
<= p
->i_lines
);
926 /* The dx/d3d buffer is always allocated as NV12 */
927 if (vlc_fourcc_AreUVPlanesSwapped(picture
->format
.i_chroma
, VLC_CODEC_NV12
)) {
928 /* TODO : Swap NV21 UV planes to match NV12 */
933 /* Fill chroma planes for planar YUV */
935 if (picture
->format
.i_chroma
== VLC_CODEC_I420
||
936 picture
->format
.i_chroma
== VLC_CODEC_J420
||
937 picture
->format
.i_chroma
== VLC_CODEC_YV12
) {
939 for (int n
= 1; n
< picture
->i_planes
; n
++) {
940 const plane_t
*o
= &picture
->p
[n
-1];
941 plane_t
*p
= &picture
->p
[n
];
943 p
->p_pixels
= o
->p_pixels
+ o
->i_lines
* o
->i_pitch
;
944 p
->i_pitch
= pitch
/ 2;
945 p
->i_lines
= picture
->format
.i_height
/ 2;
947 /* The dx/d3d buffer is always allocated as YV12 */
948 if (vlc_fourcc_AreUVPlanesSwapped(picture
->format
.i_chroma
, VLC_CODEC_YV12
))
949 picture_SwapUV( picture
);
956 #include <vlc_picture.h>
964 void (*conv
)(picture_t
*, const uint8_t *[], const size_t [], unsigned,
965 const copy_cache_t
*);
966 void (*conv16
)(picture_t
*, const uint8_t *[], const size_t [], unsigned, int,
967 const copy_cache_t
*);
973 vlc_fourcc_t src_chroma
;
974 struct test_dst dsts
[3];
977 static const struct test_conv convs
[] = {
978 { .src_chroma
= VLC_CODEC_NV12
,
979 .dsts
= { { VLC_CODEC_I420
, 0, .conv
= Copy420_SP_to_P
},
980 { VLC_CODEC_NV12
, 0, .conv
= Copy420_SP_to_SP
} },
982 { .src_chroma
= VLC_CODEC_I420
,
983 .dsts
= { { VLC_CODEC_I420
, 0, .conv
= Copy420_P_to_P
},
984 { VLC_CODEC_NV12
, 0, .conv
= Copy420_P_to_SP
} },
986 { .src_chroma
= VLC_CODEC_P010
,
987 .dsts
= { { VLC_CODEC_I420_10L
, 6, .conv16
= Copy420_16_SP_to_P
} },
989 { .src_chroma
= VLC_CODEC_I420_10L
,
990 .dsts
= { { VLC_CODEC_P010
, -6, .conv16
= Copy420_16_P_to_SP
} },
993 #define NB_CONVS ARRAY_SIZE(convs)
1000 int i_visible_height
;
1002 static const struct test_size sizes
[] = {
1006 { 560, 369, 540, 350 },
1007 { 1274, 721, 1200, 720 },
1008 { 1920, 1088, 1920, 1080 },
1009 { 3840, 2160, 3840, 2160 },
1010 #if 0 /* too long */
1011 { 8192, 8192, 8192, 8192 },
1014 #define NB_SIZES ARRAY_SIZE(sizes)
1016 static void piccheck(picture_t
*pic
, const vlc_chroma_description_t
*dsc
,
1019 #define ASSERT_COLOR(good) do { \
1020 fprintf(stderr, "error: pixel doesn't match @ plane: %d: %d x %d: 0x%X vs 0x%X\n", i, x, y, *(--p), good); \
1021 assert(!"error: pixel doesn't match"); \
1024 #define PICCHECK(type_u, type_uv, colors_P, color_UV, pitch_den) do { \
1025 for (int i = 0; i < pic->i_planes; ++i) \
1027 const struct plane_t *plane = &pic->p[i]; \
1028 for (int y = 0; y < plane->i_visible_lines; ++y) \
1030 if (pic->i_planes == 2 && i == 1) \
1032 type_uv *p = (type_uv *)&plane->p_pixels[y * plane->i_pitch]; \
1033 for (int x = 0; x < plane->i_visible_pitch / 2 / pitch_den; ++x) \
1035 *(p++) = color_UV; \
1036 else if (*(p++) != color_UV) \
1037 ASSERT_COLOR(color_UV); \
1041 type_u *p = (type_u *) &plane->p_pixels[y * plane->i_pitch]; \
1042 for (int x = 0; x < plane->i_visible_pitch / pitch_den; ++x) \
1044 *(p++) = colors_P[i]; \
1045 else if (*(p++) != colors_P[i]) \
1046 ASSERT_COLOR(colors_P[i]); \
1052 assert(pic
->i_planes
== 2 || pic
->i_planes
== 3);
1053 assert(dsc
->pixel_size
== 1 || dsc
->pixel_size
== 2);
1055 if (dsc
->pixel_size
== 1)
1057 const uint8_t colors_8_P
[3] = { 0x42, 0xF1, 0x36 };
1058 const uint16_t color_8_UV
= ntoh16(0xF136);
1059 PICCHECK(uint8_t, uint16_t, colors_8_P
, color_8_UV
, 1);
1063 const unsigned mask
= (1 << dsc
->pixel_bits
) - 1;
1064 uint16_t colors_16_P
[3] = { 0x1042 &mask
, 0xF114 &mask
, 0x3645 &mask
};
1066 switch (pic
->format
.i_chroma
)
1068 case VLC_CODEC_P010
:
1069 for (size_t i
= 0; i
< 3; ++i
)
1070 colors_16_P
[i
] <<= 6;
1072 case VLC_CODEC_I420_10L
:
1075 vlc_assert_unreachable();
1078 uint32_t color_16_UV
= GetDWLE( &colors_16_P
[1] );
1080 PICCHECK(uint16_t, uint32_t, colors_16_P
, color_16_UV
, 2);
1084 static void pic_rsc_destroy(picture_t
*pic
)
1086 for (unsigned i
= 0; i
< 3; i
++)
1087 free(pic
->p
[i
].p_pixels
);
1091 static picture_t
*pic_new_unaligned(const video_format_t
*fmt
)
1093 /* Allocate a no-aligned picture in order to ease buffer overflow detection
1094 * from the source picture */
1095 const vlc_chroma_description_t
*dsc
= vlc_fourcc_GetChromaDescription(fmt
->i_chroma
);
1097 picture_resource_t rsc
= { .pf_destroy
= pic_rsc_destroy
};
1098 for (unsigned i
= 0; i
< dsc
->plane_count
; i
++)
1100 rsc
.p
[i
].i_lines
= ((fmt
->i_visible_height
+ (dsc
->p
[i
].h
.den
- 1)) / dsc
->p
[i
].h
.den
) * dsc
->p
[i
].h
.num
;
1101 rsc
.p
[i
].i_pitch
= ((fmt
->i_visible_width
+ (dsc
->p
[i
].w
.den
- 1)) / dsc
->p
[i
].w
.den
) * dsc
->p
[i
].w
.num
* dsc
->pixel_size
;
1102 rsc
.p
[i
].p_pixels
= malloc(rsc
.p
[i
].i_lines
* rsc
.p
[i
].i_pitch
);
1103 assert(rsc
.p
[i
].p_pixels
);
1105 return picture_NewFromResource(fmt
, &rsc
);
1112 #ifndef COPY_TEST_NOOPTIM
1113 if (!vlc_CPU_SSE2())
1115 fprintf(stderr
, "WARNING: could not test SSE\n");
1120 for (size_t i
= 0; i
< NB_CONVS
; ++i
)
1122 const struct test_conv
*conv
= &convs
[i
];
1124 for (size_t j
= 0; j
< NB_SIZES
; ++j
)
1126 const struct test_size
*size
= &sizes
[j
];
1128 const vlc_chroma_description_t
*src_dsc
=
1129 vlc_fourcc_GetChromaDescription(conv
->src_chroma
);
1133 video_format_Init(&fmt
, 0);
1134 video_format_Setup(&fmt
, conv
->src_chroma
,
1135 size
->i_width
, size
->i_height
,
1136 size
->i_visible_width
, size
->i_visible_height
,
1138 picture_t
*src
= pic_new_unaligned(&fmt
);
1140 piccheck(src
, src_dsc
, true);
1143 int ret
= CopyInitCache(&cache
, src
->format
.i_width
1144 * src_dsc
->pixel_size
);
1145 assert(ret
== VLC_SUCCESS
);
1147 for (size_t f
= 0; conv
->dsts
[f
].chroma
!= 0; ++f
)
1149 const struct test_dst
*test_dst
= &conv
->dsts
[f
];
1151 const vlc_chroma_description_t
*dst_dsc
=
1152 vlc_fourcc_GetChromaDescription(test_dst
->chroma
);
1154 fmt
.i_chroma
= test_dst
->chroma
;
1155 picture_t
*dst
= picture_NewFromFormat(&fmt
);
1158 const uint8_t * src_planes
[3] = { src
->p
[Y_PLANE
].p_pixels
,
1159 src
->p
[U_PLANE
].p_pixels
,
1160 src
->p
[V_PLANE
].p_pixels
};
1161 const size_t src_pitches
[3] = { src
->p
[Y_PLANE
].i_pitch
,
1162 src
->p
[U_PLANE
].i_pitch
,
1163 src
->p
[V_PLANE
].i_pitch
};
1165 fprintf(stderr
, "testing: %u x %u (vis: %u x %u) %4.4s -> %4.4s\n",
1166 size
->i_width
, size
->i_height
,
1167 size
->i_visible_width
, size
->i_visible_height
,
1168 (const char *) &src
->format
.i_chroma
,
1169 (const char *) &dst
->format
.i_chroma
);
1170 if (test_dst
->bitshift
== 0)
1171 test_dst
->conv(dst
, src_planes
, src_pitches
,
1172 src
->format
.i_visible_height
, &cache
);
1174 test_dst
->conv16(dst
, src_planes
, src_pitches
,
1175 src
->format
.i_visible_height
, test_dst
->bitshift
,
1177 piccheck(dst
, dst_dsc
, false);
1178 picture_Release(dst
);
1180 picture_Release(src
);
1181 CopyCleanCache(&cache
);