/* filter_msa_intrinsics.c - MSA optimised filter functions
 *
 * Copyright (c) 2016 Glenn Randers-Pehrson
 * Written by Mandar Sahastrabuddhe, August 2016.
 * Last changed in libpng 1.6.25 [September 1, 2016]
 *
 * This code is released under the libpng license.
 * For conditions of distribution and use, see the disclaimer
 * and license in png.h
 */
#include <stdio.h>
#include <stdint.h>
#include "../pngpriv.h"

#ifdef PNG_READ_SUPPORTED

/* This code requires -mfpu=msa on the command line: */
#if PNG_MIPS_MSA_IMPLEMENTATION == 1 /* intrinsics code from pngpriv.h */

#include <msa.h>
/* libpng row pointers are not necessarily aligned to any particular boundary,
 * however this code will only work with appropriate alignment.  mips/mips_init.c
 * checks for this (and will not compile unless it is done).  This code uses
 * variants of png_aligncast to avoid compiler warnings.
 */
#define png_ptr(type,pointer) png_aligncast(type *,pointer)
#define png_ptrc(type,pointer) png_aligncastconst(const type *,pointer)
/* The following relies on a variable 'temp_pointer' being declared with type
 * 'type'.  This is written this way just to hide the GCC strict aliasing
 * warning; note that the code is safe because there never is an alias between
 * the input and output pointers.
 */
#define png_ldr(type,pointer)\
   (temp_pointer = png_ptr(type,pointer), *temp_pointer)
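
/* Illustrative usage sketch (not from the libpng sources): png_ldr expects
 * the caller to have declared 'temp_pointer' with the matching vector type,
 * for example
 *
 *    v16u8 *temp_pointer;
 *    v16u8 v = png_ldr(v16u8, rp);
 *
 * which expands to (temp_pointer = png_aligncast(v16u8 *, rp), *temp_pointer),
 * so the dereference happens through a correctly typed, alignment-cast
 * pointer rather than through png_bytep.
 */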

#if PNG_MIPS_MSA_OPT > 0

#ifdef CLANG_BUILD
#define MSA_SRLI_B(a, b)   __msa_srli_b((v16i8) a, b)

#define LW(psrc)                                 \
( {                                              \
    uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
    uint32_t val_m;                              \
                                                 \
    asm volatile (                               \
        "lw  %[val_m],  %[psrc_lw_m]  \n\t"      \
                                                 \
        : [val_m] "=r" (val_m)                   \
        : [psrc_lw_m] "m" (*psrc_lw_m)           \
    );                                           \
                                                 \
    val_m;                                       \
} )

#define SH(val, pdst)                            \
{                                                \
    uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
    uint16_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sh  %[val_m],  %[pdst_sh_m]  \n\t"      \
                                                 \
        : [pdst_sh_m] "=m" (*pdst_sh_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#define SW(val, pdst)                            \
{                                                \
    uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
    uint32_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sw  %[val_m],  %[pdst_sw_m]  \n\t"      \
                                                 \
        : [pdst_sw_m] "=m" (*pdst_sw_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#if (__mips == 64)
#define SD(val, pdst)                            \
{                                                \
    uint8_t *pdst_sd_m = (uint8_t *) (pdst);     \
    uint64_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sd  %[val_m],  %[pdst_sd_m]  \n\t"      \
                                                 \
        : [pdst_sd_m] "=m" (*pdst_sd_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}
#else
#define SD(val, pdst)                                         \
{                                                             \
    uint8_t *pdst_sd_m = (uint8_t *) (pdst);                  \
    uint32_t val0_m, val1_m;                                  \
                                                              \
    val0_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);         \
    val1_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \
                                                              \
    SW(val0_m, pdst_sd_m);                                    \
    SW(val1_m, pdst_sd_m + 4);                                \
}
#endif
#else
#define MSA_SRLI_B(a, b)   (a >> b)

#if (__mips_isa_rev >= 6)
#define LW(psrc)                                 \
( {                                              \
    uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
    uint32_t val_m;                              \
                                                 \
    asm volatile (                               \
        "lw  %[val_m],  %[psrc_lw_m]  \n\t"      \
                                                 \
        : [val_m] "=r" (val_m)                   \
        : [psrc_lw_m] "m" (*psrc_lw_m)           \
    );                                           \
                                                 \
    val_m;                                       \
} )

#define SH(val, pdst)                            \
{                                                \
    uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
    uint16_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sh  %[val_m],  %[pdst_sh_m]  \n\t"      \
                                                 \
        : [pdst_sh_m] "=m" (*pdst_sh_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#define SW(val, pdst)                            \
{                                                \
    uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
    uint32_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sw  %[val_m],  %[pdst_sw_m]  \n\t"      \
                                                 \
        : [pdst_sw_m] "=m" (*pdst_sw_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#if (__mips == 64)
#define SD(val, pdst)                            \
{                                                \
    uint8_t *pdst_sd_m = (uint8_t *) (pdst);     \
    uint64_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "sd  %[val_m],  %[pdst_sd_m]  \n\t"      \
                                                 \
        : [pdst_sd_m] "=m" (*pdst_sd_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}
#else
#define SD(val, pdst)                                         \
{                                                             \
    uint8_t *pdst_sd_m = (uint8_t *) (pdst);                  \
    uint32_t val0_m, val1_m;                                  \
                                                              \
    val0_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);         \
    val1_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \
                                                              \
    SW(val0_m, pdst_sd_m);                                    \
    SW(val1_m, pdst_sd_m + 4);                                \
}
#endif
#else  // !(__mips_isa_rev >= 6)
#define LW(psrc)                                 \
( {                                              \
    uint8_t *psrc_lw_m = (uint8_t *) (psrc);     \
    uint32_t val_m;                              \
                                                 \
    asm volatile (                               \
        "ulw  %[val_m],  %[psrc_lw_m]  \n\t"     \
                                                 \
        : [val_m] "=r" (val_m)                   \
        : [psrc_lw_m] "m" (*psrc_lw_m)           \
    );                                           \
                                                 \
    val_m;                                       \
} )

#define SH(val, pdst)                            \
{                                                \
    uint8_t *pdst_sh_m = (uint8_t *) (pdst);     \
    uint16_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "ush  %[val_m],  %[pdst_sh_m]  \n\t"     \
                                                 \
        : [pdst_sh_m] "=m" (*pdst_sh_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#define SW(val, pdst)                            \
{                                                \
    uint8_t *pdst_sw_m = (uint8_t *) (pdst);     \
    uint32_t val_m = (val);                      \
                                                 \
    asm volatile (                               \
        "usw  %[val_m],  %[pdst_sw_m]  \n\t"     \
                                                 \
        : [pdst_sw_m] "=m" (*pdst_sw_m)          \
        : [val_m] "r" (val_m)                    \
    );                                           \
}

#define SD(val, pdst)                                         \
{                                                             \
    uint8_t *pdst_sd_m = (uint8_t *) (pdst);                  \
    uint32_t val0_m, val1_m;                                  \
                                                              \
    val0_m = (uint32_t) ((val) & 0x00000000FFFFFFFF);         \
    val1_m = (uint32_t) (((val) >> 32) & 0x00000000FFFFFFFF); \
                                                              \
    SW(val0_m, pdst_sd_m);                                    \
    SW(val1_m, pdst_sd_m + 4);                                \
}

#define SW_ZERO(pdst)                            \
{                                                \
    uint8_t *pdst_m = (uint8_t *) (pdst);        \
                                                 \
    asm volatile (                               \
        "usw  $0,  %[pdst_m]  \n\t"              \
                                                 \
        : [pdst_m] "=m" (*pdst_m)                \
        :                                        \
    );                                           \
}
#endif  // (__mips_isa_rev >= 6)
#endif  // CLANG_BUILD
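
/* Portability note (an illustrative sketch, not part of libpng): the LW/SW
 * helpers above pin down the exact aligned/unaligned load and store
 * encodings per ISA revision.  A compiler-only equivalent of LW would be
 *
 *    static uint32_t lw_equiv(const void *p)
 *    {
 *       uint32_t v;
 *       memcpy(&v, p, sizeof v);   byte-wise copy, no alignment assumption
 *       return v;
 *    }
 *
 * but the asm forms guarantee a single lw/ulw instruction is emitted.
 */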

#define LD_B(RTYPE, psrc) *((RTYPE *) (psrc))
#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)

#define LD_B2(RTYPE, psrc, stride, out0, out1)  \
{                                               \
    out0 = LD_B(RTYPE, (psrc));                 \
    out1 = LD_B(RTYPE, (psrc) + stride);        \
}
#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)

#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3)  \
{                                                           \
    LD_B2(RTYPE, (psrc), stride, out0, out1);               \
    LD_B2(RTYPE, (psrc) + 2 * stride, stride, out2, out3);  \
}
#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)

#define ST_B(RTYPE, in, pdst) *((RTYPE *) (pdst)) = (in)
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)

#define ST_B2(RTYPE, in0, in1, pdst, stride)  \
{                                             \
    ST_B(RTYPE, in0, (pdst));                 \
    ST_B(RTYPE, in1, (pdst) + stride);        \
}
#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)

#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride)    \
{                                                         \
    ST_B2(RTYPE, in0, in1, (pdst), stride);               \
    ST_B2(RTYPE, in2, in3, (pdst) + 2 * stride, stride);  \
}
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
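
/* For example (illustrative, not from the original sources),
 * LD_UB4(rp, 16, src0, src1, src2, src3) loads four consecutive 16-byte
 * vectors from rp, rp + 16, rp + 32 and rp + 48, and ST_UB4 stores four
 * vectors back in the same pattern; the 'up' filter loop below uses this
 * pair to process 64 bytes of a row per iteration.
 */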

#define ADD2(in0, in1, in2, in3, out0, out1)  \
{                                             \
    out0 = in0 + in1;                         \
    out1 = in2 + in3;                         \
}
#define ADD3(in0, in1, in2, in3, in4, in5,    \
             out0, out1, out2)                \
{                                             \
    ADD2(in0, in1, in2, in3, out0, out1);     \
    out2 = in4 + in5;                         \
}
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3)                  \
{                                                     \
    ADD2(in0, in1, in2, in3, out0, out1);             \
    ADD2(in4, in5, in6, in7, out2, out3);             \
}

#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1)      \
{                                                           \
    out0 = (RTYPE) __msa_ilvr_b((v16i8) in0, (v16i8) in1);  \
    out1 = (RTYPE) __msa_ilvr_b((v16i8) in2, (v16i8) in3);  \
}
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)

#define HSUB_UB2(RTYPE, in0, in1, out0, out1)                 \
{                                                             \
    out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0);  \
    out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1);  \
}
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)

#define SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val)                \
{                                                                        \
    v16i8 zero_m = { 0 };                                                \
                                                                         \
    out0 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in0, slide_val); \
    out1 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in1, slide_val); \
}
#define SLDI_B2_0_UB(...) SLDI_B2_0(v16u8, __VA_ARGS__)

#define SLDI_B3_0(RTYPE, in0, in1, in2, out0, out1, out2, slide_val)     \
{                                                                        \
    v16i8 zero_m = { 0 };                                                \
                                                                         \
    SLDI_B2_0(RTYPE, in0, in1, out0, out1, slide_val);                   \
    out2 = (RTYPE) __msa_sldi_b((v16i8) zero_m, (v16i8) in2, slide_val); \
}
#define SLDI_B3_0_UB(...) SLDI_B3_0(v16u8, __VA_ARGS__)

#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1)     \
{                                                           \
    out0 = (RTYPE) __msa_ilvev_w((v4i32) in1, (v4i32) in0); \
    out1 = (RTYPE) __msa_ilvev_w((v4i32) in3, (v4i32) in2); \
}
#define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)

#define ADD_ABS_H3(RTYPE, in0, in1, in2, out0, out1, out2)  \
{                                                           \
    v8i16 zero = { 0 };                                     \
                                                            \
    out0 = __msa_add_a_h((v8i16) zero, in0);                \
    out1 = __msa_add_a_h((v8i16) zero, in1);                \
    out2 = __msa_add_a_h((v8i16) zero, in2);                \
}
#define ADD_ABS_H3_SH(...) ADD_ABS_H3(v8i16, __VA_ARGS__)

#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1)      \
{                                                                         \
    out0 = (RTYPE) __msa_vshf_b((v16i8) mask0, (v16i8) in1, (v16i8) in0); \
    out1 = (RTYPE) __msa_vshf_b((v16i8) mask1, (v16i8) in3, (v16i8) in2); \
}
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)

#define CMP_AND_SELECT(inp0, inp1, inp2, inp3, inp4, inp5, out0)              \
{                                                                             \
    v8i16 _sel_h0, _sel_h1;                                                   \
    v16u8 _sel_b0, _sel_b1;                                                   \
                                                                              \
    _sel_h0 = (v8i16) __msa_clt_u_h((v8u16) inp1, (v8u16) inp0);              \
    _sel_b0 = (v16u8) __msa_pckev_b((v16i8) _sel_h0, (v16i8) _sel_h0);        \
    inp0 = (v8i16) __msa_bmnz_v((v16u8) inp0, (v16u8) inp1, (v16u8) _sel_h0); \
    inp4 = (v16u8) __msa_bmnz_v(inp3, inp4, _sel_b0);                         \
                                                                              \
    _sel_h1 = (v8i16) __msa_clt_u_h((v8u16) inp2, (v8u16) inp0);              \
    _sel_b1 = (v16u8) __msa_pckev_b((v16i8) _sel_h1, (v16i8) _sel_h1);        \
    inp4 = (v16u8) __msa_bmnz_v(inp4, inp5, _sel_b1);                         \
    out0 += inp4;                                                             \
}
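
/* CMP_AND_SELECT vectorises the predictor choice of the Paeth filter.  The
 * scalar form from the PNG specification is:
 *
 *    p  = a + b - c;
 *    pa = abs(p - a);   which equals abs(b - c)
 *    pb = abs(p - b);   which equals abs(a - c)
 *    pc = abs(p - c);   which equals abs(a + b - 2*c)
 *    pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 *
 * The callers compute pa/pb/pc as the halfword vectors inp0/inp1/inp2 (via
 * HSUB_UB2_SH and ADD_ABS_H3_SH); inp3/inp4/inp5 carry the byte vectors
 * a/b/c, and out0, which holds the raw bytes on entry, accumulates the
 * selected predictor.  The strict less-than tests preserve the spec's
 * tie-breaking order: a first, then b, then c.
 */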

void png_read_filter_row_up_msa(png_row_infop row_info, png_bytep row,
                                png_const_bytep prev_row)
{
   png_size_t i, cnt, cnt16, cnt32;
   png_size_t istop = row_info->rowbytes;
   png_bytep rp = row;
   png_const_bytep pp = prev_row;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

   for (i = 0; i < (istop >> 6); i++)
   {
      LD_UB4(rp, 16, src0, src1, src2, src3);
      LD_UB4(pp, 16, src4, src5, src6, src7);
      pp += 64;

      ADD4(src0, src4, src1, src5, src2, src6, src3, src7,
           src0, src1, src2, src3);

      ST_UB4(src0, src1, src2, src3, rp, 16);
      rp += 64;
   }

   /* Handle the 0..63 bytes left over after the 64-byte blocks. */
   cnt = istop & 0xF;
   cnt32 = istop & 0x20;
   cnt16 = istop & 0x10;

   if (cnt32)
   {
      if (cnt16 && cnt)
      {
         LD_UB4(rp, 16, src0, src1, src2, src3);
         LD_UB4(pp, 16, src4, src5, src6, src7);

         ADD4(src0, src4, src1, src5, src2, src6, src3, src7,
              src0, src1, src2, src3);

         ST_UB4(src0, src1, src2, src3, rp, 16);
         rp += 64;
      }
      else if (cnt16 || cnt)
      {
         LD_UB2(rp, 16, src0, src1);
         LD_UB2(pp, 16, src4, src5);

         src2 = LD_UB(rp + 32);
         src6 = LD_UB(pp + 32);

         ADD3(src0, src4, src1, src5, src2, src6, src0, src1, src2);

         ST_UB2(src0, src1, rp, 16);
         rp += 32;

         ST_UB(src2, rp);
         rp += 16;
      }
      else
      {
         LD_UB2(rp, 16, src0, src1);
         LD_UB2(pp, 16, src4, src5);

         ADD2(src0, src4, src1, src5, src0, src1);

         ST_UB2(src0, src1, rp, 16);
         rp += 32;
      }
   }
   else if (cnt16 && cnt)
   {
      LD_UB2(rp, 16, src0, src1);
      LD_UB2(pp, 16, src4, src5);

      ADD2(src0, src4, src1, src5, src0, src1);

      ST_UB2(src0, src1, rp, 16);
      rp += 32;
   }
   else if (cnt16 || cnt)
   {
      src0 = LD_UB(rp);
      src4 = LD_UB(pp);

      src0 += src4;

      ST_UB(src0, rp);
      rp += 16;
   }
}
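
/* Reference only: the scalar definition of the 'up' filter that the vector
 * code above applies 64/48/32/16 bytes at a time (an illustrative sketch,
 * not used by libpng):
 */
#if 0
static void
up_filter_scalar(png_bytep row, png_const_bytep prev_row, png_size_t n)
{
   png_size_t i;

   for (i = 0; i < n; i++)
      row[i] = (png_byte) (row[i] + prev_row[i]); /* addition is mod 256 */
}
#endif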

void png_read_filter_row_sub4_msa(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_size_t count;
   png_size_t istop = row_info->rowbytes;
   png_bytep src = row;
   png_bytep nxt = row + 4;
   int32_t inp0;
   v16u8 src0, src1, src2, src3, src4;
   v16u8 dst0, dst1;
   v16u8 zero = { 0 };

   istop -= 4;

   /* The first pixel is unfiltered; it seeds the 'left' operand. */
   inp0 = LW(src);
   src += 4;
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);

   for (count = 0; count < istop; count += 16)
   {
      src1 = LD_UB(src);
      src += 16;

      src2 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 4);
      src3 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 8);
      src4 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 12);

      src1 += src0;
      src2 += src1;
      src3 += src2;
      src4 += src3;
      src0 = src4; /* filtered pixel 4 is 'left' for the next block */

      ILVEV_W2_UB(src1, src2, src3, src4, dst0, dst1);
      dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);

      ST_UB(dst0, nxt);
      nxt += 16;
   }
}
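
/* Reference only: scalar form of the 'sub' filter for 4-byte pixels (an
 * illustrative sketch, not used by libpng).  The serial dependency on the
 * previous output pixel is what the sldi/add chain above unrolls four
 * pixels at a time:
 */
#if 0
static void
sub4_filter_scalar(png_bytep row, png_size_t rowbytes)
{
   png_size_t i;

   for (i = 4; i < rowbytes; i++)
      row[i] = (png_byte) (row[i] + row[i - 4]);
}
#endif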

void png_read_filter_row_sub3_msa(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_size_t count;
   png_size_t istop = row_info->rowbytes;
   png_bytep src = row;
   png_bytep nxt = row + 3;
   int64_t out0;
   int32_t inp0, out1;
   v16u8 src0, src1, src2, src3, src4, dst0, dst1;
   v16u8 zero = { 0 };
   v16i8 mask0 = { 0, 1, 2, 16, 17, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   v16i8 mask1 = { 0, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 21, 0, 0, 0, 0 };

   istop -= 3;

   /* The first pixel is unfiltered; it seeds the 'left' operand. */
   inp0 = LW(src);
   src += 3;
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);

   for (count = 0; count < istop; count += 12)
   {
      src1 = LD_UB(src);
      src += 12;

      src2 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 3);
      src3 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 6);
      src4 = (v16u8) __msa_sldi_b((v16i8) zero, (v16i8) src1, 9);

      src1 += src0;
      src2 += src1;
      src3 += src2;
      src4 += src3;
      src0 = src4; /* filtered pixel 4 is 'left' for the next block */

      VSHF_B2_UB(src1, src2, src3, src4, mask0, mask0, dst0, dst1);
      dst0 = (v16u8) __msa_vshf_b(mask1, (v16i8) dst1, (v16i8) dst0);
      out0 = __msa_copy_s_d((v2i64) dst0, 0);
      out1 = __msa_copy_s_w((v4i32) dst0, 2);

      SD(out0, nxt);
      nxt += 8;
      SW(out1, nxt);
      nxt += 4;
   }
}
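
/* The two masks used above repack the 3-byte pixels: mask0 gathers bytes
 * 0..2 of each of two vectors into six contiguous bytes, and mask1 then
 * joins two such six-byte groups into the final twelve filtered bytes,
 * which are stored as one doubleword (SD) plus one word (SW).
 */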

void png_read_filter_row_avg4_msa(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_size_t i;
   png_bytep src = row;
   png_bytep nxt = row;
   png_const_bytep pp = prev_row;
   png_size_t istop = row_info->rowbytes - 4;
   int32_t inp0, inp1, out0;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, dst0, dst1;
   v16u8 zero = { 0 };

   inp0 = LW(pp);
   pp += 4;
   inp1 = LW(src);
   src += 4;

   /* First pixel: no 'left' neighbour, so add floor(up / 2) per byte. */
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);
   src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);
   src0 = (v16u8) MSA_SRLI_B(src0, 1);
   src1 += src0;
   out0 = __msa_copy_s_w((v4i32) src1, 0);
   SW(out0, nxt);
   nxt += 4;

   for (i = 0; i < istop; i += 16)
   {
      src2 = LD_UB(pp);
      pp += 16;
      src6 = LD_UB(src);
      src += 16;

      SLDI_B2_0_UB(src2, src6, src3, src7, 4);
      SLDI_B2_0_UB(src2, src6, src4, src8, 8);
      SLDI_B2_0_UB(src2, src6, src5, src9, 12);

      src2 = __msa_ave_u_b(src2, src1);
      src6 += src2;
      src3 = __msa_ave_u_b(src3, src6);
      src7 += src3;
      src4 = __msa_ave_u_b(src4, src7);
      src8 += src4;
      src5 = __msa_ave_u_b(src5, src8);
      src9 += src5;
      src1 = src9; /* filtered pixel 4 is 'left' for the next block */

      ILVEV_W2_UB(src6, src7, src8, src9, dst0, dst1);
      dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);

      ST_UB(dst0, nxt);
      nxt += 16;
   }
}
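
/* Reference only: scalar form of the 'average' filter for 4-byte pixels
 * (an illustrative sketch, not used by libpng).  __msa_ave_u_b computes
 * the same floor((a + b) / 2) per byte without intermediate widening:
 */
#if 0
static void
avg4_filter_scalar(png_bytep row, png_const_bytep prev_row,
    png_size_t rowbytes)
{
   png_size_t i;

   for (i = 0; i < 4; i++)            /* left neighbour is zero */
      row[i] = (png_byte) (row[i] + (prev_row[i] >> 1));

   for (i = 4; i < rowbytes; i++)
      row[i] = (png_byte) (row[i] + ((row[i - 4] + prev_row[i]) >> 1));
}
#endif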

void png_read_filter_row_avg3_msa(png_row_infop row_info, png_bytep row,
                                  png_const_bytep prev_row)
{
   png_size_t i;
   png_bytep src = row;
   png_bytep nxt = row;
   png_const_bytep pp = prev_row;
   png_size_t istop = row_info->rowbytes - 3;
   int64_t out0;
   int32_t inp0, inp1, out1;
   int16_t out2;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, dst0, dst1;
   v16u8 zero = { 0 };
   v16i8 mask0 = { 0, 1, 2, 16, 17, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   v16i8 mask1 = { 0, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 21, 0, 0, 0, 0 };

   inp0 = LW(pp);
   pp += 3;
   inp1 = LW(src);
   src += 3;

   /* First pixel: no 'left' neighbour, so add floor(up / 2) per byte. */
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);
   src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);
   src0 = (v16u8) MSA_SRLI_B(src0, 1);
   src1 += src0;
   out2 = __msa_copy_s_h((v8i16) src1, 0);
   SH(out2, nxt);
   nxt += 2;
   *nxt = (png_byte) __msa_copy_s_b((v16i8) src1, 2);
   nxt++;

   for (i = 0; i < istop; i += 12)
   {
      src2 = LD_UB(pp);
      pp += 12;
      src6 = LD_UB(src);
      src += 12;

      SLDI_B2_0_UB(src2, src6, src3, src7, 3);
      SLDI_B2_0_UB(src2, src6, src4, src8, 6);
      SLDI_B2_0_UB(src2, src6, src5, src9, 9);

      src2 = __msa_ave_u_b(src2, src1);
      src6 += src2;
      src3 = __msa_ave_u_b(src3, src6);
      src7 += src3;
      src4 = __msa_ave_u_b(src4, src7);
      src8 += src4;
      src5 = __msa_ave_u_b(src5, src8);
      src9 += src5;
      src1 = src9; /* filtered pixel 4 is 'left' for the next block */

      VSHF_B2_UB(src6, src7, src8, src9, mask0, mask0, dst0, dst1);
      dst0 = (v16u8) __msa_vshf_b(mask1, (v16i8) dst1, (v16i8) dst0);
      out0 = __msa_copy_s_d((v2i64) dst0, 0);
      out1 = __msa_copy_s_w((v4i32) dst0, 2);

      SD(out0, nxt);
      nxt += 8;
      SW(out1, nxt);
      nxt += 4;
   }
}

void png_read_filter_row_paeth4_msa(png_row_infop row_info,
                                    png_bytep row,
                                    png_const_bytep prev_row)
{
   int32_t count, rp_end;
   png_bytep nxt;
   png_const_bytep prev_nxt;
   int32_t inp0, inp1, res0;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9;
   v16u8 src10, src11, src12, src13, dst0, dst1;
   v8i16 vec0, vec1, vec2;
   v16u8 zero = { 0 };

   nxt = row;
   prev_nxt = prev_row;

   inp0 = LW(nxt);
   inp1 = LW(prev_nxt);
   prev_nxt += 4;

   /* First pixel: a = c = 0, so the Paeth predictor reduces to b (up). */
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);
   src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);

   src1 += src0;
   res0 = __msa_copy_s_w((v4i32) src1, 0);

   SW(res0, nxt);
   nxt += 4;

   rp_end = row_info->rowbytes - 4;

   for (count = 0; count < rp_end; count += 16)
   {
      src2 = LD_UB(prev_nxt);   /* b: up */
      prev_nxt += 16;
      src6 = LD_UB(prev_row);   /* c: up-left */
      prev_row += 16;
      src10 = LD_UB(nxt);       /* raw bytes to be filtered */

      SLDI_B3_0_UB(src2, src6, src10, src3, src7, src11, 4);
      SLDI_B3_0_UB(src2, src6, src10, src4, src8, src12, 8);
      SLDI_B3_0_UB(src2, src6, src10, src5, src9, src13, 12);

      ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10);

      ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src10, src3, src7, src11);

      ILVR_B2_SH(src4, src8, src11, src8, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src11, src4, src8, src12);

      ILVR_B2_SH(src5, src9, src12, src9, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src12, src5, src9, src13);

      src1 = src13; /* filtered pixel 4 is 'a' for the next block */

      ILVEV_W2_UB(src10, src11, src12, src1, dst0, dst1);
      dst0 = (v16u8) __msa_pckev_d((v2i64) dst1, (v2i64) dst0);

      ST_UB(dst0, nxt);
      nxt += 16;
   }
}
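
/* The Paeth predictor is serial in its 'left' input, so each 16-byte block
 * above runs four HSUB/ADD_ABS/CMP_AND_SELECT rounds, feeding each round's
 * output (src10..src13) in as the 'a' operand of the next round before the
 * four filtered pixels are packed with ILVEV_W2_UB/pckev_d and stored.
 */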

void png_read_filter_row_paeth3_msa(png_row_infop row_info,
                                    png_bytep row,
                                    png_const_bytep prev_row)
{
   int32_t count, rp_end;
   png_bytep nxt;
   png_const_bytep prev_nxt;
   int64_t out0;
   int32_t inp0, inp1, out1;
   int16_t out2;
   v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, dst0, dst1;
   v16u8 src10, src11, src12, src13;
   v8i16 vec0, vec1, vec2;
   v16u8 zero = { 0 };
   v16i8 mask0 = { 0, 1, 2, 16, 17, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
   v16i8 mask1 = { 0, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 21, 0, 0, 0, 0 };

   nxt = row;
   prev_nxt = prev_row;

   inp0 = LW(nxt);
   inp1 = LW(prev_nxt);
   prev_nxt += 3;

   /* First pixel: a = c = 0, so the Paeth predictor reduces to b (up). */
   src0 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp0);
   src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);

   src1 += src0;
   out2 = __msa_copy_s_h((v8i16) src1, 0);

   SH(out2, nxt);
   nxt += 2;
   *nxt = (png_byte) __msa_copy_s_b((v16i8) src1, 2);
   nxt++;

   rp_end = row_info->rowbytes - 3;

   for (count = 0; count < rp_end; count += 12)
   {
      src2 = LD_UB(prev_nxt);   /* b: up */
      prev_nxt += 12;
      src6 = LD_UB(prev_row);   /* c: up-left */
      prev_row += 12;
      src10 = LD_UB(nxt);       /* raw bytes to be filtered */

      SLDI_B3_0_UB(src2, src6, src10, src3, src7, src11, 3);
      SLDI_B3_0_UB(src2, src6, src10, src4, src8, src12, 6);
      SLDI_B3_0_UB(src2, src6, src10, src5, src9, src13, 9);

      ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10);

      ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src10, src3, src7, src11);

      ILVR_B2_SH(src4, src8, src11, src8, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src11, src4, src8, src12);

      ILVR_B2_SH(src5, src9, src12, src9, vec0, vec1);
      HSUB_UB2_SH(vec0, vec1, vec0, vec1);
      vec2 = vec0 + vec1;
      ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);
      CMP_AND_SELECT(vec0, vec1, vec2, src12, src5, src9, src13);

      src1 = src13; /* filtered pixel 4 is 'a' for the next block */

      VSHF_B2_UB(src10, src11, src12, src13, mask0, mask0, dst0, dst1);
      dst0 = (v16u8) __msa_vshf_b(mask1, (v16i8) dst1, (v16i8) dst0);
      out0 = __msa_copy_s_d((v2i64) dst0, 0);
      out1 = __msa_copy_s_w((v4i32) dst0, 2);

      SD(out0, nxt);
      nxt += 8;
      SW(out1, nxt);
      nxt += 4;
   }
}

#endif /* PNG_MIPS_MSA_OPT > 0 */
#endif /* PNG_MIPS_MSA_IMPLEMENTATION == 1 (intrinsics) */
#endif /* PNG_READ_SUPPORTED */