/*
 * Copyright (C) 2000-2003 Michel Lespinasse <walken@zoy.org>
 * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
 *
 * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
 * See http://libmpeg2.sourceforge.net/ for updates.
 *
 * mpeg2dec is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * mpeg2dec is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef HAVE_ALTIVEC_H
#include <altivec.h>
#endif
#include <inttypes.h>

#include "attributes.h"
#include "mpeg2_internal.h"
typedef vector signed char vector_s8_t;
typedef vector unsigned char vector_u8_t;
typedef vector signed short vector_s16_t;
typedef vector unsigned short vector_u16_t;
typedef vector signed int vector_s32_t;
typedef vector unsigned int vector_u32_t;
#if defined(HAVE_ALTIVEC_H) && !defined(__APPLE_CC__) && (__GNUC__ * 100 + __GNUC_MINOR__ < 303)
/* work around gcc <3.3 vec_mergel bug */
static inline vector_s16_t my_vec_mergel (vector_s16_t const A,
                                          vector_s16_t const B)
{
    static const vector_u8_t mergel = {
        0x08, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x1a, 0x1b,
        0x0c, 0x0d, 0x1c, 0x1d, 0x0e, 0x0f, 0x1e, 0x1f
    };
    return vec_perm (A, B, mergel);
}
#undef vec_mergel
#define vec_mergel my_vec_mergel
#endif
#ifdef HAVE_ALTIVEC_H   /* gnu */
#define VEC_S16(a,b,c,d,e,f,g,h) {a, b, c, d, e, f, g, h}
#else                   /* apple */
#define VEC_S16(a,b,c,d,e,f,g,h) (vector_s16_t) (a, b, c, d, e, f, g, h)
#endif
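/* IDCT constants in 16-bit fixed point.  The splats below read the first
 * vector as c4 = 23170 (~cos(pi/4)*2^15), a0 = 13573 (~tan(pi/8)*2^15),
 * a1 = 6518 (~tan(pi/16)*2^15), a2 = 21895 (~tan(3pi/16)*2^15), their
 * negatives mc4/ma2, and a (32, 31) halfword pair that is splatted as one
 * 32-bit element to form the rounding bias.  constants_1..constants_4
 * appear to be the per-vector prescale multipliers fed to vec_mradds in
 * the first pass (interpretation inferred from the values, not stated in
 * the original source). */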
static const vector_s16_t constants ATTR_ALIGN(16) =
    VEC_S16 (23170, 13573, 6518, 21895, -23170, -21895, 32, 31);
static const vector_s16_t constants_1 ATTR_ALIGN(16) =
    VEC_S16 (16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725);
static const vector_s16_t constants_2 ATTR_ALIGN(16) =
    VEC_S16 (16069, 22289, 20995, 18895, 16069, 18895, 20995, 22289);
static const vector_s16_t constants_3 ATTR_ALIGN(16) =
    VEC_S16 (21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692);
static const vector_s16_t constants_4 ATTR_ALIGN(16) =
    VEC_S16 (13623, 18895, 17799, 16019, 13623, 16019, 17799, 18895);
#define IDCT \
    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
    vector_u16_t shift; \
    \
    c4 = vec_splat (constants, 0); \
    a0 = vec_splat (constants, 1); \
    a1 = vec_splat (constants, 2); \
    a2 = vec_splat (constants, 3); \
    mc4 = vec_splat (constants, 4); \
    ma2 = vec_splat (constants, 5); \
    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants, 3); \
    \
    zero = vec_splat_s16 (0); \
    \
    /* first 1-D pass over the eight input vectors */ \
    vx0 = vec_adds (block[0], block[4]); \
    vx4 = vec_subs (block[0], block[4]); \
    t5 = vec_mradds (vx0, constants_1, zero); \
    t0 = vec_mradds (vx4, constants_1, zero); \
    \
    vx1 = vec_mradds (a1, block[7], block[1]); \
    vx7 = vec_mradds (a1, block[1], vec_subs (zero, block[7])); \
    t1 = vec_mradds (vx1, constants_2, zero); \
    t8 = vec_mradds (vx7, constants_2, zero); \
    \
    vx2 = vec_mradds (a0, block[6], block[2]); \
    vx6 = vec_mradds (a0, block[2], vec_subs (zero, block[6])); \
    t2 = vec_mradds (vx2, constants_3, zero); \
    t4 = vec_mradds (vx6, constants_3, zero); \
    \
    vx3 = vec_mradds (block[3], constants_4, zero); \
    vx5 = vec_mradds (block[5], constants_4, zero); \
    t7 = vec_mradds (a2, vx5, vx3); \
    t3 = vec_mradds (ma2, vx3, vx5); \
    \
    t6 = vec_adds (t8, t3); \
    t3 = vec_subs (t8, t3); \
    t8 = vec_subs (t1, t7); \
    t1 = vec_adds (t1, t7); \
    t6 = vec_mradds (a0, t6, t6); /* a0+1 == 2*c4 */ \
    t1 = vec_mradds (a0, t1, t1); /* a0+1 == 2*c4 */ \
    \
    t7 = vec_adds (t5, t2); \
    t2 = vec_subs (t5, t2); \
    t5 = vec_adds (t0, t4); \
    t0 = vec_subs (t0, t4); \
    t4 = vec_subs (t8, t3); \
    t3 = vec_adds (t8, t3); \
    \
    vy0 = vec_adds (t7, t1); \
    vy7 = vec_subs (t7, t1); \
    vy1 = vec_adds (t5, t3); \
    vy6 = vec_subs (t5, t3); \
    vy2 = vec_adds (t0, t4); \
    vy5 = vec_subs (t0, t4); \
    vy3 = vec_adds (t2, t6); \
    vy4 = vec_subs (t2, t6); \
    \
    /* transpose the 8x8 matrix of 16-bit samples (three merge stages) */ \
    vx0 = vec_mergeh (vy0, vy4); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
    \
    vy0 = vec_mergeh (vx0, vx4); \
    vy1 = vec_mergel (vx0, vx4); \
    vy2 = vec_mergeh (vx1, vx5); \
    vy3 = vec_mergel (vx1, vx5); \
    vy4 = vec_mergeh (vx2, vx6); \
    vy5 = vec_mergel (vx2, vx6); \
    vy6 = vec_mergeh (vx3, vx7); \
    vy7 = vec_mergel (vx3, vx7); \
    \
    vx0 = vec_mergeh (vy0, vy4); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
    \
    /* second 1-D pass; the bias added to vx0 rounds every output before the final shift */ \
    vx0 = vec_adds (vx0, bias); \
    t5 = vec_adds (vx0, vx4); \
    t0 = vec_subs (vx0, vx4); \
    \
    t1 = vec_mradds (a1, vx7, vx1); \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
    \
    t2 = vec_mradds (a0, vx6, vx2); \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
    \
    t7 = vec_mradds (a2, vx5, vx3); \
    t3 = vec_mradds (ma2, vx3, vx5); \
    \
    t6 = vec_adds (t8, t3); \
    t3 = vec_subs (t8, t3); \
    t8 = vec_subs (t1, t7); \
    t1 = vec_adds (t1, t7); \
    \
    t7 = vec_adds (t5, t2); \
    t2 = vec_subs (t5, t2); \
    t5 = vec_adds (t0, t4); \
    t0 = vec_subs (t0, t4); \
    t4 = vec_subs (t8, t3); \
    t3 = vec_adds (t8, t3); \
    \
    vy0 = vec_adds (t7, t1); \
    vy7 = vec_subs (t7, t1); \
    vy1 = vec_mradds (c4, t3, t5); \
    vy6 = vec_mradds (mc4, t3, t5); \
    vy2 = vec_mradds (c4, t4, t0); \
    vy5 = vec_mradds (mc4, t4, t0); \
    vy3 = vec_adds (t2, t6); \
    vy4 = vec_subs (t2, t6); \
    \
    /* descale: arithmetic shift right by 6 */ \
    shift = vec_splat_u16 (6); \
    vx0 = vec_sra (vy0, shift); \
    vx1 = vec_sra (vy1, shift); \
    vx2 = vec_sra (vy2, shift); \
    vx3 = vec_sra (vy3, shift); \
    vx4 = vec_sra (vy4, shift); \
    vx5 = vec_sra (vy5, shift); \
    vx6 = vec_sra (vy6, shift); \
    vx7 = vec_sra (vy7, shift);
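/* After IDCT expands, vx0..vx7 hold the eight descaled output rows (already
 * shifted right by 6) and zero holds 0; the two entry points below either
 * store or add those rows to dest and then reuse zero to clear the
 * coefficient block. */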
void mpeg2_idct_copy_altivec (int16_t * const _block, uint8_t * dest,
                              const int stride)
{
    vector_s16_t * const block = (vector_s16_t *)_block;
    vector_u8_t tmp;

    IDCT

#define COPY(dest,src) \
    tmp = vec_packsu (src, src); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
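/* COPY saturates the eight 16-bit results of one row to unsigned bytes
 * (vec_packsu duplicates them into both halves of tmp) and writes them back
 * with two word-granular vec_ste stores; this appears to assume dest is at
 * least 8-byte aligned so the stores pick the right words of tmp. */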
    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

    block[0] = block[1] = block[2] = block[3] = zero;
    block[4] = block[5] = block[6] = block[7] = zero;
}
void mpeg2_idct_add_altivec (const int last, int16_t * const _block,
                             uint8_t * dest, const int stride)
{
    vector_s16_t * const block = (vector_s16_t *)_block;
    vector_u8_t tmp;
    vector_s16_t tmp2, tmp3;
    vector_u8_t perm0;
    vector_u8_t perm1;
    vector_u8_t p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
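/* perm0/perm1 interleave an all-ones selector with the vec_lvsl alignment
 * permutes for the even and odd destination rows; applied in ADD below, they
 * expand the eight destination bytes to 16-bit values (the 0xff indices pick
 * bytes from the zero vector, the lvsl indices pick the pixels). */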
#define ADD(dest,src,perm) \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
    tmp = vec_ld (0, dest); \
    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
    ADD (dest, vx0, perm0)    dest += stride;
    ADD (dest, vx1, perm1)    dest += stride;
    ADD (dest, vx2, perm0)    dest += stride;
    ADD (dest, vx3, perm1)    dest += stride;
    ADD (dest, vx4, perm0)    dest += stride;
    ADD (dest, vx5, perm1)    dest += stride;
    ADD (dest, vx6, perm0)    dest += stride;
    ADD (dest, vx7, perm1)

    block[0] = block[1] = block[2] = block[3] = zero;
    block[4] = block[5] = block[6] = block[7] = zero;
}
void mpeg2_idct_altivec_init (void)
{
    int i, j;

    /* the altivec idct uses a transposed input, so we patch scan tables */
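    /* e.g. zig-zag entry 10 (row 1, column 2 of the 8x8 block) becomes
     * (10 >> 3) | ((10 & 7) << 3) = 17, i.e. row 2, column 1 */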
    for (i = 0; i < 64; i++) {
        j = mpeg2_scan_norm[i];
        mpeg2_scan_norm[i] = (j >> 3) | ((j & 7) << 3);
        j = mpeg2_scan_alt[i];
        mpeg2_scan_alt[i] = (j >> 3) | ((j & 7) << 3);
    }
}