/*
 * Copyright (C) 2012 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);
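
	/* dp now holds Pxy and dq holds Qxy: the syndromes computed
	   with the two failed blocks zeroed out */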

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
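
	/*
	 * Recovery algebra, as in the generic C version in
	 * lib/raid6/recov.c: with px = P ^ Pxy and qx = qmul * (Q ^ Qxy),
	 * the failed blocks are DB = pbmul * px ^ qx and DA = DB ^ px.
	 * Each raid6_vgfmul[c] entry is a pair of 16-byte pshufb lookup
	 * tables (low nibble, high nibble) for the GF(256) constant c.
	 */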

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));
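
	/*
	 * xmm7 keeps the 0x0f mask: a GF(256) multiply below splits each
	 * byte into nibbles (pand for the low half, psraw $4 plus pand
	 * for the high half), uses each nibble as a pshufb index into a
	 * 16-entry table, and xors the two partial products.
	 */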

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 hold the tables preloaded above */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor %0,%%xmm8" : : "m" (dp[16]));
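
		/* xmm0/8 = px, xmm1/9 = q ^ dq */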

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw $4,%xmm1");
		asm volatile("psraw $4,%xmm9");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pand %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor %xmm4,%xmm5");
		asm volatile("pxor %xmm12,%xmm13");
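
		/* xmm5/13 = qx = qmul[q ^ dq] */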

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw $4,%xmm2");
		asm volatile("psraw $4,%xmm10");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm11");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pand %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor %xmm4,%xmm1");
		asm volatile("pxor %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		asm volatile("pxor %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("pxor %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor %0,%%xmm0" : : "m" (*dp));
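
		/* xmm1 = q ^ dq, xmm0 = p ^ dp (= px) */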

		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw $4,%xmm1");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */
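
		/* xmm5 = qx */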

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw $4,%xmm2");
		asm volatile("pand %xmm7,%xmm3");
		asm volatile("pand %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
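
	/*
	 * With block faila zeroed, gen_syndrome() leaves Qx in dq and
	 * P ^ Dx in p.  Since Q = Qx ^ g^faila * Dx, the lost block is
	 * Dx = (Q ^ Qx) * g^(-faila); qmul is the pshufb table pair for
	 * that constant, and P is then repaired as p ^ Dx.
	 */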

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};

#endif