#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>
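/*
 * Asynchronous RAID-6 recovery calculations for the async_tx API: the
 * helpers below rebuild two missing data blocks, or one data block plus
 * the P parity, offloading the GF(256) arithmetic to a DMA channel when
 * one is available and falling back to the synchronous lib/raid6
 * routines when it is not.
 */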
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
		  size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, srcs, 2, len);
	const u8 *amul, *bmul;
	u8 *a, *b, *c;

	/* hardware path via 'chan' elided; run the operation synchronously */
	amul = raid6_gfmul[coef[0]];
	bmul = raid6_gfmul[coef[1]];
	a = page_address(srcs[0]);
	b = page_address(srcs[1]);
	c = page_address(dest);
	while (len--)
		*c++ = amul[*a++] ^ bmul[*b++];
	return NULL;
}
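/*
 * Sketch of what async_sum_product() computes: dest = amul(srcs[0]) ^
 * bmul(srcs[1]), a two-term sum of GF(256) products done via one
 * raid6_gfmul[] table row per coefficient.  The recovery routines below
 * pass coef[0] = (g^(failb-faila) + 1)^-1 and coef[1] =
 * (g^faila + g^failb)^-1, which combine the P and Q deltas directly
 * into one of the two missing data blocks.
 */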
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
	   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, &src, 1, len);
	const u8 *qmul;
	u8 *d, *s;

	/* hardware path via 'chan' elided; run the operation synchronously */
	qmul = raid6_gfmul[coef];
	d = page_address(dest);
	s = page_address(src);
	while (len--)
		*d++ = qmul[*s++];
	return NULL;
}
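/*
 * async_mult() scales a whole page by a single GF(256) coefficient:
 * raid6_gfmul[coef] is the precomputed 256-byte row of products
 * coef * b for every byte value b, with multiplication taken modulo
 * the RAID-6 field polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11d).
 */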
static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p = blocks[disks-2], *q = blocks[disks-1];
	struct page *a = blocks[faila], *b = blocks[failb];
	struct page *srcs[2];
	unsigned char coef[2];

	/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
	/* Dx = A*(P+Pxy) + B*(Q+Qxy); init_async_submit() chaining elided */
	srcs[0] = p;
	srcs[1] = q;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	tx = async_sum_product(b, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = p;
	srcs[1] = b;
	tx = async_xor(a, srcs, 0, 2, bytes, submit);

	return tx;
}
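/*
 * Why the two steps above suffice (a sketch, with x = faila and
 * y = failb): the P/Q definitions give P + Pxy = Dx + Dy and
 * Q + Qxy = g^x*Dx + g^y*Dy.  Solving that 2x2 system in GF(256)
 * recovers one failed block from async_sum_product() with the
 * coefficients (g^(y-x) + 1)^-1 and (g^x + g^y)^-1, and the other from
 * a single XOR, since Dx + Dy = P + Pxy.  raid6_gfexp, raid6_gfinv and
 * raid6_gfexi are the lib/raid6 lookup tables for g^i, i^-1 and
 * (g^i + 1)^-1 respectively.
 */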
static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *g, *dp, *dq;
	struct page *srcs[2];
	unsigned char coef[2];
	int good_srcs, good, i;

	/* find the one good data disk that survives alongside p and q */
	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (blocks[i] == NULL)
			continue;
		if (i == faila || i == failb)
			continue;
		good = i;
		good_srcs++;
	}
	BUG_ON(good_srcs > 1);
	p = blocks[disks-2];
	q = blocks[disks-1];
	g = blocks[good];

	/* dead data pages serve as delta p/q; init_async_submit() elided */
	dp = blocks[faila];
	dq = blocks[failb];
	tx = async_memcpy(dp, g, 0, 0, bytes, submit);
	tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);

	/* compute P + Pxy and Q + Qxy */
	srcs[0] = dp; srcs[1] = p;
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
	srcs[0] = dq; srcs[1] = q;
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp; srcs[1] = dq;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	tx = async_sum_product(dq, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
	return tx;
}
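/*
 * The 5 disk case exists because DMA engines do not uniformly accept a
 * single-source P+Q operation, so instead of async_gen_syndrome() the
 * one good data block g is copied into delta-p and multiplied by
 * raid6_gfexp[good] into delta-q: exactly the syndrome a one-source
 * gen_syndrome would produce.
 */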
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p = blocks[disks-2], *q = blocks[disks-1];
	struct page *dp, *dq;
	struct page *srcs[2];
	unsigned char coef[2];

	/* compute the syndrome with zero in place of the missing data
	 * pages, using the dead pages as temporary storage for delta p
	 * and delta q; init_async_submit() chaining is elided
	 */
	dp = blocks[faila];
	blocks[faila] = NULL;
	blocks[disks-2] = dp;
	dq = blocks[failb];
	blocks[failb] = NULL;
	blocks[disks-1] = dq;
	tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);

	/* restore the pointer table */
	blocks[faila] = dp;
	blocks[failb] = dq;
	blocks[disks-2] = p;
	blocks[disks-1] = q;

	/* compute P + Pxy and Q + Qxy */
	srcs[0] = dp; srcs[1] = p;
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);
	srcs[0] = dq; srcs[1] = q;
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp; srcs[1] = dq;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	tx = async_sum_product(dq, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);

	return tx;
}
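/*
 * __2data_recov_n() is the general path: async_gen_syndrome()
 * regenerates P and Q over the surviving data (the missing slots count
 * as zero), and the two XORs against the stale p and q pages leave
 * exactly the deltas P + Pxy and Q + Qxy that the sum-product/XOR pair
 * then resolves into the two missing blocks, as in the 4 and 5 disk
 * cases above.
 */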
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
			struct page **blocks, struct async_submit_ctl *submit)
{
	void *scribble = submit->scribble;
	int non_zero_srcs, i;

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
	/* without a dma channel or a scribble buffer, punt to the
	 * synchronous lib/raid6 path
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]);
		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
		async_tx_sync_epilog(submit);
		return NULL;
	}

	/* count the non-NULL sources to pick a recovery strategy */
	non_zero_srcs = 0;
	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
		if (blocks[i])
			non_zero_srcs++;
	switch (non_zero_srcs) {
	case 2: /* 4 disk array: both data disks missing */
		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
	case 3: /* 5 disk array: 2 of the 3 data disks missing */
		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
	default:
		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
	}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);
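/*
 * Illustrative call pattern only (the callback, context and scribble
 * names here are hypothetical, not taken from this file): a caller such
 * as the md/raid5 driver fills blocks[] with the stripe's pages, the
 * last two entries being p and q and the faila/failb entries receiving
 * the recovered data:
 *
 *	struct async_submit_ctl submit;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
 *			  recovery_done_cb, ctx, scribble);
 *	tx = async_raid6_2data_recov(disks, bytes, faila, failb,
 *				     blocks, &submit);
 */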
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
			struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p = blocks[disks-2], *q = blocks[disks-1], *dq;
	u8 coef;
	void *scribble = submit->scribble;
	int good_srcs, good, i;
	struct page *srcs[2];

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
	/* no dma channel or scribble buffer: punt to the synchronous path */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]);
		raid6_datap_recov(disks, bytes, faila, ptrs);
		async_tx_sync_epilog(submit);
		return NULL;
	}

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (i == faila || !blocks[i])
			continue;
		good = i;
		if (++good_srcs > 1)
			break;
	}
	BUG_ON(good_srcs == 0);

	/* compute the syndrome with zero in place of the missing data page,
	 * using the dead page as delta q; init_async_submit() elided
	 */
	dq = blocks[faila];
	blocks[faila] = NULL;
	blocks[disks-1] = dq;
	if (good_srcs == 1) {
		/* 4 disk case: one multiply of the single good block */
		struct page *g = blocks[good];

		tx = async_memcpy(p, g, 0, 0, bytes, submit);
		tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
	} else {
		tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
	}

	/* restore the pointer table, then Dx = (Q + Qx) * g^(-faila) */
	blocks[faila] = dq;
	blocks[disks-1] = q;
	coef = raid6_gfinv[raid6_gfexp[faila]];
	srcs[0] = dq; srcs[1] = q;
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);
	tx = async_mult(dq, dq, coef, bytes, submit);

	/* P = P' + Dx */
	srcs[0] = p; srcs[1] = dq;
	tx = async_xor(p, srcs, 0, 2, bytes, submit);

	return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);
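/*
 * The algebra behind datap recovery, as a sketch: with the data block
 * at x = faila missing, the regenerated syndrome Qx = sum over i != x
 * of g^i * D_i differs from the stored Q by exactly g^x * Dx, so
 * Dx = (Q + Qx) * g^(-x) (the xor-then-mult steps above), and the lost
 * P parity is rebuilt as the sum of the surviving data blocks plus Dx
 * (the final xor into p).
 */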