#include <linux/kernel.h>
#include <linux/module.h>
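/* pq_scribble_page - scratch space to hold a throwaway P or Q buffer for
 * the synchronous gen_syndrome path
 */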
static struct page *pq_scribble_page;
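/* the struct page *blocks[] array passed to the gen_syndrome routines
 * carries the 'P' destination at blocks[disks-2] and the 'Q' destination
 * at blocks[disks-1]; these are macros because they are used as lvalues
 */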
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset,
		      int disks, size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
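	/* convert source addresses, collapsing 'empty' (NULL) sources and
	 * updating the coefficients accordingly
	 */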
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (blocks[i] == NULL)
			continue;
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
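		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */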
		if (src_cnt > pq_src_cnt) {
			submit->cb_fn = cb_fn_orig;
						     &coefs[src_off], len,
			dma_async_issue_pending(chan);
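		/* drop completed sources */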
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;
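/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */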
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
		srcs = (void **) blocks;
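	/* point any missing source block at the raid6 zero page */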
	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			srcs[i] = (void *) raid6_empty_zero_page;
	async_tx_sync_epilog(submit);
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
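		/* run the p+q asynchronously */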
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
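	/* run the pq synchronously */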
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
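/* with CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA set, no DMA channel is offered
 * for syndrome validation and the operation falls back to the synchronous
 * path
 */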
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	unsigned char coefs[disks-2];
	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
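		/* map the source blocks and record a coefficient for each one
		 * that is present, skipping NULL entries
		 */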
		for (i = 0; i < disks-2; i++)
				coefs[src_cnt] = raid6_gfexp[i];
			dma_async_issue_pending(chan);
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		void *cb_param_orig = submit->cb_param;
		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);
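		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */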
		BUG_ON(!spare || !scribble);
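			/* recompute P from the data blocks into the spare page
			 * so it can be compared with the stored value
			 */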
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
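			/* recompute Q into the spare page: drop P and point Q
			 * at the spare before regenerating the syndrome
			 */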
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
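		/* restore P, Q and the original submit parameters */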
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;
		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
static int __init async_pq_init(void)
	pq_scribble_page = alloc_page(GFP_KERNEL);
	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);
	return -ENOMEM;
static void __exit async_pq_exit(void)