Linux Kernel  3.7.1
dma_v3.c
1 /*
2  * This file is provided under a dual BSD/GPLv2 license. When using or
3  * redistributing this file, you may do so under either license.
4  *
5  * GPL LICENSE SUMMARY
6  *
7  * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms and conditions of the GNU General Public License,
11  * version 2, as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along with
19  * this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  *
22  * The full GNU General Public License is included in this distribution in
23  * the file called "COPYING".
24  *
25  * BSD LICENSE
26  *
27  * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
28  *
29  * Redistribution and use in source and binary forms, with or without
30  * modification, are permitted provided that the following conditions are met:
31  *
32  * * Redistributions of source code must retain the above copyright
33  * notice, this list of conditions and the following disclaimer.
34  * * Redistributions in binary form must reproduce the above copyright
35  * notice, this list of conditions and the following disclaimer in
36  * the documentation and/or other materials provided with the
37  * distribution.
38  * * Neither the name of Intel Corporation nor the names of its
39  * contributors may be used to endorse or promote products derived
40  * from this software without specific prior written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
43  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
46  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
47  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
48  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
50  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
51  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
52  * POSSIBILITY OF SUCH DAMAGE.
53  */
54 
55 /*
56  * Support routines for v3+ hardware
57  */
58 
59 #include <linux/pci.h>
60 #include <linux/gfp.h>
61 #include <linux/dmaengine.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/prefetch.h>
64 #include "../dmaengine.h"
65 #include "registers.h"
66 #include "hw.h"
67 #include "dma.h"
68 #include "dma_v2.h"
69 
70 /* ioat hardware assumes at least two sources for raid operations */
71 #define src_cnt_to_sw(x) ((x) + 2)
72 #define src_cnt_to_hw(x) ((x) - 2)
73 
74 /* provide a lookup table for setting the source address in the base or
75  * extended descriptor of an xor or pq descriptor
76  */
77 static const u8 xor_idx_to_desc = 0xe0;
78 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
79 static const u8 pq_idx_to_desc = 0xf8;
80 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
81 
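The two mask constants above decide, per source index, whether an address lands in the base descriptor (descs[0]) or the extended descriptor (descs[1]); the *_idx_to_field tables then pick the u64 slot inside that descriptor. As a minimal standalone illustration (editor's sketch, not part of this file), the loop below mirrors the (mask >> idx) & 1 selection used by xor_get_src()/xor_set_src():

/* Editor's sketch: print which descriptor and field each xor source maps to. */
#include <stdio.h>

int main(void)
{
	const unsigned char xor_idx_to_desc = 0xe0;
	const unsigned char xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
	int idx;

	for (idx = 0; idx < 8; idx++)
		printf("xor src %d -> desc %d, field %d\n", idx,
		       (xor_idx_to_desc >> idx) & 1, xor_idx_to_field[idx]);
	return 0;
}

Sources 0-4 resolve to the base descriptor and 5-7 to the extended descriptor, matching the five source slots available in the base xor descriptor.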
82 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
83 {
84  struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
85 
86  return raw->field[xor_idx_to_field[idx]];
87 }
88 
89 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
90  dma_addr_t addr, u32 offset, int idx)
91 {
92  struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
93 
94  raw->field[xor_idx_to_field[idx]] = addr + offset;
95 }
96 
97 static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
98 {
99  struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
100 
101  return raw->field[pq_idx_to_field[idx]];
102 }
103 
104 static void pq_set_src(struct ioat_raw_descriptor *descs[2],
105  dma_addr_t addr, u32 offset, u8 coef, int idx)
106 {
107  struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
108  struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
109 
110  raw->field[pq_idx_to_field[idx]] = addr + offset;
111  pq->coef[idx] = coef;
112 }
113 
114 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
115  struct ioat_ring_ent *desc, int idx)
116 {
117  struct ioat_chan_common *chan = &ioat->base;
118  struct pci_dev *pdev = chan->device->pdev;
119  size_t len = desc->len;
120  size_t offset = len - desc->hw->size;
121  struct dma_async_tx_descriptor *tx = &desc->txd;
122  enum dma_ctrl_flags flags = tx->flags;
123 
124  switch (desc->hw->ctl_f.op) {
125  case IOAT_OP_COPY:
126  if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
127  ioat_dma_unmap(chan, flags, len, desc->hw);
128  break;
129  case IOAT_OP_FILL: {
130  struct ioat_fill_descriptor *hw = desc->fill;
131 
132  if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
133  ioat_unmap(pdev, hw->dst_addr - offset, len,
134  PCI_DMA_FROMDEVICE, flags, 1);
135  break;
136  }
137  case IOAT_OP_XOR_VAL:
138  case IOAT_OP_XOR: {
139  struct ioat_xor_descriptor *xor = desc->xor;
140  struct ioat_ring_ent *ext;
141  struct ioat_xor_ext_descriptor *xor_ex = NULL;
142  int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
143  struct ioat_raw_descriptor *descs[2];
144  int i;
145 
146  if (src_cnt > 5) {
147  ext = ioat2_get_ring_ent(ioat, idx + 1);
148  xor_ex = ext->xor_ex;
149  }
150 
151  if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
152  descs[0] = (struct ioat_raw_descriptor *) xor;
153  descs[1] = (struct ioat_raw_descriptor *) xor_ex;
154  for (i = 0; i < src_cnt; i++) {
155  dma_addr_t src = xor_get_src(descs, i);
156 
157  ioat_unmap(pdev, src - offset, len,
158  PCI_DMA_TODEVICE, flags, 0);
159  }
160 
161  /* dest is a source in xor validate operations */
162  if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
163  ioat_unmap(pdev, xor->dst_addr - offset, len,
164  PCI_DMA_TODEVICE, flags, 1);
165  break;
166  }
167  }
168 
169  if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
170  ioat_unmap(pdev, xor->dst_addr - offset, len,
171  PCI_DMA_FROMDEVICE, flags, 1);
172  break;
173  }
174  case IOAT_OP_PQ_VAL:
175  case IOAT_OP_PQ: {
176  struct ioat_pq_descriptor *pq = desc->pq;
177  struct ioat_ring_ent *ext;
178  struct ioat_pq_ext_descriptor *pq_ex = NULL;
179  int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
180  struct ioat_raw_descriptor *descs[2];
181  int i;
182 
183  if (src_cnt > 3) {
184  ext = ioat2_get_ring_ent(ioat, idx + 1);
185  pq_ex = ext->pq_ex;
186  }
187 
188  /* in the 'continue' case don't unmap the dests as sources */
189  if (dmaf_p_disabled_continue(flags))
190  src_cnt--;
191  else if (dmaf_continue(flags))
192  src_cnt -= 3;
193 
194  if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
195  descs[0] = (struct ioat_raw_descriptor *) pq;
196  descs[1] = (struct ioat_raw_descriptor *) pq_ex;
197  for (i = 0; i < src_cnt; i++) {
198  dma_addr_t src = pq_get_src(descs, i);
199 
200  ioat_unmap(pdev, src - offset, len,
201  PCI_DMA_TODEVICE, flags, 0);
202  }
203 
204  /* the dests are sources in pq validate operations */
205  if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
206  if (!(flags & DMA_PREP_PQ_DISABLE_P))
207  ioat_unmap(pdev, pq->p_addr - offset,
208  len, PCI_DMA_TODEVICE, flags, 0);
209  if (!(flags & DMA_PREP_PQ_DISABLE_Q))
210  ioat_unmap(pdev, pq->q_addr - offset,
211  len, PCI_DMA_TODEVICE, flags, 0);
212  break;
213  }
214  }
215 
216  if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
217  if (!(flags & DMA_PREP_PQ_DISABLE_P))
218  ioat_unmap(pdev, pq->p_addr - offset, len,
219  PCI_DMA_BIDIRECTIONAL, flags, 1);
220  if (!(flags & DMA_PREP_PQ_DISABLE_Q))
221  ioat_unmap(pdev, pq->q_addr - offset, len,
222  PCI_DMA_BIDIRECTIONAL, flags, 1);
223  }
224  break;
225  }
226  default:
227  dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
228  __func__, desc->hw->ctl_f.op);
229  }
230 }
231 
232 static bool desc_has_ext(struct ioat_ring_ent *desc)
233 {
234  struct ioat_dma_descriptor *hw = desc->hw;
235 
236  if (hw->ctl_f.op == IOAT_OP_XOR ||
237  hw->ctl_f.op == IOAT_OP_XOR_VAL) {
238  struct ioat_xor_descriptor *xor = desc->xor;
239 
240  if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
241  return true;
242  } else if (hw->ctl_f.op == IOAT_OP_PQ ||
243  hw->ctl_f.op == IOAT_OP_PQ_VAL) {
244  struct ioat_pq_descriptor *pq = desc->pq;
245 
246  if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
247  return true;
248  }
249 
250  return false;
251 }
252 
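desc_has_ext() encodes the ring-slot accounting used throughout the prep routines: an xor with more than 5 sources, or a pq with more than 3, spills its remaining source addresses into an extended descriptor that occupies the next ring entry. A compact restatement of that rule (editor's sketch, not part of the driver):

/* Editor's sketch: ring entries consumed per transfer chunk, per the
 * thresholds checked in desc_has_ext() above.
 */
static inline int ring_slots_needed(int is_pq, int src_cnt)
{
	int base_limit = is_pq ? 3 : 5;	/* sources that fit in the base descriptor */

	return src_cnt > base_limit ? 2 : 1;
}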
260 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
261 {
262  struct ioat_chan_common *chan = &ioat->base;
263  struct ioat_ring_ent *desc;
264  bool seen_current = false;
265  int idx = ioat->tail, i;
266  u16 active;
267 
268  dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
269  __func__, ioat->head, ioat->tail, ioat->issued);
270 
271  active = ioat2_ring_active(ioat);
272  for (i = 0; i < active && !seen_current; i++) {
273  struct dma_async_tx_descriptor *tx;
274 
275  smp_read_barrier_depends();
276  prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
277  desc = ioat2_get_ring_ent(ioat, idx + i);
278  dump_desc_dbg(ioat, desc);
279  tx = &desc->txd;
280  if (tx->cookie) {
281  dma_cookie_complete(tx);
282  ioat3_dma_unmap(ioat, desc, idx + i);
283  if (tx->callback) {
284  tx->callback(tx->callback_param);
285  tx->callback = NULL;
286  }
287  }
288 
289  if (tx->phys == phys_complete)
290  seen_current = true;
291 
292  /* skip extended descriptors */
293  if (desc_has_ext(desc)) {
294  BUG_ON(i + 1 >= active);
295  i++;
296  }
297  }
298  smp_mb(); /* finish all descriptor reads before incrementing tail */
299  ioat->tail = idx + i;
300  BUG_ON(active && !seen_current); /* no active descs have written a completion? */
301  chan->last_completion = phys_complete;
302 
303  if (active - i == 0) {
304  dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
305  __func__);
306  clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
307  mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
308  }
309  /* 5 microsecond delay per pending descriptor */
310  writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
311  chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
312 }
313 
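The final writew() in __cleanup() programs interrupt coalescing: 5 microseconds of delay per descriptor still outstanding after cleanup, clamped to the register maximum IOAT_INTRDELAY_MASK. A plain-C restatement of the value being written (editor's sketch; the mask is passed in as an opaque limit rather than taken from registers.h):

/* Editor's sketch of the coalescing value computed at the end of __cleanup(). */
static unsigned short intrdelay_value(unsigned int still_pending,
				      unsigned short intrdelay_mask)
{
	unsigned int usecs = 5 * still_pending;	/* 5 us per pending descriptor */

	return usecs < intrdelay_mask ? usecs : intrdelay_mask;
}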
314 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
315 {
316  struct ioat_chan_common *chan = &ioat->base;
317  dma_addr_t phys_complete;
318 
319  spin_lock_bh(&chan->cleanup_lock);
320  if (ioat_cleanup_preamble(chan, &phys_complete))
321  __cleanup(ioat, phys_complete);
322  spin_unlock_bh(&chan->cleanup_lock);
323 }
324 
325 static void ioat3_cleanup_event(unsigned long data)
326 {
327  struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
328 
329  ioat3_cleanup(ioat);
330  writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
331 }
332 
333 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
334 {
335  struct ioat_chan_common *chan = &ioat->base;
336  dma_addr_t phys_complete;
337 
338  ioat2_quiesce(chan, 0);
339  if (ioat_cleanup_preamble(chan, &phys_complete))
340  __cleanup(ioat, phys_complete);
341 
342  __ioat2_restart_chan(ioat);
343 }
344 
345 static void ioat3_timer_event(unsigned long data)
346 {
347  struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
348  struct ioat_chan_common *chan = &ioat->base;
349 
350  if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
351  dma_addr_t phys_complete;
352  u64 status;
353 
354  status = ioat_chansts(chan);
355 
356  /* when halted due to errors check for channel
357  * programming errors before advancing the completion state
358  */
359  if (is_ioat_halted(status)) {
360  u32 chanerr;
361 
362  chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
363  dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
364  __func__, chanerr);
365  if (test_bit(IOAT_RUN, &chan->state))
366  BUG_ON(is_ioat_bug(chanerr));
367  else /* we never got off the ground */
368  return;
369  }
370 
371  /* if we haven't made progress and we have already
372  * acknowledged a pending completion once, then be more
373  * forceful with a restart
374  */
375  spin_lock_bh(&chan->cleanup_lock);
376  if (ioat_cleanup_preamble(chan, &phys_complete))
377  __cleanup(ioat, phys_complete);
378  else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
379  spin_lock_bh(&ioat->prep_lock);
380  ioat3_restart_channel(ioat);
381  spin_unlock_bh(&ioat->prep_lock);
382  } else {
383  set_bit(IOAT_COMPLETION_ACK, &chan->state);
384  mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
385  }
386  spin_unlock_bh(&chan->cleanup_lock);
387  } else {
388  u16 active;
389 
390  /* if the ring is idle, empty, and oversized try to step
391  * down the size
392  */
393  spin_lock_bh(&chan->cleanup_lock);
394  spin_lock_bh(&ioat->prep_lock);
395  active = ioat2_ring_active(ioat);
396  if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
397  reshape_ring(ioat, ioat->alloc_order-1);
398  spin_unlock_bh(&ioat->prep_lock);
399  spin_unlock_bh(&chan->cleanup_lock);
400 
401  /* keep shrinking until we get back to our minimum
402  * default size
403  */
404  if (ioat->alloc_order > ioat_get_alloc_order())
405  mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
406  }
407 }
408 
409 static enum dma_status
410 ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
411  struct dma_tx_state *txstate)
412 {
413  struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
414  enum dma_status ret;
415 
416  ret = dma_cookie_status(c, cookie, txstate);
417  if (ret == DMA_SUCCESS)
418  return ret;
419 
420  ioat3_cleanup(ioat);
421 
422  return dma_cookie_status(c, cookie, txstate);
423 }
424 
425 static struct dma_async_tx_descriptor *
426 ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
427  size_t len, unsigned long flags)
428 {
429  struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
430  struct ioat_ring_ent *desc;
431  size_t total_len = len;
432  struct ioat_fill_descriptor *fill;
433  u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
434  int num_descs, idx, i;
435 
436  num_descs = ioat2_xferlen_to_descs(ioat, len);
437  if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
438  idx = ioat->head;
439  else
440  return NULL;
441  i = 0;
442  do {
443  size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
444 
445  desc = ioat2_get_ring_ent(ioat, idx + i);
446  fill = desc->fill;
447 
448  fill->size = xfer_size;
449  fill->src_data = src_data;
450  fill->dst_addr = dest;
451  fill->ctl = 0;
452  fill->ctl_f.op = IOAT_OP_FILL;
453 
454  len -= xfer_size;
455  dest += xfer_size;
456  dump_desc_dbg(ioat, desc);
457  } while (++i < num_descs);
458 
459  desc->txd.flags = flags;
460  desc->len = total_len;
461  fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
462  fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
463  fill->ctl_f.compl_write = 1;
464  dump_desc_dbg(ioat, desc);
465 
466  /* we leave the channel locked to ensure in order submission */
467  return &desc->txd;
468 }
469 
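ioat3_prep_memset_lock() builds the 64-bit fill pattern by multiplying the fill byte by 0x0101010101010101, which replicates it into every byte lane of src_data as the FILL descriptor expects. A standalone demonstration of that expression (editor's sketch, not part of the kernel build):

/* Editor's sketch: byte replication used for the memset source data. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int value = 0xab;	/* arbitrary fill byte */
	uint64_t src_data = 0x0101010101010101ULL * (value & 0xff);

	/* prints 0xabababababababab: the byte replicated across all eight lanes */
	printf("%#llx\n", (unsigned long long)src_data);
	return 0;
}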
470 static struct dma_async_tx_descriptor *
471 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
472  dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
473  size_t len, unsigned long flags)
474 {
475  struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
476  struct ioat_ring_ent *compl_desc;
477  struct ioat_ring_ent *desc;
478  struct ioat_ring_ent *ext;
479  size_t total_len = len;
480  struct ioat_xor_descriptor *xor;
481  struct ioat_xor_ext_descriptor *xor_ex = NULL;
482  struct ioat_dma_descriptor *hw;
483  int num_descs, with_ext, idx, i;
484  u32 offset = 0;
485  u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
486 
487  BUG_ON(src_cnt < 2);
488 
489  num_descs = ioat2_xferlen_to_descs(ioat, len);
490  /* we need 2x the number of descriptors to cover greater than 5
491  * sources
492  */
493  if (src_cnt > 5) {
494  with_ext = 1;
495  num_descs *= 2;
496  } else
497  with_ext = 0;
498 
499  /* completion writes from the raid engine may pass completion
500  * writes from the legacy engine, so we need one extra null
501  * (legacy) descriptor to ensure all completion writes arrive in
502  * order.
503  */
504  if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
505  idx = ioat->head;
506  else
507  return NULL;
508  i = 0;
509  do {
510  struct ioat_raw_descriptor *descs[2];
511  size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
512  int s;
513 
514  desc = ioat2_get_ring_ent(ioat, idx + i);
515  xor = desc->xor;
516 
517  /* save a branch by unconditionally retrieving the
518  * extended descriptor xor_set_src() knows to not write
519  * to it in the single descriptor case
520  */
521  ext = ioat2_get_ring_ent(ioat, idx + i + 1);
522  xor_ex = ext->xor_ex;
523 
524  descs[0] = (struct ioat_raw_descriptor *) xor;
525  descs[1] = (struct ioat_raw_descriptor *) xor_ex;
526  for (s = 0; s < src_cnt; s++)
527  xor_set_src(descs, src[s], offset, s);
528  xor->size = xfer_size;
529  xor->dst_addr = dest + offset;
530  xor->ctl = 0;
531  xor->ctl_f.op = op;
532  xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
533 
534  len -= xfer_size;
535  offset += xfer_size;
536  dump_desc_dbg(ioat, desc);
537  } while ((i += 1 + with_ext) < num_descs);
538 
539  /* last xor descriptor carries the unmap parameters and fence bit */
540  desc->txd.flags = flags;
541  desc->len = total_len;
542  if (result)
543  desc->result = result;
544  xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
545 
546  /* completion descriptor carries interrupt bit */
547  compl_desc = ioat2_get_ring_ent(ioat, idx + i);
548  compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
549  hw = compl_desc->hw;
550  hw->ctl = 0;
551  hw->ctl_f.null = 1;
552  hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
553  hw->ctl_f.compl_write = 1;
554  hw->size = NULL_DESC_BUFFER_SIZE;
555  dump_desc_dbg(ioat, compl_desc);
556 
557  /* we leave the channel locked to ensure in order submission */
558  return &compl_desc->txd;
559 }
560 
561 static struct dma_async_tx_descriptor *
562 ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
563  unsigned int src_cnt, size_t len, unsigned long flags)
564 {
565  return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
566 }
567 
568 static struct dma_async_tx_descriptor *
569 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
570  unsigned int src_cnt, size_t len,
571  enum sum_check_flags *result, unsigned long flags)
572 {
573  /* the cleanup routine only sets bits on validate failure, it
574  * does not clear bits on validate success... so clear it here
575  */
576  *result = 0;
577 
578  return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
579  src_cnt - 1, len, flags);
580 }
581 
582 static void
583 dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
584 {
585  struct device *dev = to_dev(&ioat->base);
586  struct ioat_pq_descriptor *pq = desc->pq;
587  struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
588  struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
589  int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
590  int i;
591 
592  dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
593  " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
594  desc_id(desc), (unsigned long long) desc->txd.phys,
595  (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
596  desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
597  pq->ctl_f.compl_write,
598  pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
599  pq->ctl_f.src_cnt);
600  for (i = 0; i < src_cnt; i++)
601  dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
602  (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
603  dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
604  dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
605 }
606 
607 static struct dma_async_tx_descriptor *
608 __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
609  const dma_addr_t *dst, const dma_addr_t *src,
610  unsigned int src_cnt, const unsigned char *scf,
611  size_t len, unsigned long flags)
612 {
613  struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
614  struct ioat_chan_common *chan = &ioat->base;
615  struct ioat_ring_ent *compl_desc;
616  struct ioat_ring_ent *desc;
617  struct ioat_ring_ent *ext;
618  size_t total_len = len;
619  struct ioat_pq_descriptor *pq;
620  struct ioat_pq_ext_descriptor *pq_ex = NULL;
621  struct ioat_dma_descriptor *hw;
622  u32 offset = 0;
623  u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
624  int i, s, idx, with_ext, num_descs;
625 
626  dev_dbg(to_dev(chan), "%s\n", __func__);
627  /* the engine requires at least two sources (we provide
628  * at least 1 implied source in the DMA_PREP_CONTINUE case)
629  */
630  BUG_ON(src_cnt + dmaf_continue(flags) < 2);
631 
632  num_descs = ioat2_xferlen_to_descs(ioat, len);
633  /* we need 2x the number of descriptors to cover greater than 3
634  * sources (we need 1 extra source in the q-only continuation
635  * case and 3 extra sources in the p+q continuation case.
636  */
637  if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
638  (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
639  with_ext = 1;
640  num_descs *= 2;
641  } else
642  with_ext = 0;
643 
644  /* completion writes from the raid engine may pass completion
645  * writes from the legacy engine, so we need one extra null
646  * (legacy) descriptor to ensure all completion writes arrive in
647  * order.
648  */
649  if (likely(num_descs) &&
650  ioat2_check_space_lock(ioat, num_descs+1) == 0)
651  idx = ioat->head;
652  else
653  return NULL;
654  i = 0;
655  do {
656  struct ioat_raw_descriptor *descs[2];
657  size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
658 
659  desc = ioat2_get_ring_ent(ioat, idx + i);
660  pq = desc->pq;
661 
662  /* save a branch by unconditionally retrieving the
663  * extended descriptor pq_set_src() knows to not write
664  * to it in the single descriptor case
665  */
666  ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
667  pq_ex = ext->pq_ex;
668 
669  descs[0] = (struct ioat_raw_descriptor *) pq;
670  descs[1] = (struct ioat_raw_descriptor *) pq_ex;
671 
672  for (s = 0; s < src_cnt; s++)
673  pq_set_src(descs, src[s], offset, scf[s], s);
674 
675  /* see the comment for dma_maxpq in include/linux/dmaengine.h */
676  if (dmaf_p_disabled_continue(flags))
677  pq_set_src(descs, dst[1], offset, 1, s++);
678  else if (dmaf_continue(flags)) {
679  pq_set_src(descs, dst[0], offset, 0, s++);
680  pq_set_src(descs, dst[1], offset, 1, s++);
681  pq_set_src(descs, dst[1], offset, 0, s++);
682  }
683  pq->size = xfer_size;
684  pq->p_addr = dst[0] + offset;
685  pq->q_addr = dst[1] + offset;
686  pq->ctl = 0;
687  pq->ctl_f.op = op;
688  pq->ctl_f.src_cnt = src_cnt_to_hw(s);
689  pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
690  pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
691 
692  len -= xfer_size;
693  offset += xfer_size;
694  } while ((i += 1 + with_ext) < num_descs);
695 
696  /* last pq descriptor carries the unmap parameters and fence bit */
697  desc->txd.flags = flags;
698  desc->len = total_len;
699  if (result)
700  desc->result = result;
701  pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
702  dump_pq_desc_dbg(ioat, desc, ext);
703 
704  /* completion descriptor carries interrupt bit */
705  compl_desc = ioat2_get_ring_ent(ioat, idx + i);
706  compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
707  hw = compl_desc->hw;
708  hw->ctl = 0;
709  hw->ctl_f.null = 1;
710  hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
711  hw->ctl_f.compl_write = 1;
712  hw->size = NULL_DESC_BUFFER_SIZE;
713  dump_desc_dbg(ioat, compl_desc);
714 
715  /* we leave the channel locked to ensure in order submission */
716  return &compl_desc->txd;
717 }
718 
719 static struct dma_async_tx_descriptor *
720 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
721  unsigned int src_cnt, const unsigned char *scf, size_t len,
722  unsigned long flags)
723 {
724  /* specify valid address for disabled result */
725  if (flags & DMA_PREP_PQ_DISABLE_P)
726  dst[0] = dst[1];
727  if (flags & DMA_PREP_PQ_DISABLE_Q)
728  dst[1] = dst[0];
729 
730  /* handle the single source multiply case from the raid6
731  * recovery path
732  */
733  if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
734  dma_addr_t single_source[2];
735  unsigned char single_source_coef[2];
736 
737  BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
738  single_source[0] = src[0];
739  single_source[1] = src[0];
740  single_source_coef[0] = scf[0];
741  single_source_coef[1] = 0;
742 
743  return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
744  single_source_coef, len, flags);
745  } else
746  return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
747  len, flags);
748 }
749 
750 static struct dma_async_tx_descriptor *
751 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
752  unsigned int src_cnt, const unsigned char *scf, size_t len,
753  enum sum_check_flags *pqres, unsigned long flags)
754 {
755  /* specify valid address for disabled result */
756  if (flags & DMA_PREP_PQ_DISABLE_P)
757  pq[0] = pq[1];
758  if (flags & DMA_PREP_PQ_DISABLE_Q)
759  pq[1] = pq[0];
760 
761  /* the cleanup routine only sets bits on validate failure, it
762  * does not clear bits on validate success... so clear it here
763  */
764  *pqres = 0;
765 
766  return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
767  flags);
768 }
769 
770 static struct dma_async_tx_descriptor *
771 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
772  unsigned int src_cnt, size_t len, unsigned long flags)
773 {
774  unsigned char scf[src_cnt];
775  dma_addr_t pq[2];
776 
777  memset(scf, 0, src_cnt);
778  pq[0] = dst;
779  flags |= DMA_PREP_PQ_DISABLE_Q;
780  pq[1] = dst; /* specify valid address for disabled result */
781 
782  return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
783  flags);
784 }
785 
786 static struct dma_async_tx_descriptor *
787 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
788  unsigned int src_cnt, size_t len,
789  enum sum_check_flags *result, unsigned long flags)
790 {
791  unsigned char scf[src_cnt];
792  dma_addr_t pq[2];
793 
794  /* the cleanup routine only sets bits on validate failure, it
795  * does not clear bits on validate success... so clear it here
796  */
797  *result = 0;
798 
799  memset(scf, 0, src_cnt);
800  pq[0] = src[0];
801  flags |= DMA_PREP_PQ_DISABLE_Q;
802  pq[1] = pq[0]; /* specify valid address for disabled result */
803 
804  return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
805  len, flags);
806 }
807 
808 static struct dma_async_tx_descriptor *
809 ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
810 {
811  struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
812  struct ioat_ring_ent *desc;
813  struct ioat_dma_descriptor *hw;
814 
815  if (ioat2_check_space_lock(ioat, 1) == 0)
816  desc = ioat2_get_ring_ent(ioat, ioat->head);
817  else
818  return NULL;
819 
820  hw = desc->hw;
821  hw->ctl = 0;
822  hw->ctl_f.null = 1;
823  hw->ctl_f.int_en = 1;
824  hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
825  hw->ctl_f.compl_write = 1;
826  hw->size = NULL_DESC_BUFFER_SIZE;
827  hw->src_addr = 0;
828  hw->dst_addr = 0;
829 
830  desc->txd.flags = flags;
831  desc->len = 1;
832 
833  dump_desc_dbg(ioat, desc);
834 
835  /* we leave the channel locked to ensure in order submission */
836  return &desc->txd;
837 }
838 
839 static void __devinit ioat3_dma_test_callback(void *dma_async_param)
840 {
841  struct completion *cmp = dma_async_param;
842 
843  complete(cmp);
844 }
845 
846 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
847 static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
848 {
849  int i, src_idx;
850  struct page *dest;
851  struct page *xor_srcs[IOAT_NUM_SRC_TEST];
852  struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
853  dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
854  dma_addr_t dma_addr, dest_dma;
855  struct dma_async_tx_descriptor *tx;
856  struct dma_chan *dma_chan;
857  dma_cookie_t cookie;
858  u8 cmp_byte = 0;
859  u32 cmp_word;
860  u32 xor_val_result;
861  int err = 0;
862  struct completion cmp;
863  unsigned long tmo;
864  struct device *dev = &device->pdev->dev;
865  struct dma_device *dma = &device->common;
866 
867  dev_dbg(dev, "%s\n", __func__);
868 
869  if (!dma_has_cap(DMA_XOR, dma->cap_mask))
870  return 0;
871 
872  for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
873  xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
874  if (!xor_srcs[src_idx]) {
875  while (src_idx--)
876  __free_page(xor_srcs[src_idx]);
877  return -ENOMEM;
878  }
879  }
880 
881  dest = alloc_page(GFP_KERNEL);
882  if (!dest) {
883  while (src_idx--)
884  __free_page(xor_srcs[src_idx]);
885  return -ENOMEM;
886  }
887 
888  /* Fill in src buffers */
889  for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
890  u8 *ptr = page_address(xor_srcs[src_idx]);
891  for (i = 0; i < PAGE_SIZE; i++)
892  ptr[i] = (1 << src_idx);
893  }
894 
895  for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
896  cmp_byte ^= (u8) (1 << src_idx);
897 
898  cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
899  (cmp_byte << 8) | cmp_byte;
900 
901  memset(page_address(dest), 0, PAGE_SIZE);
902 
903  dma_chan = container_of(dma->channels.next, struct dma_chan,
904  device_node);
905  if (dma->device_alloc_chan_resources(dma_chan) < 1) {
906  err = -ENODEV;
907  goto out;
908  }
909 
910  /* test xor */
911  dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
912  for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
913  dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
914  DMA_TO_DEVICE);
915  tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
916  IOAT_NUM_SRC_TEST, PAGE_SIZE,
917  DMA_PREP_INTERRUPT);
918 
919  if (!tx) {
920  dev_err(dev, "Self-test xor prep failed\n");
921  err = -ENODEV;
922  goto free_resources;
923  }
924 
925  async_tx_ack(tx);
926  init_completion(&cmp);
927  tx->callback = ioat3_dma_test_callback;
928  tx->callback_param = &cmp;
929  cookie = tx->tx_submit(tx);
930  if (cookie < 0) {
931  dev_err(dev, "Self-test xor setup failed\n");
932  err = -ENODEV;
933  goto free_resources;
934  }
935  dma->device_issue_pending(dma_chan);
936 
937  tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
938 
939  if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
940  dev_err(dev, "Self-test xor timed out\n");
941  err = -ENODEV;
942  goto free_resources;
943  }
944 
945  dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
946  for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
947  u32 *ptr = page_address(dest);
948  if (ptr[i] != cmp_word) {
949  dev_err(dev, "Self-test xor failed compare\n");
950  err = -ENODEV;
951  goto free_resources;
952  }
953  }
954  dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE);
955 
956  /* skip validate if the capability is not present */
957  if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
958  goto free_resources;
959 
960  /* validate the sources with the destination page */
961  for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
962  xor_val_srcs[i] = xor_srcs[i];
963  xor_val_srcs[i] = dest;
964 
965  xor_val_result = 1;
966 
967  for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
968  dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
969  DMA_TO_DEVICE);
970  tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
971  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
972  &xor_val_result, DMA_PREP_INTERRUPT);
973  if (!tx) {
974  dev_err(dev, "Self-test zero prep failed\n");
975  err = -ENODEV;
976  goto free_resources;
977  }
978 
979  async_tx_ack(tx);
980  init_completion(&cmp);
981  tx->callback = ioat3_dma_test_callback;
982  tx->callback_param = &cmp;
983  cookie = tx->tx_submit(tx);
984  if (cookie < 0) {
985  dev_err(dev, "Self-test zero setup failed\n");
986  err = -ENODEV;
987  goto free_resources;
988  }
989  dma->device_issue_pending(dma_chan);
990 
991  tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
992 
993  if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
994  dev_err(dev, "Self-test validate timed out\n");
995  err = -ENODEV;
996  goto free_resources;
997  }
998 
999  if (xor_val_result != 0) {
1000  dev_err(dev, "Self-test validate failed compare\n");
1001  err = -ENODEV;
1002  goto free_resources;
1003  }
1004 
1005  /* skip memset if the capability is not present */
1006  if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
1007  goto free_resources;
1008 
1009  /* test memset */
1010  dma_addr = dma_map_page(dev, dest, 0,
1011  PAGE_SIZE, DMA_FROM_DEVICE);
1012  tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
1013  DMA_PREP_INTERRUPT);
1014  if (!tx) {
1015  dev_err(dev, "Self-test memset prep failed\n");
1016  err = -ENODEV;
1017  goto free_resources;
1018  }
1019 
1020  async_tx_ack(tx);
1021  init_completion(&cmp);
1022  tx->callback = ioat3_dma_test_callback;
1023  tx->callback_param = &cmp;
1024  cookie = tx->tx_submit(tx);
1025  if (cookie < 0) {
1026  dev_err(dev, "Self-test memset setup failed\n");
1027  err = -ENODEV;
1028  goto free_resources;
1029  }
1030  dma->device_issue_pending(dma_chan);
1031 
1032  tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1033 
1034  if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1035  dev_err(dev, "Self-test memset timed out\n");
1036  err = -ENODEV;
1037  goto free_resources;
1038  }
1039 
1040  for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
1041  u32 *ptr = page_address(dest);
1042  if (ptr[i]) {
1043  dev_err(dev, "Self-test memset failed compare\n");
1044  err = -ENODEV;
1045  goto free_resources;
1046  }
1047  }
1048 
1049  /* test for non-zero parity sum */
1050  xor_val_result = 0;
1051  for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1052  dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1053  DMA_TO_DEVICE);
1054  tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1055  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1056  &xor_val_result, DMA_PREP_INTERRUPT);
1057  if (!tx) {
1058  dev_err(dev, "Self-test 2nd zero prep failed\n");
1059  err = -ENODEV;
1060  goto free_resources;
1061  }
1062 
1063  async_tx_ack(tx);
1064  init_completion(&cmp);
1065  tx->callback = ioat3_dma_test_callback;
1066  tx->callback_param = &cmp;
1067  cookie = tx->tx_submit(tx);
1068  if (cookie < 0) {
1069  dev_err(dev, "Self-test 2nd zero setup failed\n");
1070  err = -ENODEV;
1071  goto free_resources;
1072  }
1073  dma->device_issue_pending(dma_chan);
1074 
1075  tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1076 
1077  if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1078  dev_err(dev, "Self-test 2nd validate timed out\n");
1079  err = -ENODEV;
1080  goto free_resources;
1081  }
1082 
1083  if (xor_val_result != SUM_CHECK_P_RESULT) {
1084  dev_err(dev, "Self-test validate failed compare\n");
1085  err = -ENODEV;
1086  goto free_resources;
1087  }
1088 
1089 free_resources:
1090  dma->device_free_chan_resources(dma_chan);
1091 out:
1092  src_idx = IOAT_NUM_SRC_TEST;
1093  while (src_idx--)
1094  __free_page(xor_srcs[src_idx]);
1095  __free_page(dest);
1096  return err;
1097 }
1098 
1099 static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
1100 {
1101  int rc = ioat_dma_self_test(device);
1102 
1103  if (rc)
1104  return rc;
1105 
1106  rc = ioat_xor_val_self_test(device);
1107  if (rc)
1108  return rc;
1109 
1110  return 0;
1111 }
1112 
1113 static int ioat3_reset_hw(struct ioat_chan_common *chan)
1114 {
1115  /* throw away whatever the channel was doing and get it
1116  * initialized, with ioat3 specific workarounds
1117  */
1118  struct ioatdma_device *device = chan->device;
1119  struct pci_dev *pdev = device->pdev;
1120  u32 chanerr;
1121  u16 dev_id;
1122  int err;
1123 
1124  ioat2_quiesce(chan, msecs_to_jiffies(100));
1125 
1126  chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1127  writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1128 
1129  /* -= IOAT ver.3 workarounds =- */
1130  /* Write CHANERRMSK_INT with 3E07h to mask out the errors
1131  * that can cause stability issues for IOAT ver.3, and clear any
1132  * pending errors
1133  */
1134  pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
1135  err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1136  if (err) {
1137  dev_err(&pdev->dev, "channel error register unreachable\n");
1138  return err;
1139  }
1140  pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1141 
1142  /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1143  * (workaround for spurious config parity error after restart)
1144  */
1145  pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1146  if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
1147  pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);
1148 
1149  return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1150 }
1151 
1152 static bool is_jf_ioat(struct pci_dev *pdev)
1153 {
1154  switch (pdev->device) {
1155  case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
1156  case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
1157  case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
1158  case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
1159  case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
1160  case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
1161  case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
1162  case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
1163  case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
1164  case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
1165  return true;
1166  default:
1167  return false;
1168  }
1169 }
1170 
1171 static bool is_snb_ioat(struct pci_dev *pdev)
1172 {
1173  switch (pdev->device) {
1174  case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
1175  case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
1176  case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
1177  case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
1178  case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
1179  case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
1180  case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
1181  case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
1182  case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
1183  case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
1184  return true;
1185  default:
1186  return false;
1187  }
1188 }
1189 
1190 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1191 {
1192  struct pci_dev *pdev = device->pdev;
1193  int dca_en = system_has_dca_enabled(pdev);
1194  struct dma_device *dma;
1195  struct dma_chan *c;
1196  struct ioat_chan_common *chan;
1197  bool is_raid_device = false;
1198  int err;
1199  u32 cap;
1200 
1201  device->enumerate_channels = ioat2_enumerate_channels;
1202  device->reset_hw = ioat3_reset_hw;
1203  device->self_test = ioat3_dma_self_test;
1204  dma = &device->common;
1205  dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
1206  dma->device_issue_pending = ioat2_issue_pending;
1207  dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
1208  dma->device_free_chan_resources = ioat2_free_chan_resources;
1209 
1210  if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
1211  dma->copy_align = 6;
1212 
1213  dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
1214  dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
1215 
1216  cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
1217 
1218  /* dca is incompatible with raid operations */
1219  if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
1220  cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
1221 
1222  if (cap & IOAT_CAP_XOR) {
1223  is_raid_device = true;
1224  dma->max_xor = 8;
1225  dma->xor_align = 6;
1226 
1227  dma_cap_set(DMA_XOR, dma->cap_mask);
1228  dma->device_prep_dma_xor = ioat3_prep_xor;
1229 
1230  dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1231  dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
1232  }
1233  if (cap & IOAT_CAP_PQ) {
1234  is_raid_device = true;
1235  dma_set_maxpq(dma, 8, 0);
1236  dma->pq_align = 6;
1237 
1238  dma_cap_set(DMA_PQ, dma->cap_mask);
1239  dma->device_prep_dma_pq = ioat3_prep_pq;
1240 
1241  dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
1242  dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
1243 
1244  if (!(cap & IOAT_CAP_XOR)) {
1245  dma->max_xor = 8;
1246  dma->xor_align = 6;
1247 
1248  dma_cap_set(DMA_XOR, dma->cap_mask);
1249  dma->device_prep_dma_xor = ioat3_prep_pqxor;
1250 
1251  dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1252  dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
1253  }
1254  }
1255  if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
1256  dma_cap_set(DMA_MEMSET, dma->cap_mask);
1257  dma->device_prep_dma_memset = ioat3_prep_memset_lock;
1258  }
1259 
1260 
1261  if (is_raid_device) {
1262  dma->device_tx_status = ioat3_tx_status;
1263  device->cleanup_fn = ioat3_cleanup_event;
1264  device->timer_fn = ioat3_timer_event;
1265  } else {
1266  dma->device_tx_status = ioat2_tx_status;
1267  device->cleanup_fn = ioat2_cleanup_event;
1268  device->timer_fn = ioat2_timer_event;
1269  }
1270 
1271  #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1272  dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
1273  dma->device_prep_dma_pq_val = NULL;
1274  #endif
1275 
1276  #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1277  dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
1278  dma->device_prep_dma_xor_val = NULL;
1279  #endif
1280 
1281  err = ioat_probe(device);
1282  if (err)
1283  return err;
1284  ioat_set_tcp_copy_break(262144);
1285 
1286  list_for_each_entry(c, &dma->channels, device_node) {
1287  chan = to_chan_common(c);
1288  writel(IOAT_DMA_DCA_ANY_CPU,
1289  chan->reg_base + IOAT_DCACTRL_OFFSET);
1290  }
1291 
1292  err = ioat_register(device);
1293  if (err)
1294  return err;
1295 
1296  ioat_kobject_add(device, &ioat2_ktype);
1297 
1298  if (dca)
1299  device->dca = ioat3_dca_init(pdev, device->reg_base);
1300 
1301  return 0;
1302 }