sata_dwc_460ex.c
1 /*
2  * drivers/ata/sata_dwc_460ex.c
3  *
4  * Synopsys DesignWare Cores (DWC) SATA host driver
5  *
6  * Author: Mark Miesfeld <[email protected]>
7  *
8  * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <[email protected]>
9  * Copyright 2008 DENX Software Engineering
10  *
11  * Based on versions provided by AMCC and Synopsys which are:
12  * Copyright 2006 Applied Micro Circuits Corporation
13  * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
14  *
15  * This program is free software; you can redistribute it and/or modify it
16  * under the terms of the GNU General Public License as published by the
17  * Free Software Foundation; either version 2 of the License, or (at your
18  * option) any later version.
19  */
20 
21 #ifdef CONFIG_SATA_DWC_DEBUG
22 #define DEBUG
23 #endif
24 
25 #ifdef CONFIG_SATA_DWC_VDEBUG
26 #define VERBOSE_DEBUG
27 #define DEBUG_NCQ
28 #endif
29 
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/device.h>
34 #include <linux/of_platform.h>
35 #include <linux/platform_device.h>
36 #include <linux/libata.h>
37 #include <linux/slab.h>
38 #include "libata.h"
39 
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_cmnd.h>
42 
43 /* These two are defined in "libata.h" */
44 #undef DRV_NAME
45 #undef DRV_VERSION
46 #define DRV_NAME "sata-dwc"
47 #define DRV_VERSION "1.3"
48 
49 /* SATA DMA driver Globals */
50 #define DMA_NUM_CHANS 1
51 #define DMA_NUM_CHAN_REGS 8
52 
53 /* SATA DMA Register definitions */
54 #define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
55 
56 struct dmareg {
57  u32 low; /* Low bits 0-31 */
58  u32 high; /* High bits 32-63 */
59 };
60 
61 /* DMA Per Channel registers */
62 struct dma_chan_regs {
63  struct dmareg sar; /* Source Address */
64  struct dmareg dar; /* Destination address */
65  struct dmareg llp; /* Linked List Pointer */
66  struct dmareg ctl; /* Control */
67  struct dmareg sstat; /* Source Status not implemented in core */
68  struct dmareg dstat; /* Destination Status not implemented in core*/
69  struct dmareg sstatar; /* Source Status Address not impl in core */
70  struct dmareg dstatar; /* Destination Status Address not implemented in core */
71  struct dmareg cfg; /* Config */
72  struct dmareg sgr; /* Source Gather */
73  struct dmareg dsr; /* Destination Scatter */
74 };
75 
76 /* Generic Interrupt Registers */
77 struct dma_interrupt_regs {
78  struct dmareg tfr; /* Transfer Interrupt */
79  struct dmareg block; /* Block Interrupt */
80  struct dmareg srctran; /* Source Transfer Interrupt */
81  struct dmareg dsttran; /* Dest Transfer Interrupt */
82  struct dmareg error; /* Error */
83 };
84 
85 struct ahb_dma_regs {
86  struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
87  struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
88  struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
89  struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
90  struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
91  struct dmareg statusInt; /* Interrupt combined*/
92  struct dmareg rq_srcreg; /* Src Trans Req */
93  struct dmareg rq_dstreg; /* Dst Trans Req */
94  struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req*/
95  struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req*/
96  struct dmareg rq_lst_srcreg; /* Last Src Trans Req*/
97  struct dmareg rq_lst_dstreg; /* Last Dst Trans Req*/
98  struct dmareg dma_cfg; /* DMA Config */
99  struct dmareg dma_chan_en; /* DMA Channel Enable*/
100  struct dmareg dma_id; /* DMA ID */
101  struct dmareg dma_test; /* DMA Test */
102  struct dmareg res1; /* reserved */
103  struct dmareg res2; /* reserved */
104  /*
105  * DMA Comp Params
106  * Param 6 = dma_param[0], Param 5 = dma_param[1],
107  * Param 4 = dma_param[2] ...
108  */
109  struct dmareg dma_params[6];
110 };
111 
112 /* Data structure for linked list item */
113 struct lli {
114  u32 sar; /* Source Address */
115  u32 dar; /* Destination address */
116  u32 llp; /* Linked List Pointer */
117  struct dmareg ctl; /* Control */
118  struct dmareg dstat; /* Destination Status */
119 };
120 
121 enum {
122  SATA_DWC_DMAC_LLI_SZ = (sizeof(struct lli)),
123  SATA_DWC_DMAC_LLI_NUM = 256,
124  SATA_DWC_DMAC_LLI_TBL_SZ = (SATA_DWC_DMAC_LLI_SZ * \
125  SATA_DWC_DMAC_LLI_NUM),
126  SATA_DWC_DMAC_TWIDTH_BYTES = 4,
127  SATA_DWC_DMAC_CTRL_TSIZE_MAX = (0x00000800 * \
128  SATA_DWC_DMAC_TWIDTH_BYTES),
129 };
130 
131 /* DMA Register Operation Bits */
132 enum {
133  DMA_EN = 0x00000001, /* Enable AHB DMA */
134  DMA_CTL_LLP_SRCEN = 0x10000000, /* Blk chain enable Src */
135  DMA_CTL_LLP_DSTEN = 0x08000000, /* Blk chain enable Dst */
136 };
137 
138 #define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
139 #define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
140  /* Enable channel */
141 #define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
142  ((0x000000001 << (ch)) << 8))
143  /* Disable channel */
144 #define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
145  /* Transfer Type & Flow Controller */
146 #define DMA_CTL_TTFC(type) (((type) & 0x7) << 20)
147 #define DMA_CTL_SMS(num) (((num) & 0x3) << 25) /* Src Master Select */
148 #define DMA_CTL_DMS(num) (((num) & 0x3) << 23)/* Dst Master Select */
149  /* Src Burst Transaction Length */
150 #define DMA_CTL_SRC_MSIZE(size) (((size) & 0x7) << 14)
151  /* Dst Burst Transaction Length */
152 #define DMA_CTL_DST_MSIZE(size) (((size) & 0x7) << 11)
153  /* Source Transfer Width */
154 #define DMA_CTL_SRC_TRWID(size) (((size) & 0x7) << 4)
155  /* Destination Transfer Width */
156 #define DMA_CTL_DST_TRWID(size) (((size) & 0x7) << 1)
157 
158 /* Assign HW handshaking interface (x) to destination / source peripheral */
159 #define DMA_CFG_HW_HS_DEST(int_num) (((int_num) & 0xF) << 11)
160 #define DMA_CFG_HW_HS_SRC(int_num) (((int_num) & 0xF) << 7)
161 #define DMA_CFG_HW_CH_PRIOR(int_num) (((int_num) & 0xF) << 5)
162 #define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
163 
164 /*
165  * This define is used to set block chaining disabled in the control low
166  * register. It is already in little-endian format so it can be &'d directly.
167  * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
168  */
169 enum {
170  DMA_CTL_LLP_DISABLE_LE32 = 0xffffffe7,
171  DMA_CTL_TTFC_P2M_DMAC = 0x00000002, /* Per to mem, DMAC cntr */
172  DMA_CTL_TTFC_M2P_PER = 0x00000003, /* Mem to per, peripheral cntr */
173  DMA_CTL_SINC_INC = 0x00000000, /* Source Address Increment */
174  DMA_CTL_SINC_DEC = 0x00000200,
175  DMA_CTL_SINC_NOCHANGE = 0x00000400,
176  DMA_CTL_DINC_INC = 0x00000000, /* Destination Address Increment */
177  DMA_CTL_DINC_DEC = 0x00000080,
178  DMA_CTL_DINC_NOCHANGE = 0x00000100,
179  DMA_CTL_INT_EN = 0x00000001, /* Interrupt Enable */
180 
181 /* Channel Configuration Register high bits */
182  DMA_CFG_FCMOD_REQ = 0x00000001, /* Flow Control - request based */
183  DMA_CFG_PROTCTL = (0x00000003 << 2),/* Protection Control */
184 
185 /* Channel Configuration Register low bits */
186  DMA_CFG_RELD_DST = 0x80000000, /* Reload Dest / Src Addr */
187  DMA_CFG_RELD_SRC = 0x40000000,
188  DMA_CFG_HS_SELSRC = 0x00000800, /* Software handshake Src/ Dest */
189  DMA_CFG_HS_SELDST = 0x00000400,
190  DMA_CFG_FIFOEMPTY = (0x00000001 << 9), /* FIFO Empty bit */
191 
192 /* Channel Linked List Pointer Register */
193  DMA_LLP_AHBMASTER1 = 0, /* List Master Select */
194  DMA_LLP_AHBMASTER2 = 1,
195 
196  SATA_DWC_MAX_PORTS = 1,
197 
198  SATA_DWC_SCR_OFFSET = 0x24,
199  SATA_DWC_REG_OFFSET = 0x64,
200 };
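
The comment above the enum can be checked by hand; a minimal compile-time guard (an editor's sketch, not part of the driver, e.g. dropped into an init function) is:

/*
 * Editor's sketch: verify DMA_CTL_LLP_DISABLE_LE32 against its derivation.
 * ~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN) = ~0x18000000 = 0xe7ffffff,
 * and byte-swapping for the big-endian 460EX yields 0xffffffe7.
 *
 *	BUILD_BUG_ON(DMA_CTL_LLP_DISABLE_LE32 !=
 *		     __constant_cpu_to_le32(~(DMA_CTL_LLP_SRCEN |
 *					      DMA_CTL_LLP_DSTEN)));
 */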
201 
202 /* DWC SATA Registers */
203 struct sata_dwc_regs {
204  u32 fptagr; /* 1st party DMA tag */
205  u32 fpbor; /* 1st party DMA buffer offset */
206  u32 fptcr; /* 1st party DMA Xfr count */
207  u32 dmacr; /* DMA Control */
208  u32 dbtsr; /* DMA Burst Transac size */
209  u32 intpr; /* Interrupt Pending */
210  u32 intmr; /* Interrupt Mask */
211  u32 errmr; /* Error Mask */
212  u32 llcr; /* Link Layer Control */
213  u32 phycr; /* PHY Control */
214  u32 physr; /* PHY Status */
215  u32 rxbistpd; /* Recvd BIST pattern def register */
216  u32 rxbistpd1; /* Recvd BIST data dword1 */
217  u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
218  u32 txbistpd; /* Trans BIST pattern def register */
219  u32 txbistpd1; /* Trans BIST data dword1 */
220  u32 txbistpd2; /* Trans BIST data dword2 */
221  u32 bistcr; /* BIST Control Register */
222  u32 bistfctr; /* BIST FIS Count Register */
223  u32 bistsr; /* BIST Status Register */
224  u32 bistdecr; /* BIST Dword Error count register */
225  u32 res[15]; /* Reserved locations */
226  u32 testr; /* Test Register */
227  u32 versionr; /* Version Register */
228  u32 idr; /* ID Register */
229  u32 unimpl[192]; /* Unimplemented */
230  u32 dmadr[256]; /* FIFO Locations in DMA Mode */
231 };
232 
233 enum {
234  SCR_SCONTROL_DET_ENABLE = 0x00000001,
235  SCR_SSTATUS_DET_PRESENT = 0x00000001,
236  SCR_SERROR_DIAG_X = 0x04000000,
237 /* DWC SATA Register Operations */
238  SATA_DWC_TXFIFO_DEPTH = 0x01FF,
239  SATA_DWC_RXFIFO_DEPTH = 0x01FF,
240  SATA_DWC_DMACR_TMOD_TXCHEN = 0x00000004,
241  SATA_DWC_DMACR_TXCHEN = (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
242  SATA_DWC_DMACR_RXCHEN = (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
243  SATA_DWC_DMACR_TXRXCH_CLEAR = SATA_DWC_DMACR_TMOD_TXCHEN,
244  SATA_DWC_INTPR_DMAT = 0x00000001,
245  SATA_DWC_INTPR_NEWFP = 0x00000002,
246  SATA_DWC_INTPR_PMABRT = 0x00000004,
247  SATA_DWC_INTPR_ERR = 0x00000008,
248  SATA_DWC_INTPR_NEWBIST = 0x00000010,
249  SATA_DWC_INTPR_IPF = 0x10000000,
250  SATA_DWC_INTMR_DMATM = 0x00000001,
251  SATA_DWC_INTMR_NEWFPM = 0x00000002,
252  SATA_DWC_INTMR_PMABRTM = 0x00000004,
253  SATA_DWC_INTMR_ERRM = 0x00000008,
254  SATA_DWC_INTMR_NEWBISTM = 0x00000010,
255  SATA_DWC_LLCR_SCRAMEN = 0x00000001,
256  SATA_DWC_LLCR_DESCRAMEN = 0x00000002,
257  SATA_DWC_LLCR_RPDEN = 0x00000004,
258 /* This is all error bits, zero's are reserved fields. */
259  SATA_DWC_SERROR_ERR_BITS = 0x0FFF0F03
260 };
261 
262 #define SATA_DWC_SCR0_SPD_GET(v) (((v) >> 4) & 0x0000000F)
263 #define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) |\
264  SATA_DWC_DMACR_TMOD_TXCHEN)
265 #define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) |\
266  SATA_DWC_DMACR_TMOD_TXCHEN)
267 #define SATA_DWC_DBTSR_MWR(size) (((size)/4) & SATA_DWC_TXFIFO_DEPTH)
268 #define SATA_DWC_DBTSR_MRD(size) ((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
269  << 16)
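
For the default burst these macros produce a fixed register image; a worked value (editor's note, using the 0x01FF FIFO-depth masks from the register-operations enum above):

/*
 * Editor's note: with AHB_DMA_BRST_DFLT = 64 bytes,
 *   SATA_DWC_DBTSR_MWR(64) = (64/4) & 0x01FF         = 0x00000010
 *   SATA_DWC_DBTSR_MRD(64) = ((64/4) & 0x01FF) << 16 = 0x00100000
 * so the DBTSR value programmed in port_start/hardreset is 0x00100010.
 */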
270 struct sata_dwc_device {
271  struct device *dev; /* generic device struct */
272  struct ata_probe_ent *pe; /* ptr to probe-ent */
273  struct ata_host *host;
274  u8 *reg_base; /* app base of scr regs */
275  struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
276  int irq_dma;
277 };
278 
279 #define SATA_DWC_QCMD_MAX 32
280 
281 struct sata_dwc_device_port {
282  struct sata_dwc_device *hsdev;
283  int cmd_issued[SATA_DWC_QCMD_MAX];
284  struct lli *llit[SATA_DWC_QCMD_MAX]; /* DMA LLI table */
285  dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
286  u32 dma_chan[SATA_DWC_QCMD_MAX];
287  int dma_pending[SATA_DWC_QCMD_MAX];
288 };
289 
290 /*
291  * Commonly used DWC SATA driver Macros
292  */
293 #define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
294  (host)->private_data)
295 #define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
296  (ap)->host->private_data)
297 #define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
298  (ap)->private_data)
299 #define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
300  (qc)->ap->host->private_data)
301 #define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
302  (p)->hsdev)
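
All of these accessors chase the same private_data pointers; a usage sketch (hypothetical helper, not in the driver):

/* Editor's sketch: typical traversal from an ata_port to the controller state. */
static inline struct sata_dwc_device *example_ap_to_hsdev(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	/* Equivalent to HSDEV_FROM_AP(ap), but via the per-port struct. */
	return HSDEV_FROM_HSDEVP(hsdevp);
}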
303 
304 enum {
305  SATA_DWC_CMD_ISSUED_NOT = 0,
306  SATA_DWC_CMD_ISSUED_PEND = 1,
307  SATA_DWC_CMD_ISSUED_EXEC = 2,
308  SATA_DWC_CMD_ISSUED_NODATA = 3,
309 
310  SATA_DWC_DMA_PENDING_NONE = 0,
311  SATA_DWC_DMA_PENDING_TX = 1,
312  SATA_DWC_DMA_PENDING_RX = 2,
313 };
314 
315 struct sata_dwc_host_priv {
316  void __iomem *scr_addr_sstatus;
317  u32 sata_dwc_sactive_issued;
318  u32 sata_dwc_sactive_queued;
319  u32 dma_interrupt_count;
320  struct ahb_dma_regs *sata_dma_regs;
321  struct device *dwc_dev;
322  int dma_channel;
323 };
324 struct sata_dwc_host_priv host_pvt;
325 /*
326  * Prototypes
327  */
328 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
329 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
330  u32 check_status);
331 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
332 static void sata_dwc_port_stop(struct ata_port *ap);
333 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
334 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
335 static void dma_dwc_exit(struct sata_dwc_device *hsdev);
336 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
337  struct lli *lli, dma_addr_t dma_lli,
338  void __iomem *addr, int dir);
339 static void dma_dwc_xfer_start(int dma_ch);
340 
341 static const char *get_prot_descript(u8 protocol)
342 {
343  switch ((enum ata_tf_protocols)protocol) {
344  case ATA_PROT_NODATA:
345  return "ATA no data";
346  case ATA_PROT_PIO:
347  return "ATA PIO";
348  case ATA_PROT_DMA:
349  return "ATA DMA";
350  case ATA_PROT_NCQ:
351  return "ATA NCQ";
352  case ATAPI_PROT_NODATA:
353  return "ATAPI no data";
354  case ATAPI_PROT_PIO:
355  return "ATAPI PIO";
356  case ATAPI_PROT_DMA:
357  return "ATAPI DMA";
358  default:
359  return "unknown";
360  }
361 }
362 
363 static const char *get_dma_dir_descript(int dma_dir)
364 {
365  switch ((enum dma_data_direction)dma_dir) {
366  case DMA_BIDIRECTIONAL:
367  return "bidirectional";
368  case DMA_TO_DEVICE:
369  return "to device";
370  case DMA_FROM_DEVICE:
371  return "from device";
372  default:
373  return "none";
374  }
375 }
376 
377 static void sata_dwc_tf_dump(struct ata_taskfile *tf)
378 {
379  dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
380  "0x%lx device: %x\n", tf->command,
381  get_prot_descript(tf->protocol), tf->flags, tf->device);
382  dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
383  "lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
384  tf->lbam, tf->lbah);
385  dev_vdbg(host_pvt.dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x "
386  "hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
387  tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
388  tf->hob_lbah);
389 }
390 
391 /*
392  * Function: get_burst_length_encode
393  * arguments: datalength: length in bytes of data
394  * returns the burst-length encoding to be programmed into the DMA control register
395  * The value is effectively log2 of the number of 32-bit data items, bucketed
396  * into the burst sizes the core supports
396  */
397 static int get_burst_length_encode(int datalength)
398 {
399  int items = datalength >> 2; /* div by 4 to get lword count */
400 
401  if (items >= 64)
402  return 5;
403 
404  if (items >= 32)
405  return 4;
406 
407  if (items >= 16)
408  return 3;
409 
410  if (items >= 8)
411  return 2;
412 
413  if (items >= 4)
414  return 1;
415 
416  return 0;
417 }
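
The bucketing corresponds to the DW AHB DMAC CTLx.SRC_MSIZE/DST_MSIZE field; a worked mapping (editor's note):

/*
 * Editor's note: items = datalength / 4, and the return value selects
 * the largest burst bucket that fits:
 *
 *	items  : >=64  >=32  >=16  >=8  >=4  else
 *	encode :    5     4     3    2    1     0
 *
 * The driver always passes AHB_DMA_BRST_DFLT (64 bytes = 16 items),
 * which encodes to 3.
 */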
418 
419 static void clear_chan_interrupts(int c)
420 {
421  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.tfr.low),
422  DMA_CHANNEL(c));
423  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.block.low),
424  DMA_CHANNEL(c));
425  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.srctran.low),
426  DMA_CHANNEL(c));
427  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.dsttran.low),
428  DMA_CHANNEL(c));
429  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear.error.low),
430  DMA_CHANNEL(c));
431 }
432 
433 /*
434  * Function: dma_request_channel
435  * arguments: None
436  * returns channel number if available else -1
437  * This function assigns the next available DMA channel from the list to the
438  * requester
439  */
440 static int dma_request_channel(void)
441 {
442  /* Check if the channel is not currently in use */
443  if (!(in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) &
444  DMA_CHANNEL(host_pvt.dma_channel)))
445  return host_pvt.dma_channel;
446  dev_err(host_pvt.dwc_dev, "%s Channel %d is currently in use\n",
447  __func__, host_pvt.dma_channel);
448  return -1;
449 }
450 
451 /*
452  * Function: dma_dwc_interrupt
453  * arguments: irq, hsdev_instance
454  * returns irqreturn_t - IRQ_HANDLED after servicing the channel's interrupts
455  * Interrupt Handler for DW AHB SATA DMA
456  */
457 static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
458 {
459  int chan;
460  u32 tfr_reg, err_reg;
461  unsigned long flags;
462  struct sata_dwc_device *hsdev =
463  (struct sata_dwc_device *)hsdev_instance;
464  struct ata_host *host = (struct ata_host *)hsdev->host;
465  struct ata_port *ap;
466  struct sata_dwc_device_port *hsdevp;
467  u8 tag = 0;
468  unsigned int port = 0;
469 
470  spin_lock_irqsave(&host->lock, flags);
471  ap = host->ports[port];
472  hsdevp = HSDEVP_FROM_AP(ap);
473  tag = ap->link.active_tag;
474 
475  tfr_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.tfr\
476  .low));
477  err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error\
478  .low));
479 
480  dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
481  tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
482 
483  chan = host_pvt.dma_channel;
484  if (chan >= 0) {
485  /* Check for end-of-transfer interrupt. */
486  if (tfr_reg & DMA_CHANNEL(chan)) {
487  /*
488  * Each DMA command produces 2 interrupts. Only
489  * complete the command after both interrupts have been
490  * seen. (See sata_dwc_isr())
491  */
492  host_pvt.dma_interrupt_count++;
493  sata_dwc_clear_dmacr(hsdevp, tag);
494 
495  if (hsdevp->dma_pending[tag] ==
496  SATA_DWC_DMA_PENDING_NONE) {
497  dev_err(ap->dev, "DMA not pending eot=0x%08x "
498  "err=0x%08x tag=0x%02x pending=%d\n",
499  tfr_reg, err_reg, tag,
500  hsdevp->dma_pending[tag]);
501  }
502 
503  if ((host_pvt.dma_interrupt_count % 2) == 0)
504  sata_dwc_dma_xfer_complete(ap, 1);
505 
506  /* Clear the interrupt */
507  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
508  .tfr.low),
509  DMA_CHANNEL(chan));
510  }
511 
512  /* Check for error interrupt. */
513  if (err_reg & DMA_CHANNEL(chan)) {
514  /* TODO Need error handler ! */
515  dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
516  err_reg);
517 
518  /* Clear the interrupt. */
519  out_le32(&(host_pvt.sata_dma_regs->interrupt_clear\
520  .error.low),
521  DMA_CHANNEL(chan));
522  }
523  }
524  spin_unlock_irqrestore(&host->lock, flags);
525  return IRQ_HANDLED;
526 }
527 
528 /*
529  * Function: dma_request_interrupts
530  * arguments: hsdev
531  * returns status
532  * This function registers ISR for a particular DMA channel interrupt
533  */
534 static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
535 {
536  int retval = 0;
537  int chan = host_pvt.dma_channel;
538 
539  if (chan >= 0) {
540  /* Unmask error interrupt */
541  out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.error.low,
542  DMA_ENABLE_CHAN(chan));
543 
544  /* Unmask end-of-transfer interrupt */
545  out_le32(&(host_pvt.sata_dma_regs)->interrupt_mask.tfr.low,
546  DMA_ENABLE_CHAN(chan));
547  }
548 
549  retval = request_irq(irq, dma_dwc_interrupt, 0, "SATA DMA", hsdev);
550  if (retval) {
551  dev_err(host_pvt.dwc_dev, "%s: could not get IRQ %d\n",
552  __func__, irq);
553  return -ENODEV;
554  }
555 
556  /* Mark this interrupt as requested */
557  hsdev->irq_dma = irq;
558  return 0;
559 }
560 
561 /*
562  * Function: map_sg_to_lli
563  * The Synopsys driver has a comment proposing that better performance
564  * is possible by only enabling interrupts on the last item in the linked list.
565  * However, it seems that could be a problem if an error happened on one of the
566  * first items. The transfer would halt, but no error interrupt would occur.
567  * Currently this function sets interrupts enabled for each linked list item:
568  * DMA_CTL_INT_EN.
569  */
570 static int map_sg_to_lli(struct scatterlist *sg, int num_elems,
571  struct lli *lli, dma_addr_t dma_lli,
572  void __iomem *dmadr_addr, int dir)
573 {
574  int i, idx = 0;
575  int fis_len = 0;
576  dma_addr_t next_llp;
577  int bl;
578  int sms_val, dms_val;
579 
580  sms_val = 0;
581  dms_val = 1 + host_pvt.dma_channel;
582  dev_dbg(host_pvt.dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x"
583  " dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
584  (u32)dmadr_addr);
585 
586  bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
587 
588  for (i = 0; i < num_elems; i++, sg++) {
589  u32 addr, offset;
590  u32 sg_len, len;
591 
592  addr = (u32) sg_dma_address(sg);
593  sg_len = sg_dma_len(sg);
594 
595  dev_dbg(host_pvt.dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len"
596  "=%d\n", __func__, i, addr, sg_len);
597 
598  while (sg_len) {
599  if (idx >= SATA_DWC_DMAC_LLI_NUM) {
600  /* The LLI table is not large enough. */
601  dev_err(host_pvt.dwc_dev, "LLI table overrun "
602  "(idx=%d)\n", idx);
603  break;
604  }
605  len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
606  SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
607 
608  offset = addr & 0xffff;
609  if ((offset + sg_len) > 0x10000)
610  len = 0x10000 - offset;
611 
612  /*
613  * Make sure a LLI block is not created that will span
614  * 8K max FIS boundary. If the block spans such a FIS
615  * boundary, there is a chance that a DMA burst will
616  * cross that boundary -- this results in an error in
617  * the host controller.
618  */
619  if (fis_len + len > 8192) {
620  dev_dbg(host_pvt.dwc_dev, "SPLITTING: fis_len="
621  "%d(0x%x) len=%d(0x%x)\n", fis_len,
622  fis_len, len, len);
623  len = 8192 - fis_len;
624  fis_len = 0;
625  } else {
626  fis_len += len;
627  }
628  if (fis_len == 8192)
629  fis_len = 0;
630 
631  /*
632  * Set DMA addresses and lower half of control register
633  * based on direction.
634  */
635  if (dir == DMA_FROM_DEVICE) {
636  lli[idx].dar = cpu_to_le32(addr);
637  lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
638 
639  lli[idx].ctl.low = cpu_to_le32(
640  DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
641  DMA_CTL_SMS(sms_val) |
642  DMA_CTL_DMS(dms_val) |
643  DMA_CTL_SRC_MSIZE(bl) |
644  DMA_CTL_DST_MSIZE(bl) |
645  DMA_CTL_SINC_NOCHANGE |
646  DMA_CTL_SRC_TRWID(2) |
647  DMA_CTL_DST_TRWID(2) |
648  DMA_CTL_INT_EN |
649  DMA_CTL_LLP_SRCEN |
650  DMA_CTL_LLP_DSTEN);
651  } else { /* DMA_TO_DEVICE */
652  lli[idx].sar = cpu_to_le32(addr);
653  lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
654 
655  lli[idx].ctl.low = cpu_to_le32(
656  DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
657  DMA_CTL_SMS(dms_val) |
658  DMA_CTL_DMS(sms_val) |
659  DMA_CTL_SRC_MSIZE(bl) |
660  DMA_CTL_DST_MSIZE(bl) |
661  DMA_CTL_DINC_NOCHANGE |
662  DMA_CTL_SRC_TRWID(2) |
663  DMA_CTL_DST_TRWID(2) |
664  DMA_CTL_INT_EN |
665  DMA_CTL_LLP_SRCEN |
666  DMA_CTL_LLP_DSTEN);
667  }
668 
669  dev_dbg(host_pvt.dwc_dev, "%s setting ctl.high len: "
670  "0x%08x val: 0x%08x\n", __func__,
671  len, DMA_CTL_BLK_TS(len / 4));
672 
673  /* Program the LLI CTL high register */
674  lli[idx].ctl.high = cpu_to_le32(
675  DMA_CTL_BLK_TS(len / 4));
676 
677  /* Program the next pointer. The next pointer must be
678  * the physical address, not the virtual address.
679  */
680  next_llp = (dma_lli +
681  ((idx + 1) * sizeof(struct lli)));
682 
683  /* The last 2 bits encode the list master select. */
684  next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
685 
686  lli[idx].llp = cpu_to_le32(next_llp);
687  idx++;
688  sg_len -= len;
689  addr += len;
690  }
691  }
692 
693  /*
694  * The last next ptr has to be zero and the last control low register
695  * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
696  * and destination enable) set back to 0 (disabled.) This is what tells
697  * the core that this is the last item in the linked list.
698  */
699  if (idx) {
700  lli[idx-1].llp = 0x00000000;
701  lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
702 
703  /* Flush cache to memory */
704  dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
705  DMA_BIDIRECTIONAL);
706  }
707 
708  return idx;
709 }
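
A worked pass through the splitting rules above (editor's sketch):

/*
 * Editor's sketch: consider two 5000-byte sg entries. The first fits in
 * one LLI (fis_len = 5000). For the second, fis_len + 5000 > 8192, so
 * it is trimmed to 8192 - 5000 = 3192 bytes (that LLI ends exactly on
 * the 8K FIS boundary and fis_len resets to 0); the remaining 1808
 * bytes become a third LLI. No LLI ever straddles an 8K boundary.
 */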
710 
711 /*
712  * Function: dma_dwc_xfer_start
713  * arguments: Channel number
714  * Return : None
715  * Enables the DMA channel
716  */
717 static void dma_dwc_xfer_start(int dma_ch)
718 {
719  /* Enable the DMA channel */
720  out_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low),
721  in_le32(&(host_pvt.sata_dma_regs->dma_chan_en.low)) |
722  DMA_ENABLE_CHAN(dma_ch));
723 }
724 
725 static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
726  struct lli *lli, dma_addr_t dma_lli,
727  void __iomem *addr, int dir)
728 {
729  int dma_ch;
730  int num_lli;
731  /* Acquire DMA channel */
732  dma_ch = dma_request_channel();
733  if (dma_ch == -1) {
734  dev_err(host_pvt.dwc_dev, "%s: dma channel unavailable\n",
735  __func__);
736  return -EAGAIN;
737  }
738 
739  /* Convert SG list to linked list of items (LLIs) for AHB DMA */
740  num_lli = map_sg_to_lli(sg, num_elems, lli, dma_lli, addr, dir);
741 
742  dev_dbg(host_pvt.dwc_dev, "%s sg: 0x%p, count: %d lli: %p dma_lli:"
743  " 0x%08x addr: %p lli count: %d\n", __func__, sg, num_elems,
744  lli, (u32)dma_lli, addr, num_lli);
745 
746  clear_chan_interrupts(dma_ch);
747 
748  /* Program the CFG register. */
749  out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.high),
750  DMA_CFG_HW_HS_SRC(dma_ch) | DMA_CFG_HW_HS_DEST(dma_ch) |
751  DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
752  out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].cfg.low),
753  DMA_CFG_HW_CH_PRIOR(dma_ch));
754 
755  /* Program the address of the linked list */
756  out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].llp.low),
757  DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
758 
759  /* Program the CTL register with src enable / dst enable */
760  out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
761  DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
762  return dma_ch;
763 }
764 
765 /*
766  * Function: dma_dwc_exit
767  * arguments: None
768  * returns status
769  * This function exits the SATA DMA driver
770  */
771 static void dma_dwc_exit(struct sata_dwc_device *hsdev)
772 {
773  dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
774  if (host_pvt.sata_dma_regs) {
775  iounmap(host_pvt.sata_dma_regs);
776  host_pvt.sata_dma_regs = NULL;
777  }
778 
779  if (hsdev->irq_dma) {
780  free_irq(hsdev->irq_dma, hsdev);
781  hsdev->irq_dma = 0;
782  }
783 }
784 
785 /*
786  * Function: dma_dwc_init
787  * arguments: hsdev
788  * returns status
789  * This function initializes the SATA DMA driver
790  */
791 static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
792 {
793  int err;
794 
795  err = dma_request_interrupts(hsdev, irq);
796  if (err) {
797  dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
798  " %d\n", __func__, err);
799  goto error_out;
800  }
801 
802  /* Enable DMA */
803  out_le32(&(host_pvt.sata_dma_regs->dma_cfg.low), DMA_EN);
804 
805  dev_notice(host_pvt.dwc_dev, "DMA initialized\n");
806  dev_dbg(host_pvt.dwc_dev, "SATA DMA registers=0x%p\n",
807  host_pvt.sata_dma_regs);
808  return 0;
809 
810 error_out:
811  dma_dwc_exit(hsdev);
812 
813  return err;
814 }
815 
816 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
817 {
818  if (scr > SCR_NOTIFICATION) {
819  dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
820  __func__, scr);
821  return -EINVAL;
822  }
823 
824  *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
825  dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
826  __func__, link->ap->print_id, scr, *val);
827 
828  return 0;
829 }
830 
831 static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
832 {
833  dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
834  __func__, link->ap->print_id, scr, val);
835  if (scr > SCR_NOTIFICATION) {
836  dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
837  __func__, scr);
838  return -EINVAL;
839  }
840  out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
841 
842  return 0;
843 }
844 
845 static u32 core_scr_read(unsigned int scr)
846 {
847  return in_le32((void __iomem *)(host_pvt.scr_addr_sstatus) +\
848  (scr * 4));
849 }
850 
851 static void core_scr_write(unsigned int scr, u32 val)
852 {
853  out_le32((void __iomem *)(host_pvt.scr_addr_sstatus) + (scr * 4),
854  val);
855 }
856 
857 static void clear_serror(void)
858 {
859  u32 val;
860  val = core_scr_read(SCR_ERROR);
861  core_scr_write(SCR_ERROR, val);
862 
863 }
864 
865 static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
866 {
867  out_le32(&hsdev->sata_dwc_regs->intpr,
868  in_le32(&hsdev->sata_dwc_regs->intpr));
869 }
870 
871 static u32 qcmd_tag_to_mask(u8 tag)
872 {
873  return 0x00000001 << (tag & 0x1f);
874 }
875 
876 /* See ahci.c */
877 static void sata_dwc_error_intr(struct ata_port *ap,
878  struct sata_dwc_device *hsdev, uint intpr)
879 {
880  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
881  struct ata_eh_info *ehi = &ap->link.eh_info;
882  unsigned int err_mask = 0, action = 0;
883  struct ata_queued_cmd *qc;
884  u32 serror;
885  u8 status, tag;
886  u32 err_reg;
887 
888  ata_ehi_clear_desc(ehi);
889 
890  serror = core_scr_read(SCR_ERROR);
891  status = ap->ops->sff_check_status(ap);
892 
893  err_reg = in_le32(&(host_pvt.sata_dma_regs->interrupt_status.error.low));
894  tag = ap->link.active_tag;
895 
896  dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
897  "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
898  __func__, serror, intpr, status, host_pvt.dma_interrupt_count,
899  hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
900 
901  /* Clear error register and interrupt bit */
902  clear_serror();
903  clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
904 
905  /* This is the only error happening now. TODO check for exact error */
906 
907  err_mask |= AC_ERR_HOST_BUS;
908  action |= ATA_EH_RESET;
909 
910  /* Pass this on to EH */
911  ehi->serror |= serror;
912  ehi->action |= action;
913 
914  qc = ata_qc_from_tag(ap, tag);
915  if (qc)
916  qc->err_mask |= err_mask;
917  else
918  ehi->err_mask |= err_mask;
919 
920  ata_port_abort(ap);
921 }
922 
923 /*
924  * Function : sata_dwc_isr
925  * arguments : irq, void *dev_instance, struct pt_regs *regs
926  * Return value : irqreturn_t - status of IRQ
927  * This Interrupt handler called via port ops registered function.
928  * .irq_handler = sata_dwc_isr
929  */
930 static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
931 {
932  struct ata_host *host = (struct ata_host *)dev_instance;
933  struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
934  struct ata_port *ap;
935  struct ata_queued_cmd *qc;
936  unsigned long flags;
937  u8 status, tag;
938  int handled, num_processed, port = 0;
939  uint intpr, sactive, sactive2, tag_mask;
940  struct sata_dwc_device_port *hsdevp;
941  host_pvt.sata_dwc_sactive_issued = 0;
942 
943  spin_lock_irqsave(&host->lock, flags);
944 
945  /* Read the interrupt register */
946  intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
947 
948  ap = host->ports[port];
949  hsdevp = HSDEVP_FROM_AP(ap);
950 
951  dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
952  ap->link.active_tag);
953 
954  /* Check for error interrupt */
955  if (intpr & SATA_DWC_INTPR_ERR) {
956  sata_dwc_error_intr(ap, hsdev, intpr);
957  handled = 1;
958  goto DONE;
959  }
960 
961  /* Check for DMA SETUP FIS (FP DMA) interrupt */
962  if (intpr & SATA_DWC_INTPR_NEWFP) {
963  clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
964 
965  tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
966  dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
967  if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
968  dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
969 
970  host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
971 
972  qc = ata_qc_from_tag(ap, tag);
973  /*
974  * Start FP DMA for NCQ command. At this point the tag is the
975  * active tag. It is the tag that matches the command about to
976  * be completed.
977  */
978  qc->ap->link.active_tag = tag;
979  sata_dwc_bmdma_start_by_tag(qc, tag);
980 
981  handled = 1;
982  goto DONE;
983  }
984  sactive = core_scr_read(SCR_ACTIVE);
985  tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
986 
987  /* If no sactive issued and tag_mask is zero then this is not NCQ */
988  if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
989  if (ap->link.active_tag == ATA_TAG_POISON)
990  tag = 0;
991  else
992  tag = ap->link.active_tag;
993  qc = ata_qc_from_tag(ap, tag);
994 
995  /* DEV interrupt w/ no active qc? */
996  if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
997  dev_err(ap->dev, "%s interrupt with no active qc "
998  "qc=%p\n", __func__, qc);
999  ap->ops->sff_check_status(ap);
1000  handled = 1;
1001  goto DONE;
1002  }
1003  status = ap->ops->sff_check_status(ap);
1004 
1005  qc->ap->link.active_tag = tag;
1006  hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1007 
1008  if (status & ATA_ERR) {
1009  dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
1010  sata_dwc_qc_complete(ap, qc, 1);
1011  handled = 1;
1012  goto DONE;
1013  }
1014 
1015  dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
1016  __func__, get_prot_descript(qc->tf.protocol));
1017 DRVSTILLBUSY:
1018  if (ata_is_dma(qc->tf.protocol)) {
1019  /*
1020  * Each DMA transaction produces 2 interrupts. The DMAC
1021  * transfer complete interrupt and the SATA controller
1022  * operation done interrupt. The command should be
1023  * completed only after both interrupts are seen.
1024  */
1025  host_pvt.dma_interrupt_count++;
1026  if (hsdevp->dma_pending[tag] ==
1027  SATA_DWC_DMA_PENDING_NONE) {
1028  dev_err(ap->dev, "%s: DMA not pending "
1029  "intpr=0x%08x status=0x%08x pending"
1030  "=%d\n", __func__, intpr, status,
1031  hsdevp->dma_pending[tag]);
1032  }
1033 
1034  if ((host_pvt.dma_interrupt_count % 2) == 0)
1035  sata_dwc_dma_xfer_complete(ap, 1);
1036  } else if (ata_is_pio(qc->tf.protocol)) {
1037  ata_sff_hsm_move(ap, qc, status, 0);
1038  handled = 1;
1039  goto DONE;
1040  } else {
1041  if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1042  goto DRVSTILLBUSY;
1043  }
1044 
1045  handled = 1;
1046  goto DONE;
1047  }
1048 
1049  /*
1050  * This is a NCQ command. At this point we need to figure out for which
1051  * tags we have gotten a completion interrupt. One interrupt may serve
1052  * as completion for more than one operation when commands are queued
1053  * (NCQ). We need to process each completed command.
1054  */
1055 
1056  /* process completed commands */
1057  sactive = core_scr_read(SCR_ACTIVE);
1058  tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
1059 
1060  if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
1061  tag_mask > 1) {
1062  dev_dbg(ap->dev, "%s NCQ:sactive=0x%08x sactive_issued=0x%08x"
1063  "tag_mask=0x%08x\n", __func__, sactive,
1064  host_pvt.sata_dwc_sactive_issued, tag_mask);
1065  }
1066 
1067  if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
1068  (host_pvt.sata_dwc_sactive_issued)) {
1069  dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
1070  "(host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask"
1071  "=0x%08x\n", sactive, host_pvt.sata_dwc_sactive_issued,
1072  tag_mask);
1073  }
1074 
1075  /* read just to clear ... not bad if currently still busy */
1076  status = ap->ops->sff_check_status(ap);
1077  dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
1078 
1079  tag = 0;
1080  num_processed = 0;
1081  while (tag_mask) {
1082  num_processed++;
1083  while (!(tag_mask & 0x00000001)) {
1084  tag++;
1085  tag_mask <<= 1;
1086  }
1087 
1088  tag_mask &= (~0x00000001);
1089  qc = ata_qc_from_tag(ap, tag);
1090 
1091  /* To be picked up by completion functions */
1092  qc->ap->link.active_tag = tag;
1093  hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
1094 
1095  /* Let libata/scsi layers handle error */
1096  if (status & ATA_ERR) {
1097  dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
1098  status);
1099  sata_dwc_qc_complete(ap, qc, 1);
1100  handled = 1;
1101  goto DONE;
1102  }
1103 
1104  /* Process completed command */
1105  dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
1106  get_prot_descript(qc->tf.protocol));
1107  if (ata_is_dma(qc->tf.protocol)) {
1108  host_pvt.dma_interrupt_count++;
1109  if (hsdevp->dma_pending[tag] ==
1110  SATA_DWC_DMA_PENDING_NONE)
1111  dev_warn(ap->dev, "%s: DMA not pending?\n",
1112  __func__);
1113  if ((host_pvt.dma_interrupt_count % 2) == 0)
1114  sata_dwc_dma_xfer_complete(ap, 1);
1115  } else {
1116  if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
1117  goto STILLBUSY;
1118  }
1119  continue;
1120 
1121 STILLBUSY:
1122  ap->stats.idle_irq++;
1123  dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
1124  ap->print_id);
1125  } /* while tag_mask */
1126 
1127  /*
1128  * Check to see if any commands completed while we were processing our
1129  * initial set of completed commands (read status clears interrupts,
1130  * so we might miss a completed command interrupt if one came in while
1131  * we were processing --we read status as part of processing a completed
1132  * command).
1133  */
1134  sactive2 = core_scr_read(SCR_ACTIVE);
1135  if (sactive2 != sactive) {
1136  dev_dbg(ap->dev, "More completed - sactive=0x%x sactive2"
1137  "=0x%x\n", sactive, sactive2);
1138  }
1139  handled = 1;
1140 
1141 DONE:
1142  spin_unlock_irqrestore(&host->lock, flags);
1143  return IRQ_RETVAL(handled);
1144 }
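
The tag bookkeeping in the NCQ path above is compact enough to deserve a worked example (editor's note):

/*
 * Editor's note: tag_mask = (sata_dwc_sactive_issued | sactive) ^ sactive
 * keeps exactly the tags that were issued but are no longer active in
 * SActive, i.e. the completed ones. Example: issued = 0x0000000c
 * (tags 2 and 3), sactive = 0x00000008 (tag 3 still running) gives
 * tag_mask = 0x0000000c ^ 0x00000008 = 0x00000004, so only tag 2 is
 * processed as complete.
 */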
1145 
1146 static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
1147 {
1148  struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
1149 
1150  if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
1151  out_le32(&(hsdev->sata_dwc_regs->dmacr),
1152  SATA_DWC_DMACR_RX_CLEAR(
1153  in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1154  } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
1155  out_le32(&(hsdev->sata_dwc_regs->dmacr),
1156  SATA_DWC_DMACR_TX_CLEAR(
1157  in_le32(&(hsdev->sata_dwc_regs->dmacr))));
1158  } else {
1159  /*
1160  * This should not happen, it indicates the driver is out of
1161  * sync. If it does happen, clear dmacr anyway.
1162  */
1163  dev_err(host_pvt.dwc_dev, "%s DMA protocol RX and "
1164  "TX DMA not pending tag=0x%02x pending=%d"
1165  " dmacr: 0x%08x\n", __func__, tag,
1166  hsdevp->dma_pending[tag],
1167  in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1168  out_le32(&(hsdev->sata_dwc_regs->dmacr),
1169  SATA_DWC_DMACR_TXRXCH_CLEAR);
1170  }
1171 }
1172 
1173 static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
1174 {
1175  struct ata_queued_cmd *qc;
1176  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1177  struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1178  u8 tag = 0;
1179 
1180  tag = ap->link.active_tag;
1181  qc = ata_qc_from_tag(ap, tag);
1182  if (!qc) {
1183  dev_err(ap->dev, "failed to get qc");
1184  return;
1185  }
1186 
1187 #ifdef DEBUG_NCQ
1188  if (tag > 0) {
1189  dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
1190  "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
1191  get_dma_dir_descript(qc->dma_dir),
1192  get_prot_descript(qc->tf.protocol),
1193  in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1194  }
1195 #endif
1196 
1197  if (ata_is_dma(qc->tf.protocol)) {
1198  if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
1199  dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
1200  "pending dmacr: 0x%08x\n", __func__,
1201  in_le32(&(hsdev->sata_dwc_regs->dmacr)));
1202  }
1203 
1204  hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
1205  sata_dwc_qc_complete(ap, qc, check_status);
1206  ap->link.active_tag = ATA_TAG_POISON;
1207  } else {
1208  sata_dwc_qc_complete(ap, qc, check_status);
1209  }
1210 }
1211 
1212 static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
1213  u32 check_status)
1214 {
1215  u8 status = 0;
1216  u32 mask = 0x0;
1217  u8 tag = qc->tag;
1218  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1219  host_pvt.sata_dwc_sactive_queued = 0;
1220  dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
1221 
1222  if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
1223  dev_err(ap->dev, "TX DMA PENDING\n");
1224  else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
1225  dev_err(ap->dev, "RX DMA PENDING\n");
1226  dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u:"
1227  " protocol=%d\n", qc->tf.command, status, ap->print_id,
1228  qc->tf.protocol);
1229 
1230  /* clear active bit */
1231  mask = (~(qcmd_tag_to_mask(tag)));
1232  host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
1233  & mask;
1234  host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
1235  & mask;
1236  ata_qc_complete(qc);
1237  return 0;
1238 }
1239 
1240 static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
1241 {
1242  /* Enable selective interrupts by setting the interrupt mask register */
1243  out_le32(&hsdev->sata_dwc_regs->intmr,
1244  SATA_DWC_INTMR_ERRM |
1245  SATA_DWC_INTMR_NEWFPM |
1246  SATA_DWC_INTMR_PMABRTM |
1247  SATA_DWC_INTMR_DMATM);
1248  /*
1249  * Unmask the error bits that should trigger an error interrupt by
1250  * setting the error mask register.
1251  */
1252  out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
1253 
1254  dev_dbg(host_pvt.dwc_dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
1255  __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
1256  in_le32(&hsdev->sata_dwc_regs->errmr));
1257 }
1258 
1259 static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
1260 {
1261  port->cmd_addr = (void *)base + 0x00;
1262  port->data_addr = (void *)base + 0x00;
1263 
1264  port->error_addr = (void *)base + 0x04;
1265  port->feature_addr = (void *)base + 0x04;
1266 
1267  port->nsect_addr = (void *)base + 0x08;
1268 
1269  port->lbal_addr = (void *)base + 0x0c;
1270  port->lbam_addr = (void *)base + 0x10;
1271  port->lbah_addr = (void *)base + 0x14;
1272 
1273  port->device_addr = (void *)base + 0x18;
1274  port->command_addr = (void *)base + 0x1c;
1275  port->status_addr = (void *)base + 0x1c;
1276 
1277  port->altstatus_addr = (void *)base + 0x20;
1278  port->ctl_addr = (void *)base + 0x20;
1279 }
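
A note on the layout used above (editor's addition):

/*
 * Editor's note: the DWC core exposes the shadow taskfile registers as
 * 32-bit words on a 4-byte stride, so every slot above is base + 4*n,
 * and read/write register pairs (data/cmd at 0x00, error/feature at
 * 0x04, status/command at 0x1c, altstatus/ctl at 0x20) share an offset.
 */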
1280 
1281 /*
1282  * Function : sata_dwc_port_start
1283  * arguments : struct ata_ioports *port
1284  * Return value : returns 0 if success, error code otherwise
1285  * This function allocates the scatter gather LLI table for AHB DMA
1286  */
1287 static int sata_dwc_port_start(struct ata_port *ap)
1288 {
1289  int err = 0;
1290  struct sata_dwc_device *hsdev;
1291  struct sata_dwc_device_port *hsdevp = NULL;
1292  struct device *pdev;
1293  int i;
1294 
1295  hsdev = HSDEV_FROM_AP(ap);
1296 
1297  dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
1298 
1299  hsdev->host = ap->host;
1300  pdev = ap->host->dev;
1301  if (!pdev) {
1302  dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
1303  err = -ENODEV;
1304  goto CLEANUP;
1305  }
1306 
1307  /* Allocate Port Struct */
1308  hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
1309  if (!hsdevp) {
1310  dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
1311  err = -ENOMEM;
1312  goto CLEANUP;
1313  }
1314  hsdevp->hsdev = hsdev;
1315 
1316  for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
1317  hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
1318 
1319  ap->bmdma_prd = 0; /* set these so libata doesn't use them */
1320  ap->bmdma_prd_dma = 0;
1321 
1322  /*
1323  * DMA - Assign scatter gather LLI table. We can't use the libata
1324  * version since its PRD is IDE PCI specific.
1325  */
1326  for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1327  hsdevp->llit[i] = dma_alloc_coherent(pdev,
1328  SATA_DWC_DMAC_LLI_TBL_SZ,
1329  &(hsdevp->llit_dma[i]),
1330  GFP_ATOMIC);
1331  if (!hsdevp->llit[i]) {
1332  dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
1333  __func__);
1334  err = -ENOMEM;
1335  goto CLEANUP_ALLOC;
1336  }
1337  }
1338 
1339  if (ap->port_no == 0) {
1340  dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
1341  __func__);
1342  out_le32(&hsdev->sata_dwc_regs->dmacr,
1343  SATA_DWC_DMACR_TXRXCH_CLEAR);
1344 
1345  dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
1346  __func__);
1347  out_le32(&hsdev->sata_dwc_regs->dbtsr,
1348  (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1349  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
1350  }
1351 
1352  /* Clear any error bits before libata starts issuing commands */
1353  clear_serror();
1354  ap->private_data = hsdevp;
1355  dev_dbg(ap->dev, "%s: done\n", __func__);
1356  return 0;
1357 
1358 CLEANUP_ALLOC:
1359  kfree(hsdevp);
1360 CLEANUP:
1361  dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
1362  return err;
1363 }
1364 
1365 static void sata_dwc_port_stop(struct ata_port *ap)
1366 {
1367  int i;
1368  struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1369  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1370 
1371  dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
1372 
1373  if (hsdevp && hsdev) {
1374  /* deallocate LLI table */
1375  for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
1376  dma_free_coherent(ap->host->dev,
1377  SATA_DWC_DMAC_LLI_TBL_SZ,
1378  hsdevp->llit[i], hsdevp->llit_dma[i]);
1379  }
1380 
1381  kfree(hsdevp);
1382  }
1383  ap->private_data = NULL;
1384 }
1385 
1386 /*
1387  * Function : sata_dwc_exec_command_by_tag
1388  * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
1389  * Return value : None
1390  * This function keeps track of individual command tag ids and calls
1391  * ata_exec_command in libata
1392  */
1393 static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
1394  struct ata_taskfile *tf,
1395  u8 tag, u32 cmd_issued)
1396 {
1397  unsigned long flags;
1398  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1399 
1400  dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
1401  ata_get_cmd_descript(tf->command), tag);
1402 
1403  spin_lock_irqsave(&ap->host->lock, flags);
1404  hsdevp->cmd_issued[tag] = cmd_issued;
1405  spin_unlock_irqrestore(&ap->host->lock, flags);
1406  /*
1407  * Clear SError before executing a new command.
1408  * sata_dwc_scr_write and read can not be used here. Clearing the PM
1409  * managed SError register for the disk needs to be done before the
1410  * task file is loaded.
1411  */
1412  clear_serror();
1413  ata_sff_exec_command(ap, tf);
1414 }
1415 
1416 static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
1417 {
1418  sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
1419  SATA_DWC_CMD_ISSUED_PEND);
1420 }
1421 
1422 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
1423 {
1424  u8 tag = qc->tag;
1425 
1426  if (ata_is_ncq(qc->tf.protocol)) {
1427  dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1428  __func__, qc->ap->link.sactive, tag);
1429  } else {
1430  tag = 0;
1431  }
1432  sata_dwc_bmdma_setup_by_tag(qc, tag);
1433 }
1434 
1435 static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
1436 {
1437  int start_dma;
1438  u32 reg, dma_chan;
1439  struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
1440  struct ata_port *ap = qc->ap;
1441  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1442  int dir = qc->dma_dir;
1443  dma_chan = hsdevp->dma_chan[tag];
1444 
1445  if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
1446  start_dma = 1;
1447  if (dir == DMA_TO_DEVICE)
1448  hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
1449  else
1450  hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
1451  } else {
1452  dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
1453  "(tag=%d) DMA NOT started\n", __func__,
1454  hsdevp->cmd_issued[tag], tag);
1455  start_dma = 0;
1456  }
1457 
1458  dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
1459  "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
1460  get_dma_dir_descript(qc->dma_dir), start_dma);
1461  sata_dwc_tf_dump(&(qc->tf));
1462 
1463  if (start_dma) {
1464  reg = core_scr_read(SCR_ERROR);
1465  if (reg & SATA_DWC_SERROR_ERR_BITS) {
1466  dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
1467  __func__, reg);
1468  }
1469 
1470  if (dir == DMA_TO_DEVICE)
1471  out_le32(&hsdev->sata_dwc_regs->dmacr,
1472  SATA_DWC_DMACR_TXCHEN);
1473  else
1474  out_le32(&hsdev->sata_dwc_regs->dmacr,
1475  SATA_DWC_DMACR_RXCHEN);
1476 
1477  /* Enable AHB DMA transfer on the specified channel */
1478  dma_dwc_xfer_start(dma_chan);
1479  }
1480 }
1481 
1482 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
1483 {
1484  u8 tag = qc->tag;
1485 
1486  if (ata_is_ncq(qc->tf.protocol)) {
1487  dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
1488  __func__, qc->ap->link.sactive, tag);
1489  } else {
1490  tag = 0;
1491  }
1492  dev_dbg(qc->ap->dev, "%s\n", __func__);
1493  sata_dwc_bmdma_start_by_tag(qc, tag);
1494 }
1495 
1496 /*
1497  * Function : sata_dwc_qc_prep_by_tag
1498  * arguments : ata_queued_cmd *qc, u8 tag
1499  * Return value : None
1500  * qc_prep for a particular queued command based on tag
1501  */
1502 static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
1503 {
1504  struct scatterlist *sg = qc->sg;
1505  struct ata_port *ap = qc->ap;
1506  int dma_chan;
1507  struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
1508  struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
1509 
1510  dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
1511  __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
1512  qc->n_elem);
1513 
1514  dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
1515  hsdevp->llit_dma[tag],
1516  (void *__iomem)(&hsdev->sata_dwc_regs->dmadr), qc->dma_dir);
1517  if (dma_chan < 0) {
1518  dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
1519  __func__, dma_chan);
1520  return;
1521  }
1522  hsdevp->dma_chan[tag] = dma_chan;
1523 }
1524 
1525 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
1526 {
1527  u32 sactive;
1528  u8 tag = qc->tag;
1529  struct ata_port *ap = qc->ap;
1530 
1531 #ifdef DEBUG_NCQ
1532  if (qc->tag > 0 || ap->link.sactive > 1)
1533  dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
1534  "prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
1535  __func__, ap->print_id, qc->tf.command,
1536  ata_get_cmd_descript(qc->tf.command),
1537  qc->tag, get_prot_descript(qc->tf.protocol),
1538  ap->link.active_tag, ap->link.sactive);
1539 #endif
1540 
1541  if (!ata_is_ncq(qc->tf.protocol))
1542  tag = 0;
1543  sata_dwc_qc_prep_by_tag(qc, tag);
1544 
1545  if (ata_is_ncq(qc->tf.protocol)) {
1546  sactive = core_scr_read(SCR_ACTIVE);
1547  sactive |= (0x00000001 << tag);
1548  core_scr_write(SCR_ACTIVE, sactive);
1549 
1550  dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
1551  "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
1552  sactive);
1553 
1554  ap->ops->sff_tf_load(ap, &qc->tf);
1555  sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
1556  SATA_DWC_CMD_ISSUED_PEND);
1557  } else {
1558  ata_sff_qc_issue(qc);
1559  }
1560  return 0;
1561 }
1562 
1563 /*
1564  * Function : sata_dwc_qc_prep
1565  * arguments : ata_queued_cmd *qc
1566  * Return value : None
1567  * qc_prep for a particular queued command
1568  */
1569 
1570 static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
1571 {
1572  if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
1573  return;
1574 
1575 #ifdef DEBUG_NCQ
1576  if (qc->tag > 0)
1577  dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
1578  __func__, qc->tag, qc->ap->link.active_tag);
1579 
1580  return ;
1581 #endif
1582 }
1583 
1584 static void sata_dwc_error_handler(struct ata_port *ap)
1585 {
1586  ata_sff_error_handler(ap);
1587 }
1588 
1589 int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
1590  unsigned long deadline)
1591 {
1592  struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
1593  int ret;
1594 
1595  ret = sata_sff_hardreset(link, class, deadline);
1596 
1597  sata_dwc_enable_interrupts(hsdev);
1598 
1599  /* Reconfigure the DMA control register */
1600  out_le32(&hsdev->sata_dwc_regs->dmacr,
1601  SATA_DWC_DMACR_TXRXCH_CLEAR);
1602 
1603  /* Reconfigure the DMA Burst Transaction Size register */
1604  out_le32(&hsdev->sata_dwc_regs->dbtsr,
1605  SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
1606  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
1607 
1608  return ret;
1609 }
1610 
1611 /*
1612  * scsi mid-layer and libata interface structures
1613  */
1614 static struct scsi_host_template sata_dwc_sht = {
1615  ATA_NCQ_SHT(DRV_NAME),
1616  /*
1617  * test-only: Currently this driver doesn't handle NCQ
1618  * correctly. We enable NCQ but set the queue depth to a
1619  * max of 1. This will get fixed in in a future release.
1620  */
1621  .sg_tablesize = LIBATA_MAX_PRD,
1622  .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
1623  .dma_boundary = ATA_DMA_BOUNDARY,
1624 };
1625 
1626 static struct ata_port_operations sata_dwc_ops = {
1627  .inherits = &ata_sff_port_ops,
1628 
1629  .error_handler = sata_dwc_error_handler,
1630  .hardreset = sata_dwc_hardreset,
1631 
1632  .qc_prep = sata_dwc_qc_prep,
1633  .qc_issue = sata_dwc_qc_issue,
1634 
1635  .scr_read = sata_dwc_scr_read,
1636  .scr_write = sata_dwc_scr_write,
1637 
1638  .port_start = sata_dwc_port_start,
1639  .port_stop = sata_dwc_port_stop,
1640 
1641  .bmdma_setup = sata_dwc_bmdma_setup,
1642  .bmdma_start = sata_dwc_bmdma_start,
1643 };
1644 
1645 static const struct ata_port_info sata_dwc_port_info[] = {
1646  {
1647  .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
1648  .pio_mask = ATA_PIO4,
1649  .udma_mask = ATA_UDMA6,
1650  .port_ops = &sata_dwc_ops,
1651  },
1652 };
1653 
1654 static int sata_dwc_probe(struct platform_device *ofdev)
1655 {
1656  struct sata_dwc_device *hsdev;
1657  u32 idr, versionr;
1658  char *ver = (char *)&versionr;
1659  u8 *base = NULL;
1660  int err = 0;
1661  int irq, rc;
1662  struct ata_host *host;
1663  struct ata_port_info pi = sata_dwc_port_info[0];
1664  const struct ata_port_info *ppi[] = { &pi, NULL };
1665  struct device_node *np = ofdev->dev.of_node;
1666  u32 dma_chan;
1667 
1668  /* Allocate DWC SATA device */
1669  hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
1670  if (hsdev == NULL) {
1671  dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
1672  err = -ENOMEM;
1673  goto error;
1674  }
1675 
1676  if (of_property_read_u32(np, "dma-channel", &dma_chan)) {
1677  dev_warn(&ofdev->dev, "no dma-channel property set."
1678  " Use channel 0\n");
1679  dma_chan = 0;
1680  }
1681  host_pvt.dma_channel = dma_chan;
1682 
1683  /* Ioremap SATA registers */
1684  base = of_iomap(ofdev->dev.of_node, 0);
1685  if (!base) {
1686  dev_err(&ofdev->dev, "ioremap failed for SATA register"
1687  " address\n");
1688  err = -ENODEV;
1689  goto error_kmalloc;
1690  }
1691  hsdev->reg_base = base;
1692  dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
1693 
1694  /* Synopsys DWC SATA specific Registers */
1695  hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
1696 
1697  /* Allocate and fill host */
1698  host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
1699  if (!host) {
1700  dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
1701  err = -ENOMEM;
1702  goto error_iomap;
1703  }
1704 
1705  host->private_data = hsdev;
1706 
1707  /* Setup port */
1708  host->ports[0]->ioaddr.cmd_addr = base;
1709  host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
1710  host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
1711  sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
1712 
1713  /* Read the ID and Version Registers */
1714  idr = in_le32(&hsdev->sata_dwc_regs->idr);
1715  versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
1716  dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
1717  idr, ver[0], ver[1], ver[2]);
1718 
1719  /* Get SATA DMA interrupt number */
1720  irq = irq_of_parse_and_map(ofdev->dev.of_node, 1);
1721  if (irq == NO_IRQ) {
1722  dev_err(&ofdev->dev, "no SATA DMA irq\n");
1723  err = -ENODEV;
1724  goto error_out;
1725  }
1726 
1727  /* Get physical SATA DMA register base address */
1728  host_pvt.sata_dma_regs = of_iomap(ofdev->dev.of_node, 1);
1729  if (!(host_pvt.sata_dma_regs)) {
1730  dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
1731  " address\n");
1732  err = -ENODEV;
1733  goto error_out;
1734  }
1735 
1736  /* Save dev for later use in dev_xxx() routines */
1737  host_pvt.dwc_dev = &ofdev->dev;
1738 
1739  /* Initialize AHB DMAC */
1740  dma_dwc_init(hsdev, irq);
1741 
1742  /* Enable SATA Interrupts */
1743  sata_dwc_enable_interrupts(hsdev);
1744 
1745  /* Get SATA interrupt number */
1746  irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1747  if (irq == NO_IRQ) {
1748  dev_err(&ofdev->dev, "no SATA DMA irq\n");
1749  err = -ENODEV;
1750  goto error_out;
1751  }
1752 
1753  /*
1754  * Now, register with libATA core, this will also initiate the
1755  * device discovery process, invoking our port_start() handler &
1756  * error_handler() to execute a dummy Softreset EH session
1757  */
1758  rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
1759 
1760  if (rc != 0)
1761  dev_err(&ofdev->dev, "failed to activate host");
1762 
1763  dev_set_drvdata(&ofdev->dev, host);
1764  return 0;
1765 
1766 error_out:
1767  /* Free SATA DMA resources */
1768  dma_dwc_exit(hsdev);
1769 
1770 error_iomap:
1771  iounmap(base);
1772 error_kmalloc:
1773  kfree(hsdev);
1774 error:
1775  return err;
1776 }
1777 
1778 static int sata_dwc_remove(struct platform_device *ofdev)
1779 {
1780  struct device *dev = &ofdev->dev;
1781  struct ata_host *host = dev_get_drvdata(dev);
1782  struct sata_dwc_device *hsdev = host->private_data;
1783 
1784  ata_host_detach(host);
1785  dev_set_drvdata(dev, NULL);
1786 
1787  /* Free SATA DMA resources */
1788  dma_dwc_exit(hsdev);
1789 
1790  iounmap(hsdev->reg_base);
1791  kfree(hsdev);
1792  kfree(host);
1793  dev_dbg(&ofdev->dev, "done\n");
1794  return 0;
1795 }
1796 
1797 static const struct of_device_id sata_dwc_match[] = {
1798  { .compatible = "amcc,sata-460ex", },
1799  {}
1800 };
1801 MODULE_DEVICE_TABLE(of, sata_dwc_match);
1802 
1803 static struct platform_driver sata_dwc_driver = {
1804  .driver = {
1805  .name = DRV_NAME,
1806  .owner = THIS_MODULE,
1807  .of_match_table = sata_dwc_match,
1808  },
1809  .probe = sata_dwc_probe,
1810  .remove = sata_dwc_remove,
1811 };
1812 
1813 module_platform_driver(sata_dwc_driver);
1814 
1815 MODULE_LICENSE("GPL");
1816 MODULE_AUTHOR("Mark Miesfeld <[email protected]>");
1817 MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
1818 MODULE_VERSION(DRV_VERSION);
1819 