sata_nv.c (Linux Kernel 3.7.1)
1 /*
2  * sata_nv.c - NVIDIA nForce SATA
3  *
4  * Copyright 2004 NVIDIA Corp. All rights reserved.
5  * Copyright 2004 Andrew Chew
6  *
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2, or (at your option)
11  * any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; see the file COPYING. If not, write to
20  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  *
23  * libata documentation is available via 'make {ps|pdf}docs',
24  * as Documentation/DocBook/libata.*
25  *
26  * No hardware documentation available outside of NVIDIA.
27  * This driver programs the NVIDIA SATA controller in a similar
28  * fashion as with other PCI IDE BMDMA controllers, with a few
29  * NV-specific details such as register offsets, SATA phy location,
30  * hotplug info, etc.
31  *
32  * CK804/MCP04 controllers support an alternate programming interface
33  * similar to the ADMA specification (with some modifications).
34  * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35  * sent through the legacy interface.
36  *
37  */
38 
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/gfp.h>
42 #include <linux/pci.h>
43 #include <linux/init.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/interrupt.h>
47 #include <linux/device.h>
48 #include <scsi/scsi_host.h>
49 #include <scsi/scsi_device.h>
50 #include <linux/libata.h>
51 
52 #define DRV_NAME "sata_nv"
53 #define DRV_VERSION "3.5"
54 
55 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
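/*
 * Note: unlike the legacy BMDMA path, which is bounded by
 * ATA_DMA_BOUNDARY (0xffffUL, i.e. segments may not cross 64 KiB),
 * the ADMA engine only requires segments to stay within a 32-bit
 * address. nv_adma_slave_config() below picks one or the other,
 * roughly:
 *
 *	boundary = atapi ? ATA_DMA_BOUNDARY : NV_ADMA_DMA_BOUNDARY;
 *
 * where "atapi" stands in for the ATAPI-device check done there.
 */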
56 
57 enum {
58  NV_MMIO_BAR = 5,
59 
60  NV_PORTS = 2,
61  NV_PIO_MASK = ATA_PIO4,
62  NV_MWDMA_MASK = ATA_MWDMA2,
63  NV_UDMA_MASK = ATA_UDMA6,
64  NV_PORT0_SCR_REG_OFFSET = 0x00,
65  NV_PORT1_SCR_REG_OFFSET = 0x40,
66 
67  /* INT_STATUS/ENABLE */
68  NV_INT_STATUS = 0x10,
69  NV_INT_ENABLE = 0x11,
70  NV_INT_STATUS_CK804 = 0x440,
71  NV_INT_ENABLE_CK804 = 0x441,
72 
73  /* INT_STATUS/ENABLE bits */
74  NV_INT_DEV = 0x01,
75  NV_INT_PM = 0x02,
76  NV_INT_ADDED = 0x04,
77  NV_INT_REMOVED = 0x08,
78 
79  NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
80 
81  NV_INT_ALL = 0x0f,
82  NV_INT_MASK = NV_INT_DEV |
83  NV_INT_ADDED | NV_INT_REMOVED,
84 
85  /* INT_CONFIG */
86  NV_INT_CONFIG = 0x12,
87  NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
88 
89  // For PCI config register 20
90  NV_MCP_SATA_CFG_20 = 0x50,
91  NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
92  NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
93  NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
94  NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
95  NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
96 
97  NV_ADMA_MAX_CPBS = 32,
98  NV_ADMA_CPB_SZ = 128,
99  NV_ADMA_APRD_SZ = 16,
100  NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
101  NV_ADMA_APRD_SZ,
102  NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
103  NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
104  NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
105  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
106 
107  /* BAR5 offset to ADMA general registers */
108  NV_ADMA_GEN = 0x400,
109  NV_ADMA_GEN_CTL = 0x00,
110  NV_ADMA_NOTIFIER_CLEAR = 0x30,
111 
112  /* BAR5 offset to ADMA ports */
113  NV_ADMA_PORT = 0x480,
114 
115  /* size of ADMA port register space */
116  NV_ADMA_PORT_SIZE = 0x100,
117 
118  /* ADMA port registers */
119  NV_ADMA_CTL = 0x40,
120  NV_ADMA_CPB_COUNT = 0x42,
121  NV_ADMA_NEXT_CPB_IDX = 0x43,
122  NV_ADMA_STAT = 0x44,
123  NV_ADMA_CPB_BASE_LOW = 0x48,
124  NV_ADMA_CPB_BASE_HIGH = 0x4C,
125  NV_ADMA_APPEND = 0x50,
126  NV_ADMA_NOTIFIER = 0x68,
127  NV_ADMA_NOTIFIER_ERROR = 0x6C,
128 
129  /* NV_ADMA_CTL register bits */
130  NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
131  NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
132  NV_ADMA_CTL_GO = (1 << 7),
133  NV_ADMA_CTL_AIEN = (1 << 8),
136 
137  /* CPB response flag bits */
138  NV_CPB_RESP_DONE = (1 << 0),
139  NV_CPB_RESP_ATA_ERR = (1 << 3),
140  NV_CPB_RESP_CMD_ERR = (1 << 4),
141  NV_CPB_RESP_CPB_ERR = (1 << 7),
142 
143  /* CPB control flag bits */
144  NV_CPB_CTL_CPB_VALID = (1 << 0),
145  NV_CPB_CTL_QUEUE = (1 << 1),
146  NV_CPB_CTL_APRD_VALID = (1 << 2),
147  NV_CPB_CTL_IEN = (1 << 3),
148  NV_CPB_CTL_FPDMA = (1 << 4),
149 
150  /* APRD flags */
151  NV_APRD_WRITE = (1 << 1),
152  NV_APRD_END = (1 << 2),
153  NV_APRD_CONT = (1 << 3),
154 
155  /* NV_ADMA_STAT flags */
156  NV_ADMA_STAT_TIMEOUT = (1 << 0),
157  NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
158  NV_ADMA_STAT_HOTPLUG = (1 << 2),
159  NV_ADMA_STAT_CPBERR = (1 << 4),
160  NV_ADMA_STAT_SERROR = (1 << 5),
161  NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
162  NV_ADMA_STAT_IDLE = (1 << 8),
163  NV_ADMA_STAT_LEGACY = (1 << 9),
164  NV_ADMA_STAT_STOPPED = (1 << 10),
165  NV_ADMA_STAT_DONE = (1 << 12),
168 
169  /* port flags */
170  NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
171  NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
172 
173  /* MCP55 reg offset */
174  NV_CTL_MCP55 = 0x400,
175  NV_INT_STATUS_MCP55 = 0x440,
176  NV_INT_ENABLE_MCP55 = 0x444,
177  NV_NCQ_REG_MCP55 = 0x448,
178 
179  /* MCP55 */
180  NV_INT_ALL_MCP55 = 0xffff,
181  NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
182  NV_INT_MASK_MCP55 = 0xfff8,
183 
184  /* SWNCQ ENABLE BITS */
185  NV_CTL_PRI_SWNCQ = 0x02,
186  NV_CTL_SEC_SWNCQ = 0x04,
187 
188  /* SW NCQ status bits */
189  NV_SWNCQ_IRQ_DEV = (1 << 0),
190  NV_SWNCQ_IRQ_PM = (1 << 1),
191  NV_SWNCQ_IRQ_ADDED = (1 << 2),
192  NV_SWNCQ_IRQ_REMOVED = (1 << 3),
193 
194  NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
195  NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
196  NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
197  NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
198 
199  NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
200  NV_SWNCQ_IRQ_REMOVED,
201 
202 };
203 
204 /* ADMA Physical Region Descriptor - one SG segment */
205 struct nv_adma_prd {
206  __le64 addr;
207  __le32 len;
208  u8 flags;
209  u8 packet_len;
210  __le16 reserved;
211 };
212 
213 enum nv_adma_regbits {
214  CMDEND = (1 << 15), /* end of command list */
215  WNB = (1 << 14), /* wait-not-BSY */
216  IGN = (1 << 13), /* ignore this entry */
217  CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
218  DA2 = (1 << (2 + 8)),
219  DA1 = (1 << (1 + 8)),
220  DA0 = (1 << (0 + 8)),
221 };
222 
223 /* ADMA Command Parameter Block
224  The first 5 SG segments are stored inside the Command Parameter Block itself.
225  If there are more than 5 segments the remainder are stored in a separate
226  memory area indicated by next_aprd. */
227 struct nv_adma_cpb {
228  u8 resp_flags; /* 0 */
229  u8 reserved1; /* 1 */
230  u8 ctl_flags; /* 2 */
231  /* len is length of taskfile in 64 bit words */
232  u8 len; /* 3 */
233  u8 tag; /* 4 */
234  u8 next_cpb_idx; /* 5 */
235  __le16 reserved2; /* 6-7 */
236  __le16 tf[12]; /* 8-31 */
237  struct nv_adma_prd aprd[5]; /* 32-111 */
238  __le64 next_aprd; /* 112-119 */
239  __le64 reserved3; /* 120-127 */
240 };
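/*
 * A CPB is 128 bytes (NV_ADMA_CPB_SZ), so per-command addressing is
 * plain stride arithmetic. Sketch, using the fields allocated in
 * nv_adma_port_start() below:
 *
 *	cpb  = &pp->cpb[tag];                      // base + tag * 128
 *	aprd = &pp->aprd[NV_ADMA_SGTBL_LEN * tag]; // spill-over APRDs
 *
 * The first five segments use cpb->aprd[]; only segment six onward
 * goes to the external table pointed at by next_aprd (see
 * nv_adma_fill_sg()).
 */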
241 
242 
243 struct nv_adma_port_priv {
244  struct nv_adma_cpb *cpb;
245  dma_addr_t cpb_dma;
246  struct nv_adma_prd *aprd;
247  dma_addr_t aprd_dma;
248  void __iomem *ctl_block;
249  void __iomem *gen_block;
250  void __iomem *notifier_clear_block;
251  u64 adma_dma_mask;
252  u8 flags;
253  int last_issue_ncq;
254 };
255 
256 struct nv_host_priv {
257  unsigned long type;
258 };
259 
260 struct defer_queue {
261  u32 defer_bits;
262  unsigned int head;
263  unsigned int tail;
264  unsigned int tag[ATA_MAX_QUEUE];
265 };
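/*
 * head and tail run free and are masked with (ATA_MAX_QUEUE - 1) on
 * each access, so this ring only works because ATA_MAX_QUEUE is a
 * power of two (32 here); defer_bits mirrors membership for cheap
 * bit tests. Worked example with nv_swncq_qc_to_dq()/_from_dq()
 * below: enqueue tags 7 then 3 -> tag[] = {7, 3}, defer_bits = 0x88;
 * the first dequeue returns 7 (defer_bits 0x08), the second returns
 * 3 (defer_bits 0, queue empty again).
 */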
266 
267 enum ncq_saw_flag_list {
268  ncq_saw_d2h = (1U << 0),
269  ncq_saw_dmas = (1U << 1),
270  ncq_saw_sdb = (1U << 2),
271  ncq_saw_backout = (1U << 3),
272 };
273 
274 struct nv_swncq_port_priv {
275  struct ata_bmdma_prd *prd; /* our SG list */
276  dma_addr_t prd_dma; /* and its DMA mapping */
277  void __iomem *sactive_block;
278  void __iomem *irq_block;
279  void __iomem *tag_block;
280  u32 qc_active;
281 
282  unsigned int last_issue_tag;
283 
284  /* FIFO circular queue to store deferred commands */
285  struct defer_queue defer_queue;
286 
287  /* for NCQ interrupt analysis */
288  u32 dhfis_bits;
289  u32 dmafis_bits;
290  u32 sdbfis_bits;
291 
292  unsigned int ncq_flags;
293 };
294 
295 
296 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
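/*
 * Each port owns a 12-bit field in the ADMA general status word, with
 * the interrupt flag at bit 19 of the first field, so the macro
 * expands to:
 *
 *	NV_ADMA_CHECK_INTR(gctl, 0)  ->  gctl & (1 << 19)
 *	NV_ADMA_CHECK_INTR(gctl, 1)  ->  gctl & (1 << 31)
 */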
297 
298 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
299 #ifdef CONFIG_PM
300 static int nv_pci_device_resume(struct pci_dev *pdev);
301 #endif
302 static void nv_ck804_host_stop(struct ata_host *host);
303 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
305 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
306 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
307 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
308 
309 static int nv_hardreset(struct ata_link *link, unsigned int *class,
310  unsigned long deadline);
311 static void nv_nf2_freeze(struct ata_port *ap);
312 static void nv_nf2_thaw(struct ata_port *ap);
313 static void nv_ck804_freeze(struct ata_port *ap);
314 static void nv_ck804_thaw(struct ata_port *ap);
315 static int nv_adma_slave_config(struct scsi_device *sdev);
316 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
317 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
318 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
319 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
320 static void nv_adma_irq_clear(struct ata_port *ap);
321 static int nv_adma_port_start(struct ata_port *ap);
322 static void nv_adma_port_stop(struct ata_port *ap);
323 #ifdef CONFIG_PM
324 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
325 static int nv_adma_port_resume(struct ata_port *ap);
326 #endif
327 static void nv_adma_freeze(struct ata_port *ap);
328 static void nv_adma_thaw(struct ata_port *ap);
329 static void nv_adma_error_handler(struct ata_port *ap);
330 static void nv_adma_host_stop(struct ata_host *host);
331 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
332 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
333 
334 static void nv_mcp55_thaw(struct ata_port *ap);
335 static void nv_mcp55_freeze(struct ata_port *ap);
336 static void nv_swncq_error_handler(struct ata_port *ap);
337 static int nv_swncq_slave_config(struct scsi_device *sdev);
338 static int nv_swncq_port_start(struct ata_port *ap);
339 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
340 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
341 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
342 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
343 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
344 #ifdef CONFIG_PM
345 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
346 static int nv_swncq_port_resume(struct ata_port *ap);
347 #endif
348 
349 enum nv_host_type
350 {
351  GENERIC,
352  NFORCE2,
353  NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
354  CK804,
355  ADMA,
356  MCP5x,
357  SWNCQ,
358 };
359 
360 static const struct pci_device_id nv_pci_tbl[] = {
375 
376  { } /* terminate list */
377 };
378 
379 static struct pci_driver nv_pci_driver = {
380  .name = DRV_NAME,
381  .id_table = nv_pci_tbl,
382  .probe = nv_init_one,
383 #ifdef CONFIG_PM
384  .suspend = ata_pci_device_suspend,
385  .resume = nv_pci_device_resume,
386 #endif
387  .remove = ata_pci_remove_one,
388 };
389 
390 static struct scsi_host_template nv_sht = {
391  ATA_BMDMA_SHT(DRV_NAME),
392 };
393 
394 static struct scsi_host_template nv_adma_sht = {
395  ATA_NCQ_SHT(DRV_NAME),
396  .can_queue = NV_ADMA_MAX_CPBS,
397  .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
398  .dma_boundary = NV_ADMA_DMA_BOUNDARY,
399  .slave_configure = nv_adma_slave_config,
400 };
401 
402 static struct scsi_host_template nv_swncq_sht = {
403  ATA_NCQ_SHT(DRV_NAME),
404  .can_queue = ATA_MAX_QUEUE,
405  .sg_tablesize = LIBATA_MAX_PRD,
406  .dma_boundary = ATA_DMA_BOUNDARY,
407  .slave_configure = nv_swncq_slave_config,
408 };
409 
410 /*
411  * NV SATA controllers have various different problems with hardreset
412  * protocol depending on the specific controller and device.
413  *
414  * GENERIC:
415  *
416  * bko11195 reports that link doesn't come online after hardreset on
417  * generic nv's and there have been several other similar reports on
418  * linux-ide.
419  *
420  * bko12351#c23 reports that warmplug on MCP61 doesn't work with
421  * softreset.
422  *
423  * NF2/3:
424  *
425  * bko3352 reports nf2/3 controllers can't determine device signature
426  * reliably after hardreset. The following thread reports detection
427  * failure on cold boot with the standard debouncing timing.
428  *
429  * http://thread.gmane.org/gmane.linux.ide/34098
430  *
431  * bko12176 reports that hardreset fails to bring up the link during
432  * boot on nf2.
433  *
434  * CK804:
435  *
436  * For initial probing after boot and hot plugging, hardreset mostly
437  * works fine on CK804 but curiously, reprobing on the initial port
438  * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
439  * FIS in a somewhat nondeterministic way.
440  *
441  * SWNCQ:
442  *
443  * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
444  * hardreset should be used and hardreset can't report proper
445  * signature, which suggests that mcp5x is closer to nf2 as far as
446  * reset quirkiness is concerned.
447  *
448  * bko12703 reports that boot probing fails for intel SSD with
449  * hardreset. Link fails to come online. Softreset works fine.
450  *
451  * The failures are varied but the following patterns seem true for
452  * all flavors.
453  *
454  * - Softreset during boot always works.
455  *
456  * - Hardreset during boot sometimes fails to bring up the link on
457  * certain combinations and device signature acquisition is
458  * unreliable.
459  *
460  * - Hardreset is often necessary after hotplug.
461  *
462  * So, preferring softreset for boot probing and error handling (as
463  * hardreset might bring down the link) but using hardreset for
464  * post-boot probing should work around the above issues in most
465  * cases. Define nv_hardreset() which only kicks in for post-boot
466  * probing and use it for all variants.
467  */
468 static struct ata_port_operations nv_generic_ops = {
469  .inherits = &ata_bmdma_port_ops,
470  .lost_interrupt = ATA_OP_NULL,
471  .scr_read = nv_scr_read,
472  .scr_write = nv_scr_write,
473  .hardreset = nv_hardreset,
474 };
475 
476 static struct ata_port_operations nv_nf2_ops = {
477  .inherits = &nv_generic_ops,
478  .freeze = nv_nf2_freeze,
479  .thaw = nv_nf2_thaw,
480 };
481 
482 static struct ata_port_operations nv_ck804_ops = {
483  .inherits = &nv_generic_ops,
484  .freeze = nv_ck804_freeze,
485  .thaw = nv_ck804_thaw,
486  .host_stop = nv_ck804_host_stop,
487 };
488 
489 static struct ata_port_operations nv_adma_ops = {
490  .inherits = &nv_ck804_ops,
491 
492  .check_atapi_dma = nv_adma_check_atapi_dma,
493  .sff_tf_read = nv_adma_tf_read,
494  .qc_defer = ata_std_qc_defer,
495  .qc_prep = nv_adma_qc_prep,
496  .qc_issue = nv_adma_qc_issue,
497  .sff_irq_clear = nv_adma_irq_clear,
498 
499  .freeze = nv_adma_freeze,
500  .thaw = nv_adma_thaw,
501  .error_handler = nv_adma_error_handler,
502  .post_internal_cmd = nv_adma_post_internal_cmd,
503 
504  .port_start = nv_adma_port_start,
505  .port_stop = nv_adma_port_stop,
506 #ifdef CONFIG_PM
507  .port_suspend = nv_adma_port_suspend,
508  .port_resume = nv_adma_port_resume,
509 #endif
510  .host_stop = nv_adma_host_stop,
511 };
512 
513 static struct ata_port_operations nv_swncq_ops = {
514  .inherits = &nv_generic_ops,
515 
516  .qc_defer = ata_std_qc_defer,
517  .qc_prep = nv_swncq_qc_prep,
518  .qc_issue = nv_swncq_qc_issue,
519 
520  .freeze = nv_mcp55_freeze,
521  .thaw = nv_mcp55_thaw,
522  .error_handler = nv_swncq_error_handler,
523 
524 #ifdef CONFIG_PM
525  .port_suspend = nv_swncq_port_suspend,
526  .port_resume = nv_swncq_port_resume,
527 #endif
528  .port_start = nv_swncq_port_start,
529 };
530 
531 struct nv_pi_priv {
532  irq_handler_t irq_handler;
533  struct scsi_host_template *sht;
534 };
535 
536 #define NV_PI_PRIV(_irq_handler, _sht) \
537  &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
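/*
 * NV_PI_PRIV points at an anonymous compound literal, letting each
 * nv_port_info entry below carry both its interrupt handler and its
 * scsi_host_template through the single private_data pointer.
 * Expanded by hand (the name is illustrative only), the generic
 * entry is equivalent to:
 *
 *	static struct nv_pi_priv generic_pi_priv = {
 *		.irq_handler = nv_generic_interrupt,
 *		.sht         = &nv_sht,
 *	};
 *	.private_data = &generic_pi_priv,
 */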
538 
539 static const struct ata_port_info nv_port_info[] = {
540  /* generic */
541  {
542  .flags = ATA_FLAG_SATA,
543  .pio_mask = NV_PIO_MASK,
544  .mwdma_mask = NV_MWDMA_MASK,
545  .udma_mask = NV_UDMA_MASK,
546  .port_ops = &nv_generic_ops,
547  .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
548  },
549  /* nforce2/3 */
550  {
551  .flags = ATA_FLAG_SATA,
552  .pio_mask = NV_PIO_MASK,
553  .mwdma_mask = NV_MWDMA_MASK,
554  .udma_mask = NV_UDMA_MASK,
555  .port_ops = &nv_nf2_ops,
556  .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
557  },
558  /* ck804 */
559  {
560  .flags = ATA_FLAG_SATA,
561  .pio_mask = NV_PIO_MASK,
562  .mwdma_mask = NV_MWDMA_MASK,
563  .udma_mask = NV_UDMA_MASK,
564  .port_ops = &nv_ck804_ops,
565  .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
566  },
567  /* ADMA */
568  {
569  .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
570  .pio_mask = NV_PIO_MASK,
571  .mwdma_mask = NV_MWDMA_MASK,
572  .udma_mask = NV_UDMA_MASK,
573  .port_ops = &nv_adma_ops,
574  .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
575  },
576  /* MCP5x */
577  {
578  .flags = ATA_FLAG_SATA,
579  .pio_mask = NV_PIO_MASK,
580  .mwdma_mask = NV_MWDMA_MASK,
581  .udma_mask = NV_UDMA_MASK,
582  .port_ops = &nv_generic_ops,
583  .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
584  },
585  /* SWNCQ */
586  {
587  .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
588  .pio_mask = NV_PIO_MASK,
589  .mwdma_mask = NV_MWDMA_MASK,
590  .udma_mask = NV_UDMA_MASK,
591  .port_ops = &nv_swncq_ops,
592  .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
593  },
594 };
595 
596 MODULE_AUTHOR("NVIDIA");
597 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
598 MODULE_LICENSE("GPL");
599 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
600 MODULE_VERSION(DRV_VERSION);
601 
602 static bool adma_enabled;
603 static bool swncq_enabled = 1;
604 static bool msi_enabled;
605 
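/*
 * The next two helpers switch the port between legacy register mode
 * and ADMA mode by toggling NV_ADMA_CTL_GO and polling NV_ADMA_STAT;
 * each polling loop bounds the wait at 20 * ndelay(50), i.e. roughly
 * a microsecond, before warning and carrying on regardless.
 */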
606 static void nv_adma_register_mode(struct ata_port *ap)
607 {
608  struct nv_adma_port_priv *pp = ap->private_data;
609  void __iomem *mmio = pp->ctl_block;
610  u16 tmp, status;
611  int count = 0;
612 
613  if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
614  return;
615 
616  status = readw(mmio + NV_ADMA_STAT);
617  while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
618  ndelay(50);
619  status = readw(mmio + NV_ADMA_STAT);
620  count++;
621  }
622  if (count == 20)
623  ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
624  status);
625 
626  tmp = readw(mmio + NV_ADMA_CTL);
627  writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
628 
629  count = 0;
630  status = readw(mmio + NV_ADMA_STAT);
631  while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
632  ndelay(50);
633  status = readw(mmio + NV_ADMA_STAT);
634  count++;
635  }
636  if (count == 20)
637  ata_port_warn(ap,
638  "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
639  status);
640 
641  pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
642 }
643 
644 static void nv_adma_mode(struct ata_port *ap)
645 {
646  struct nv_adma_port_priv *pp = ap->private_data;
647  void __iomem *mmio = pp->ctl_block;
648  u16 tmp, status;
649  int count = 0;
650 
651  if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
652  return;
653 
654  WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
655 
656  tmp = readw(mmio + NV_ADMA_CTL);
657  writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
658 
659  status = readw(mmio + NV_ADMA_STAT);
660  while (((status & NV_ADMA_STAT_LEGACY) ||
661  !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
662  ndelay(50);
663  status = readw(mmio + NV_ADMA_STAT);
664  count++;
665  }
666  if (count == 20)
667  ata_port_warn(ap,
668  "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
669  status);
670 
671  pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
672 }
673 
674 static int nv_adma_slave_config(struct scsi_device *sdev)
675 {
676  struct ata_port *ap = ata_shost_to_port(sdev->host);
677  struct nv_adma_port_priv *pp = ap->private_data;
678  struct nv_adma_port_priv *port0, *port1;
679  struct scsi_device *sdev0, *sdev1;
680  struct pci_dev *pdev = to_pci_dev(ap->host->dev);
681  unsigned long segment_boundary, flags;
682  unsigned short sg_tablesize;
683  int rc;
684  int adma_enable;
685  u32 current_reg, new_reg, config_mask;
686 
687  rc = ata_scsi_slave_config(sdev);
688 
689  if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
690  /* Not a proper libata device, ignore */
691  return rc;
692 
693  spin_lock_irqsave(ap->lock, flags);
694 
695  if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
696  /*
697  * NVIDIA reports that ADMA mode does not support ATAPI commands.
698  * Therefore ATAPI commands are sent through the legacy interface.
699  * However, the legacy interface only supports 32-bit DMA.
700  * Restrict DMA parameters as required by the legacy interface
701  * when an ATAPI device is connected.
702  */
703  segment_boundary = ATA_DMA_BOUNDARY;
704  /* Subtract 1 since an extra entry may be needed for padding, see
705  libata-scsi.c */
706  sg_tablesize = LIBATA_MAX_PRD - 1;
707 
708  /* Since the legacy DMA engine is in use, we need to disable ADMA
709  on the port. */
710  adma_enable = 0;
711  nv_adma_register_mode(ap);
712  } else {
713  segment_boundary = NV_ADMA_DMA_BOUNDARY;
714  sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
715  adma_enable = 1;
716  }
717 
718  pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
719 
720  if (ap->port_no == 1)
721  config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
722  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
723  else
724  config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
725  NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
726 
727  if (adma_enable) {
728  new_reg = current_reg | config_mask;
729  pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
730  } else {
731  new_reg = current_reg & ~config_mask;
732  pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
733  }
734 
735  if (current_reg != new_reg)
736  pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
737 
738  port0 = ap->host->ports[0]->private_data;
739  port1 = ap->host->ports[1]->private_data;
740  sdev0 = ap->host->ports[0]->link.device[0].sdev;
741  sdev1 = ap->host->ports[1]->link.device[0].sdev;
742  if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
743  (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
744  /*
745  * We have to set the DMA mask to 32-bit if either port is in
746  * ATAPI mode, since they are on the same PCI device which is
747  * used for DMA mapping. If we set the mask we also need to set
748  * the bounce limit on both ports so that the block layer does
749  * not feed addresses that make DMA mapping choke. If either
750  * SCSI device is not allocated yet, it's OK since that port
751  * will discover its correct setting when it does get allocated.
752  * Note: setting the 32-bit mask should not fail. */
753  if (sdev0)
754  blk_queue_bounce_limit(sdev0->request_queue,
755  ATA_DMA_MASK);
756  if (sdev1)
757  blk_queue_bounce_limit(sdev1->request_queue,
758  ATA_DMA_MASK);
759 
760  pci_set_dma_mask(pdev, ATA_DMA_MASK);
761  } else {
762  /* This shouldn't fail as it was set to this value before */
763  pci_set_dma_mask(pdev, pp->adma_dma_mask);
764  if (sdev0)
765  blk_queue_bounce_limit(sdev0->request_queue,
766  pp->adma_dma_mask);
767  if (sdev1)
768  blk_queue_bounce_limit(sdev1->request_queue,
769  pp->adma_dma_mask);
770  }
771 
772  blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
773  blk_queue_max_segments(sdev->request_queue, sg_tablesize);
774  ata_port_info(ap,
775  "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
776  (unsigned long long)*ap->host->dev->dma_mask,
777  segment_boundary, sg_tablesize);
778 
779  spin_unlock_irqrestore(ap->lock, flags);
780 
781  return rc;
782 }
783 
784 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
785 {
786  struct nv_adma_port_priv *pp = qc->ap->private_data;
787  return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
788 }
789 
790 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
791 {
792  /* Other than when internal or pass-through commands are executed,
793  the only time this function will be called in ADMA mode will be
794  if a command fails. In the failure case we don't care about going
795  into register mode with ADMA commands pending, as the commands will
796  all shortly be aborted anyway. We assume that NCQ commands are not
797  issued via passthrough, which is the only way that switching into
798  ADMA mode could abort outstanding commands. */
799  nv_adma_register_mode(ap);
800 
801  ata_sff_tf_read(ap, tf);
802 }
803 
804 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
805 {
806  unsigned int idx = 0;
807 
808  if (tf->flags & ATA_TFLAG_ISADDR) {
809  if (tf->flags & ATA_TFLAG_LBA48) {
810  cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
811  cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
812  cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
813  cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
814  cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
815  cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
816  } else
817  cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
818 
819  cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
820  cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
821  cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
822  cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
823  }
824 
825  if (tf->flags & ATA_TFLAG_DEVICE)
826  cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
827 
828  cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
829 
830  while (idx < 12)
831  cpb[idx++] = cpu_to_le16(IGN);
832 
833  return idx;
834 }
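/*
 * Each taskfile word packs the register index in the high byte, the
 * value in the low byte, and the CMDEND/WNB/IGN flags in bits 13-15.
 * For example, assuming ATA_REG_CMD == 7 (the command/status offset,
 * as also used by nv_adma_setup_port() below), a READ DMA (0xC8)
 * command byte encodes as:
 *
 *	(ATA_REG_CMD << 8) | 0xC8 | CMDEND
 *	    = 0x0700 | 0x00C8 | 0x8000 = 0x87C8
 */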
835 
836 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
837 {
838  struct nv_adma_port_priv *pp = ap->private_data;
839  u8 flags = pp->cpb[cpb_num].resp_flags;
840 
841  VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
842 
843  if (unlikely((force_err ||
844  flags & (NV_CPB_RESP_ATA_ERR |
845  NV_CPB_RESP_CMD_ERR |
846  NV_CPB_RESP_CPB_ERR)))) {
847  struct ata_eh_info *ehi = &ap->link.eh_info;
848  int freeze = 0;
849 
850  ata_ehi_clear_desc(ehi);
851  __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
852  if (flags & NV_CPB_RESP_ATA_ERR) {
853  ata_ehi_push_desc(ehi, "ATA error");
854  ehi->err_mask |= AC_ERR_DEV;
855  } else if (flags & NV_CPB_RESP_CMD_ERR) {
856  ata_ehi_push_desc(ehi, "CMD error");
857  ehi->err_mask |= AC_ERR_DEV;
858  } else if (flags & NV_CPB_RESP_CPB_ERR) {
859  ata_ehi_push_desc(ehi, "CPB error");
860  ehi->err_mask |= AC_ERR_SYSTEM;
861  freeze = 1;
862  } else {
863  /* notifier error, but no error in CPB flags? */
864  ata_ehi_push_desc(ehi, "unknown");
865  ehi->err_mask |= AC_ERR_OTHER;
866  freeze = 1;
867  }
868  /* Kill all commands. EH will determine what actually failed. */
869  if (freeze)
870  ata_port_freeze(ap);
871  else
872  ata_port_abort(ap);
873  return -1;
874  }
875 
876  if (likely(flags & NV_CPB_RESP_DONE))
877  return 1;
878  return 0;
879 }
880 
881 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
882 {
883  struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
884 
885  /* freeze if hotplugged */
886  if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
887  ata_port_freeze(ap);
888  return 1;
889  }
890 
891  /* bail out if not our interrupt */
892  if (!(irq_stat & NV_INT_DEV))
893  return 0;
894 
895  /* DEV interrupt w/ no active qc? */
896  if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
897  ata_sff_check_status(ap);
898  return 1;
899  }
900 
901  /* handle interrupt */
902  return ata_bmdma_port_intr(ap, qc);
903 }
904 
905 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
906 {
907  struct ata_host *host = dev_instance;
908  int i, handled = 0;
909  u32 notifier_clears[2];
910 
911  spin_lock(&host->lock);
912 
913  for (i = 0; i < host->n_ports; i++) {
914  struct ata_port *ap = host->ports[i];
915  struct nv_adma_port_priv *pp = ap->private_data;
916  void __iomem *mmio = pp->ctl_block;
917  u16 status;
918  u32 gen_ctl;
919  u32 notifier, notifier_error;
920 
921  notifier_clears[i] = 0;
922 
923  /* if ADMA is disabled, use standard ata interrupt handler */
924  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
925  u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
926  >> (NV_INT_PORT_SHIFT * i);
927  handled += nv_host_intr(ap, irq_stat);
928  continue;
929  }
930 
931  /* if in ATA register mode, check for standard interrupts */
932  if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
933  u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
934  >> (NV_INT_PORT_SHIFT * i);
935  if (ata_tag_valid(ap->link.active_tag))
936  /* NV_INT_DEV indication seems unreliable
937  * at times, at least in ADMA mode. Force it
938  * on always when a command is active, to
939  * prevent losing interrupts. */
940  irq_stat |= NV_INT_DEV;
941  handled += nv_host_intr(ap, irq_stat);
942  }
943 
944  notifier = readl(mmio + NV_ADMA_NOTIFIER);
945  notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
946  notifier_clears[i] = notifier | notifier_error;
947 
948  gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
949 
950  if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
951  !notifier_error)
952  /* Nothing to do */
953  continue;
954 
955  status = readw(mmio + NV_ADMA_STAT);
956 
957  /*
958  * Clear status. Ensure the controller sees the
959  * clearing before we start looking at any of the CPB
960  * statuses, so that any CPB completions after this
961  * point in the handler will raise another interrupt.
962  */
963  writew(status, mmio + NV_ADMA_STAT);
964  readw(mmio + NV_ADMA_STAT); /* flush posted write */
965  rmb();
966 
967  handled++; /* irq handled if we got here */
968 
969  /* freeze if hotplugged or controller error */
970  if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
971  NV_ADMA_STAT_HOTUNPLUG |
972  NV_ADMA_STAT_TIMEOUT |
973  NV_ADMA_STAT_SERROR))) {
974  struct ata_eh_info *ehi = &ap->link.eh_info;
975 
976  ata_ehi_clear_desc(ehi);
977  __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
978  if (status & NV_ADMA_STAT_TIMEOUT) {
979  ehi->err_mask |= AC_ERR_SYSTEM;
980  ata_ehi_push_desc(ehi, "timeout");
981  } else if (status & NV_ADMA_STAT_HOTPLUG) {
982  ata_ehi_hotplugged(ehi);
983  ata_ehi_push_desc(ehi, "hotplug");
984  } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
985  ata_ehi_hotplugged(ehi);
986  ata_ehi_push_desc(ehi, "hot unplug");
987  } else if (status & NV_ADMA_STAT_SERROR) {
988  /* let EH analyze SError and figure out cause */
989  ata_ehi_push_desc(ehi, "SError");
990  } else
991  ata_ehi_push_desc(ehi, "unknown");
992  ata_port_freeze(ap);
993  continue;
994  }
995 
996  if (status & (NV_ADMA_STAT_DONE |
997  NV_ADMA_STAT_CPBERR)) {
998 
999  u32 check_commands = notifier_clears[i];
1000  u32 done_mask = 0;
1001  int pos, rc;
1002 
1003  if (status & NV_ADMA_STAT_CPBERR) {
1004  /* check all active commands */
1005  if (ata_tag_valid(ap->link.active_tag))
1006  check_commands = 1 <<
1007  ap->link.active_tag;
1008  else
1009  check_commands = ap->link.sactive;
1010  }
1011 
1012  /* check CPBs for completed commands */
1013  while ((pos = ffs(check_commands))) {
1014  pos--;
1015  rc = nv_adma_check_cpb(ap, pos,
1016  notifier_error & (1 << pos));
1017  if (rc > 0)
1018  done_mask |= 1 << pos;
1019  else if (unlikely(rc < 0))
1020  check_commands = 0;
1021  check_commands &= ~(1 << pos);
1022  }
1023  ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
1024  }
1025  }
1026 
1027  if (notifier_clears[0] || notifier_clears[1]) {
1028  /* Note: Both notifier clear registers must be written
1029  if either is set, even if one is zero, according to NVIDIA. */
1030  struct nv_adma_port_priv *pp = host->ports[0]->private_data;
1031  writel(notifier_clears[0], pp->notifier_clear_block);
1032  pp = host->ports[1]->private_data;
1033  writel(notifier_clears[1], pp->notifier_clear_block);
1034  }
1035 
1036  spin_unlock(&host->lock);
1037 
1038  return IRQ_RETVAL(handled);
1039 }
1040 
1041 static void nv_adma_freeze(struct ata_port *ap)
1042 {
1043  struct nv_adma_port_priv *pp = ap->private_data;
1044  void __iomem *mmio = pp->ctl_block;
1045  u16 tmp;
1046 
1047  nv_ck804_freeze(ap);
1048 
1049  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1050  return;
1051 
1052  /* clear any outstanding CK804 notifications */
1053  writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1054  ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1055 
1056  /* Disable interrupt */
1057  tmp = readw(mmio + NV_ADMA_CTL);
1058  writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1059  mmio + NV_ADMA_CTL);
1060  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1061 }
1062 
1063 static void nv_adma_thaw(struct ata_port *ap)
1064 {
1065  struct nv_adma_port_priv *pp = ap->private_data;
1066  void __iomem *mmio = pp->ctl_block;
1067  u16 tmp;
1068 
1069  nv_ck804_thaw(ap);
1070 
1071  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1072  return;
1073 
1074  /* Enable interrupt */
1075  tmp = readw(mmio + NV_ADMA_CTL);
1076  writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1077  mmio + NV_ADMA_CTL);
1078  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1079 }
1080 
1081 static void nv_adma_irq_clear(struct ata_port *ap)
1082 {
1083  struct nv_adma_port_priv *pp = ap->private_data;
1084  void __iomem *mmio = pp->ctl_block;
1085  u32 notifier_clears[2];
1086 
1087  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1088  ata_bmdma_irq_clear(ap);
1089  return;
1090  }
1091 
1092  /* clear any outstanding CK804 notifications */
1093  writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1094  ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1095 
1096  /* clear ADMA status */
1097  writew(0xffff, mmio + NV_ADMA_STAT);
1098 
1099  /* clear notifiers - note both ports need to be written with
1100  something even though we are only clearing on one */
1101  if (ap->port_no == 0) {
1102  notifier_clears[0] = 0xFFFFFFFF;
1103  notifier_clears[1] = 0;
1104  } else {
1105  notifier_clears[0] = 0;
1106  notifier_clears[1] = 0xFFFFFFFF;
1107  }
1108  pp = ap->host->ports[0]->private_data;
1109  writel(notifier_clears[0], pp->notifier_clear_block);
1110  pp = ap->host->ports[1]->private_data;
1111  writel(notifier_clears[1], pp->notifier_clear_block);
1112 }
1113 
1114 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1115 {
1116  struct nv_adma_port_priv *pp = qc->ap->private_data;
1117 
1118  if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1119  ata_bmdma_post_internal_cmd(qc);
1120 }
1121 
1122 static int nv_adma_port_start(struct ata_port *ap)
1123 {
1124  struct device *dev = ap->host->dev;
1125  struct nv_adma_port_priv *pp;
1126  int rc;
1127  void *mem;
1128  dma_addr_t mem_dma;
1129  void __iomem *mmio;
1130  struct pci_dev *pdev = to_pci_dev(dev);
1131  u16 tmp;
1132 
1133  VPRINTK("ENTER\n");
1134 
1135  /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1136  pad buffers */
1137  rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1138  if (rc)
1139  return rc;
1140  rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1141  if (rc)
1142  return rc;
1143 
1144  /* we might fallback to bmdma, allocate bmdma resources */
1145  rc = ata_bmdma_port_start(ap);
1146  if (rc)
1147  return rc;
1148 
1149  pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1150  if (!pp)
1151  return -ENOMEM;
1152 
1153  mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1154  ap->port_no * NV_ADMA_PORT_SIZE;
1155  pp->ctl_block = mmio;
1156  pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1157  pp->notifier_clear_block = pp->gen_block +
1158  NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1159 
1160  /* Now that the legacy PRD and padding buffer are allocated we can
1161  safely raise the DMA mask to allocate the CPB/APRD table.
1162  These are allowed to fail since we store the value that ends up
1163  being used to set as the bounce limit in slave_config later if
1164  needed. */
1165  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1166  pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1167  pp->adma_dma_mask = *dev->dma_mask;
1168 
1169  mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1170  &mem_dma, GFP_KERNEL);
1171  if (!mem)
1172  return -ENOMEM;
1173  memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1174 
1175  /*
1176  * First item in chunk of DMA memory:
1177  * 128-byte command parameter block (CPB)
1178  * one for each command tag
1179  */
1180  pp->cpb = mem;
1181  pp->cpb_dma = mem_dma;
1182 
1183  writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1184  writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1185 
1186  mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1187  mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1188 
1189  /*
1190  * Second item: block of ADMA_SGTBL_LEN s/g entries
1191  */
1192  pp->aprd = mem;
1193  pp->aprd_dma = mem_dma;
1194 
1195  ap->private_data = pp;
1196 
1197  /* clear any outstanding interrupt conditions */
1198  writew(0xffff, mmio + NV_ADMA_STAT);
1199 
1200  /* initialize port variables */
1201  pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1202 
1203  /* clear CPB fetch count */
1204  writew(0, mmio + NV_ADMA_CPB_COUNT);
1205 
1206  /* clear GO for register mode, enable interrupt */
1207  tmp = readw(mmio + NV_ADMA_CTL);
1208  writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1209  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1210 
1211  tmp = readw(mmio + NV_ADMA_CTL);
1212  writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1213  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1214  udelay(1);
1215  writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1216  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1217 
1218  return 0;
1219 }
1220 
1221 static void nv_adma_port_stop(struct ata_port *ap)
1222 {
1223  struct nv_adma_port_priv *pp = ap->private_data;
1224  void __iomem *mmio = pp->ctl_block;
1225 
1226  VPRINTK("ENTER\n");
1227  writew(0, mmio + NV_ADMA_CTL);
1228 }
1229 
1230 #ifdef CONFIG_PM
1231 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1232 {
1233  struct nv_adma_port_priv *pp = ap->private_data;
1234  void __iomem *mmio = pp->ctl_block;
1235 
1236  /* Go to register mode - clears GO */
1237  nv_adma_register_mode(ap);
1238 
1239  /* clear CPB fetch count */
1240  writew(0, mmio + NV_ADMA_CPB_COUNT);
1241 
1242  /* disable interrupt, shut down port */
1243  writew(0, mmio + NV_ADMA_CTL);
1244 
1245  return 0;
1246 }
1247 
1248 static int nv_adma_port_resume(struct ata_port *ap)
1249 {
1250  struct nv_adma_port_priv *pp = ap->private_data;
1251  void __iomem *mmio = pp->ctl_block;
1252  u16 tmp;
1253 
1254  /* set CPB block location */
1255  writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1256  writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1257 
1258  /* clear any outstanding interrupt conditions */
1259  writew(0xffff, mmio + NV_ADMA_STAT);
1260 
1261  /* initialize port variables */
1262  pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1263 
1264  /* clear CPB fetch count */
1265  writew(0, mmio + NV_ADMA_CPB_COUNT);
1266 
1267  /* clear GO for register mode, enable interrupt */
1268  tmp = readw(mmio + NV_ADMA_CTL);
1269  writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1270  NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1271 
1272  tmp = readw(mmio + NV_ADMA_CTL);
1273  writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1274  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1275  udelay(1);
1276  writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1277  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1278 
1279  return 0;
1280 }
1281 #endif
1282 
1283 static void nv_adma_setup_port(struct ata_port *ap)
1284 {
1285  void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1286  struct ata_ioports *ioport = &ap->ioaddr;
1287 
1288  VPRINTK("ENTER\n");
1289 
1290  mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1291 
1292  ioport->cmd_addr = mmio;
1293  ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1294  ioport->error_addr =
1295  ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1296  ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1297  ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1298  ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1299  ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1300  ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1301  ioport->status_addr =
1302  ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1303  ioport->altstatus_addr =
1304  ioport->ctl_addr = mmio + 0x20;
1305 }
1306 
1307 static int nv_adma_host_init(struct ata_host *host)
1308 {
1309  struct pci_dev *pdev = to_pci_dev(host->dev);
1310  unsigned int i;
1311  u32 tmp32;
1312 
1313  VPRINTK("ENTER\n");
1314 
1315  /* enable ADMA on the ports */
1316  pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1317  tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1318  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1319  NV_MCP_SATA_CFG_20_PORT1_EN |
1320  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1321 
1322  pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1323 
1324  for (i = 0; i < host->n_ports; i++)
1325  nv_adma_setup_port(host->ports[i]);
1326 
1327  return 0;
1328 }
1329 
1330 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1331  struct scatterlist *sg,
1332  int idx,
1333  struct nv_adma_prd *aprd)
1334 {
1335  u8 flags = 0;
1336  if (qc->tf.flags & ATA_TFLAG_WRITE)
1337  flags |= NV_APRD_WRITE;
1338  if (idx == qc->n_elem - 1)
1339  flags |= NV_APRD_END;
1340  else if (idx != 4)
1341  flags |= NV_APRD_CONT;
1342 
1343  aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1344  aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1345  aprd->flags = flags;
1346  aprd->packet_len = 0;
1347 }
1348 
1349 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1350 {
1351  struct nv_adma_port_priv *pp = qc->ap->private_data;
1352  struct nv_adma_prd *aprd;
1353  struct scatterlist *sg;
1354  unsigned int si;
1355 
1356  VPRINTK("ENTER\n");
1357 
1358  for_each_sg(qc->sg, sg, qc->n_elem, si) {
1359  aprd = (si < 5) ? &cpb->aprd[si] :
1360  &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1361  nv_adma_fill_aprd(qc, sg, si, aprd);
1362  }
1363  if (si > 5)
1364  cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1365  else
1366  cpb->next_aprd = cpu_to_le64(0);
1367 }
1368 
1369 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1370 {
1371  struct nv_adma_port_priv *pp = qc->ap->private_data;
1372 
1373  /* ADMA engine can only be used for non-ATAPI DMA commands,
1374  or interrupt-driven no-data commands. */
1375  if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1376  (qc->tf.flags & ATA_TFLAG_POLLING))
1377  return 1;
1378 
1379  if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1380  (qc->tf.protocol == ATA_PROT_NODATA))
1381  return 0;
1382 
1383  return 1;
1384 }
1385 
1386 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1387 {
1388  struct nv_adma_port_priv *pp = qc->ap->private_data;
1389  struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1390  u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1391  NV_CPB_CTL_IEN;
1392 
1393  if (nv_adma_use_reg_mode(qc)) {
1394  BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1395  (qc->flags & ATA_QCFLAG_DMAMAP));
1396  nv_adma_register_mode(qc->ap);
1397  ata_bmdma_qc_prep(qc);
1398  return;
1399  }
1400 
1402  wmb();
1403  cpb->ctl_flags = 0;
1404  wmb();
1405 
1406  cpb->len = 3;
1407  cpb->tag = qc->tag;
1408  cpb->next_cpb_idx = 0;
1409 
1410  /* turn on NCQ flags for NCQ commands */
1411  if (qc->tf.protocol == ATA_PROT_NCQ)
1412  ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1413 
1414  VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1415 
1416  nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1417 
1418  if (qc->flags & ATA_QCFLAG_DMAMAP) {
1419  nv_adma_fill_sg(qc, cpb);
1420  ctl_flags |= NV_CPB_CTL_APRD_VALID;
1421  } else
1422  memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1423 
1424  /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1425  until we are finished filling in all of the contents */
1426  wmb();
1427  cpb->ctl_flags = ctl_flags;
1428  wmb();
1429  cpb->resp_flags = 0;
1430 }
1431 
1432 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1433 {
1434  struct nv_adma_port_priv *pp = qc->ap->private_data;
1435  void __iomem *mmio = pp->ctl_block;
1436  int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1437 
1438  VPRINTK("ENTER\n");
1439 
1440  /* We can't handle result taskfile with NCQ commands, since
1441  retrieving the taskfile switches us out of ADMA mode and would abort
1442  existing commands. */
1443  if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1444  (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1445  ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1446  return AC_ERR_SYSTEM;
1447  }
1448 
1449  if (nv_adma_use_reg_mode(qc)) {
1450  /* use ATA register mode */
1451  VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1452  BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1453  (qc->flags & ATA_QCFLAG_DMAMAP));
1454  nv_adma_register_mode(qc->ap);
1455  return ata_bmdma_qc_issue(qc);
1456  } else
1457  nv_adma_mode(qc->ap);
1458 
1459  /* write append register, command tag in lower 8 bits
1460  and (number of cpbs to append -1) in top 8 bits */
1461  wmb();
1462 
1463  if (curr_ncq != pp->last_issue_ncq) {
1464  /* Seems to need some delay before switching between NCQ and
1465  non-NCQ commands, else we get command timeouts and such. */
1466  udelay(20);
1467  pp->last_issue_ncq = curr_ncq;
1468  }
1469 
1470  writew(qc->tag, mmio + NV_ADMA_APPEND);
1471 
1472  DPRINTK("Issued tag %u\n", qc->tag);
1473 
1474  return 0;
1475 }
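/*
 * Per the append-register format above (tag in the low byte, number
 * of CPBs minus one in the high byte), writew(qc->tag, ...) has zero
 * in the high byte and therefore appends exactly one CPB, the one at
 * index qc->tag.
 */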
1476 
1477 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1478 {
1479  struct ata_host *host = dev_instance;
1480  unsigned int i;
1481  unsigned int handled = 0;
1482  unsigned long flags;
1483 
1484  spin_lock_irqsave(&host->lock, flags);
1485 
1486  for (i = 0; i < host->n_ports; i++) {
1487  struct ata_port *ap = host->ports[i];
1488  struct ata_queued_cmd *qc;
1489 
1490  qc = ata_qc_from_tag(ap, ap->link.active_tag);
1491  if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1492  handled += ata_bmdma_port_intr(ap, qc);
1493  } else {
1494  /*
1495  * No request pending? Clear interrupt status
1496  * anyway, in case there's one pending.
1497  */
1498  ap->ops->sff_check_status(ap);
1499  }
1500  }
1501 
1502  spin_unlock_irqrestore(&host->lock, flags);
1503 
1504  return IRQ_RETVAL(handled);
1505 }
1506 
1507 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1508 {
1509  int i, handled = 0;
1510 
1511  for (i = 0; i < host->n_ports; i++) {
1512  handled += nv_host_intr(host->ports[i], irq_stat);
1513  irq_stat >>= NV_INT_PORT_SHIFT;
1514  }
1515 
1516  return IRQ_RETVAL(handled);
1517 }
1518 
1519 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1520 {
1521  struct ata_host *host = dev_instance;
1522  u8 irq_stat;
1523  irqreturn_t ret;
1524 
1525  spin_lock(&host->lock);
1526  irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1527  ret = nv_do_interrupt(host, irq_stat);
1528  spin_unlock(&host->lock);
1529 
1530  return ret;
1531 }
1532 
1533 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1534 {
1535  struct ata_host *host = dev_instance;
1536  u8 irq_stat;
1537  irqreturn_t ret;
1538 
1539  spin_lock(&host->lock);
1540  irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1541  ret = nv_do_interrupt(host, irq_stat);
1542  spin_unlock(&host->lock);
1543 
1544  return ret;
1545 }
1546 
1547 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1548 {
1549  if (sc_reg > SCR_CONTROL)
1550  return -EINVAL;
1551 
1552  *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1553  return 0;
1554 }
1555 
1556 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1557 {
1558  if (sc_reg > SCR_CONTROL)
1559  return -EINVAL;
1560 
1561  iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1562  return 0;
1563 }
1564 
1565 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1566  unsigned long deadline)
1567 {
1568  struct ata_eh_context *ehc = &link->eh_context;
1569 
1570  /* Do hardreset iff it's post-boot probing, please read the
1571  * comment above port ops for details.
1572  */
1573  if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1574  !ata_dev_enabled(link->device))
1575  sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1576  NULL, NULL);
1577  else {
1578  const unsigned long *timing = sata_ehc_deb_timing(ehc);
1579  int rc;
1580 
1581  if (!(ehc->i.flags & ATA_EHI_QUIET))
1582  ata_link_info(link,
1583  "nv: skipping hardreset on occupied port\n");
1584 
1585  /* make sure the link is online */
1586  rc = sata_link_resume(link, timing, deadline);
1587  /* whine about phy resume failure but proceed */
1588  if (rc && rc != -EOPNOTSUPP)
1589  ata_link_warn(link, "failed to resume link (errno=%d)\n",
1590  rc);
1591  }
1592 
1593  /* device signature acquisition is unreliable */
1594  return -EAGAIN;
1595 }
1596 
1597 static void nv_nf2_freeze(struct ata_port *ap)
1598 {
1599  void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1600  int shift = ap->port_no * NV_INT_PORT_SHIFT;
1601  u8 mask;
1602 
1603  mask = ioread8(scr_addr + NV_INT_ENABLE);
1604  mask &= ~(NV_INT_ALL << shift);
1605  iowrite8(mask, scr_addr + NV_INT_ENABLE);
1606 }
1607 
1608 static void nv_nf2_thaw(struct ata_port *ap)
1609 {
1610  void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1611  int shift = ap->port_no * NV_INT_PORT_SHIFT;
1612  u8 mask;
1613 
1614  iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1615 
1616  mask = ioread8(scr_addr + NV_INT_ENABLE);
1617  mask |= (NV_INT_MASK << shift);
1618  iowrite8(mask, scr_addr + NV_INT_ENABLE);
1619 }
1620 
1621 static void nv_ck804_freeze(struct ata_port *ap)
1622 {
1623  void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1624  int shift = ap->port_no * NV_INT_PORT_SHIFT;
1625  u8 mask;
1626 
1627  mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1628  mask &= ~(NV_INT_ALL << shift);
1629  writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1630 }
1631 
1632 static void nv_ck804_thaw(struct ata_port *ap)
1633 {
1634  void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1635  int shift = ap->port_no * NV_INT_PORT_SHIFT;
1636  u8 mask;
1637 
1638  writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1639 
1640  mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1641  mask |= (NV_INT_MASK << shift);
1642  writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1643 }
1644 
1645 static void nv_mcp55_freeze(struct ata_port *ap)
1646 {
1647  void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1648  int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1649  u32 mask;
1650 
1651  writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1652 
1653  mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1654  mask &= ~(NV_INT_ALL_MCP55 << shift);
1655  writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1656 }
1657 
1658 static void nv_mcp55_thaw(struct ata_port *ap)
1659 {
1660  void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1661  int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1662  u32 mask;
1663 
1664  writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1665 
1666  mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1667  mask |= (NV_INT_MASK_MCP55 << shift);
1668  writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1669 }
1670 
1671 static void nv_adma_error_handler(struct ata_port *ap)
1672 {
1673  struct nv_adma_port_priv *pp = ap->private_data;
1674  if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1675  void __iomem *mmio = pp->ctl_block;
1676  int i;
1677  u16 tmp;
1678 
1679  if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1680  u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1681  u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1682  u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1683  u32 status = readw(mmio + NV_ADMA_STAT);
1684  u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1685  u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1686 
1687  ata_port_err(ap,
1688  "EH in ADMA mode, notifier 0x%X "
1689  "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1690  "next cpb count 0x%X next cpb idx 0x%x\n",
1691  notifier, notifier_error, gen_ctl, status,
1692  cpb_count, next_cpb_idx);
1693 
1694  for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1695  struct nv_adma_cpb *cpb = &pp->cpb[i];
1696  if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1697  ap->link.sactive & (1 << i))
1698  ata_port_err(ap,
1699  "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1700  i, cpb->ctl_flags, cpb->resp_flags);
1701  }
1702  }
1703 
1704  /* Push us back into port register mode for error handling. */
1705  nv_adma_register_mode(ap);
1706 
1707  /* Mark all of the CPBs as invalid to prevent them from
1708  being executed */
1709  for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1710  pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1711 
1712  /* clear CPB fetch count */
1713  writew(0, mmio + NV_ADMA_CPB_COUNT);
1714 
1715  /* Reset channel */
1716  tmp = readw(mmio + NV_ADMA_CTL);
1717  writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1718  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1719  udelay(1);
1720  writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1721  readw(mmio + NV_ADMA_CTL); /* flush posted write */
1722  }
1723 
1724  ata_bmdma_error_handler(ap);
1725 }
1726 
1727 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1728 {
1729  struct nv_swncq_port_priv *pp = ap->private_data;
1730  struct defer_queue *dq = &pp->defer_queue;
1731 
1732  /* queue is full */
1733  WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1734  dq->defer_bits |= (1 << qc->tag);
1735  dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1736 }
1737 
1738 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1739 {
1740  struct nv_swncq_port_priv *pp = ap->private_data;
1741  struct defer_queue *dq = &pp->defer_queue;
1742  unsigned int tag;
1743 
1744  if (dq->head == dq->tail) /* null queue */
1745  return NULL;
1746 
1747  tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1748  dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1749  WARN_ON(!(dq->defer_bits & (1 << tag)));
1750  dq->defer_bits &= ~(1 << tag);
1751 
1752  return ata_qc_from_tag(ap, tag);
1753 }
1754 
1755 static void nv_swncq_fis_reinit(struct ata_port *ap)
1756 {
1757  struct nv_swncq_port_priv *pp = ap->private_data;
1758 
1759  pp->dhfis_bits = 0;
1760  pp->dmafis_bits = 0;
1761  pp->sdbfis_bits = 0;
1762  pp->ncq_flags = 0;
1763 }
1764 
1765 static void nv_swncq_pp_reinit(struct ata_port *ap)
1766 {
1767  struct nv_swncq_port_priv *pp = ap->private_data;
1768  struct defer_queue *dq = &pp->defer_queue;
1769 
1770  dq->head = 0;
1771  dq->tail = 0;
1772  dq->defer_bits = 0;
1773  pp->qc_active = 0;
1774  pp->last_issue_tag = ATA_TAG_POISON;
1775  nv_swncq_fis_reinit(ap);
1776 }
1777 
1778 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1779 {
1780  struct nv_swncq_port_priv *pp = ap->private_data;
1781 
1782  writew(fis, pp->irq_block);
1783 }
1784 
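/* ata_bmdma_stop() only dereferences qc->ap, so a dummy qc on the
 * stack is enough to stop the engine when no real qc is at hand. */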
1785 static void __ata_bmdma_stop(struct ata_port *ap)
1786 {
1787  struct ata_queued_cmd qc;
1788 
1789  qc.ap = ap;
1790  ata_bmdma_stop(&qc);
1791 }
1792 
1793 static void nv_swncq_ncq_stop(struct ata_port *ap)
1794 {
1795  struct nv_swncq_port_priv *pp = ap->private_data;
1796  unsigned int i;
1797  u32 sactive;
1798  u32 done_mask;
1799 
1800  ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1801  ap->qc_active, ap->link.sactive);
1802  ata_port_err(ap,
1803  "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1804  "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1805  pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1806  pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1807 
1808  ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1809  ap->ops->sff_check_status(ap),
1810  ioread8(ap->ioaddr.error_addr));
1811 
1812  sactive = readl(pp->sactive_block);
1813  done_mask = pp->qc_active ^ sactive;
1814 
1815  ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1816  for (i = 0; i < ATA_MAX_QUEUE; i++) {
1817  u8 err = 0;
1818  if (pp->qc_active & (1 << i))
1819  err = 0;
1820  else if (done_mask & (1 << i))
1821  err = 1;
1822  else
1823  continue;
1824 
1825  ata_port_err(ap,
1826  "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1827  (pp->dhfis_bits >> i) & 0x1,
1828  (pp->dmafis_bits >> i) & 0x1,
1829  (pp->sdbfis_bits >> i) & 0x1,
1830  (sactive >> i) & 0x1,
1831  (err ? "error! tag doesn't exist" : " "));
1832  }
1833 
1834  nv_swncq_pp_reinit(ap);
1835  ap->ops->sff_irq_clear(ap);
1836  __ata_bmdma_stop(ap);
1837  nv_swncq_irq_clear(ap, 0xffff);
1838 }
1839 
1840 static void nv_swncq_error_handler(struct ata_port *ap)
1841 {
1842  struct ata_eh_context *ehc = &ap->link.eh_context;
1843 
1844  if (ap->link.sactive) {
1845  nv_swncq_ncq_stop(ap);
1846  ehc->i.action |= ATA_EH_RESET;
1847  }
1848 
1849  ata_bmdma_error_handler(ap);
1850 }
1851 
1852 #ifdef CONFIG_PM
1853 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1854 {
1855  void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1856  u32 tmp;
1857 
1858  /* clear irq */
1859  writel(~0, mmio + NV_INT_STATUS_MCP55);
1860 
1861  /* disable irq */
1862  writel(0, mmio + NV_INT_ENABLE_MCP55);
1863 
1864  /* disable swncq */
1865  tmp = readl(mmio + NV_CTL_MCP55);
1866  tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1867  writel(tmp, mmio + NV_CTL_MCP55);
1868 
1869  return 0;
1870 }
1871 
1872 static int nv_swncq_port_resume(struct ata_port *ap)
1873 {
1874  void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1875  u32 tmp;
1876 
1877  /* clear irq */
1878  writel(~0, mmio + NV_INT_STATUS_MCP55);
1879 
1880  /* enable irq */
1881  writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1882 
1883  /* enable swncq */
1884  tmp = readl(mmio + NV_CTL_MCP55);
1885  writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1886 
1887  return 0;
1888 }
1889 #endif
1890 
1891 static void nv_swncq_host_init(struct ata_host *host)
1892 {
1893  u32 tmp;
1894  void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1895  struct pci_dev *pdev = to_pci_dev(host->dev);
1896  u8 regval;
1897 
1898  /* disable ECO 398 */
1899  pci_read_config_byte(pdev, 0x7f, &regval);
1900  regval &= ~(1 << 7);
1901  pci_write_config_byte(pdev, 0x7f, regval);
1902 
1903  /* enable swncq */
1904  tmp = readl(mmio + NV_CTL_MCP55);
1905  VPRINTK("HOST_CTL:0x%X\n", tmp);
1906  writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1907 
1908  /* enable irq intr */
1909  tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1910  VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1911  writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1912 
1913  /* clear port irq */
1914  writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1915 }
1916 
1917 static int nv_swncq_slave_config(struct scsi_device *sdev)
1918 {
1919  struct ata_port *ap = ata_shost_to_port(sdev->host);
1920  struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1921  struct ata_device *dev;
1922  int rc;
1923  u8 rev;
1924  u8 check_maxtor = 0;
1925  unsigned char model_num[ATA_ID_PROD_LEN + 1];
1926 
1927  rc = ata_scsi_slave_config(sdev);
1928  if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1929  /* Not a proper libata device, ignore */
1930  return rc;
1931 
1932  dev = &ap->link.device[sdev->id];
1933  if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1934  return rc;
1935 
1936  /* if MCP51 and Maxtor, then disable ncq */
1937  if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1938  pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1939  check_maxtor = 1;
1940 
1941  /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1942  if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1943  pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1944  pci_read_config_byte(pdev, 0x8, &rev);
1945  if (rev <= 0xa2)
1946  check_maxtor = 1;
1947  }
1948 
1949  if (!check_maxtor)
1950  return rc;
1951 
1952  ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1953 
1954  if (strncmp(model_num, "Maxtor", 6) == 0) {
1955  ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
1956  ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1957  sdev->queue_depth);
1958  }
1959 
1960  return rc;
1961 }
1962 
1963 static int nv_swncq_port_start(struct ata_port *ap)
1964 {
1965  struct device *dev = ap->host->dev;
1966  void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1967  struct nv_swncq_port_priv *pp;
1968  int rc;
1969 
1970  /* we might fallback to bmdma, allocate bmdma resources */
1971  rc = ata_bmdma_port_start(ap);
1972  if (rc)
1973  return rc;
1974 
1975  pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1976  if (!pp)
1977  return -ENOMEM;
1978 
1979  pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1980  &pp->prd_dma, GFP_KERNEL);
1981  if (!pp->prd)
1982  return -ENOMEM;
1983  memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1984 
1985  ap->private_data = pp;
1986  pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1987  pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1988  pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1989 
1990  return 0;
1991 }
1992 
1993 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1994 {
1995  if (qc->tf.protocol != ATA_PROT_NCQ) {
1996  ata_bmdma_qc_prep(qc);
1997  return;
1998  }
1999 
2000  if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2001  return;
2002 
2003  nv_swncq_fill_sg(qc);
2004 }
2005 
2006 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2007 {
2008  struct ata_port *ap = qc->ap;
2009  struct scatterlist *sg;
2010  struct nv_swncq_port_priv *pp = ap->private_data;
2011  struct ata_bmdma_prd *prd;
2012  unsigned int si, idx;
2013 
2014  prd = pp->prd + ATA_MAX_PRD * qc->tag;
2015 
2016  idx = 0;
2017  for_each_sg(qc->sg, sg, qc->n_elem, si) {
2018  u32 addr, offset;
2019  u32 sg_len, len;
2020 
2021  addr = (u32)sg_dma_address(sg);
2022  sg_len = sg_dma_len(sg);
2023 
2024  while (sg_len) {
2025  offset = addr & 0xffff;
2026  len = sg_len;
2027  if ((offset + sg_len) > 0x10000)
2028  len = 0x10000 - offset;
2029 
2030  prd[idx].addr = cpu_to_le32(addr);
2031  prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2032 
2033  idx++;
2034  sg_len -= len;
2035  addr += len;
2036  }
2037  }
2038 
2039  prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2040 }
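/* The loop above splits each scatterlist segment so that no PRD entry
 * crosses a 64 KiB boundary.  Worked example: addr = 0x1f000 and
 * sg_len = 0x2000 give offset = 0xf000, so the segment becomes two
 * PRDs, {0x1f000, len 0x1000} and {0x20000, len 0x1000}.  Per the
 * usual BMDMA PRD convention, a stored length of 0 (len & 0xffff with
 * len == 0x10000) encodes the maximum of 64 KiB.
 */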
2041 
2042 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2043  struct ata_queued_cmd *qc)
2044 {
2045  struct nv_swncq_port_priv *pp = ap->private_data;
2046 
2047  if (qc == NULL)
2048  return 0;
2049 
2050  DPRINTK("Enter\n");
2051 
2052  writel((1 << qc->tag), pp->sactive_block);
2053  pp->last_issue_tag = qc->tag;
2054  pp->dhfis_bits &= ~(1 << qc->tag);
2055  pp->dmafis_bits &= ~(1 << qc->tag);
2056  pp->qc_active |= (0x1 << qc->tag);
2057 
2058  ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2059  ap->ops->sff_exec_command(ap, &qc->tf);
2060 
2061  DPRINTK("Issued tag %u\n", qc->tag);
2062 
2063  return 0;
2064 }
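/* Issue-side bookkeeping: writing (1 << tag) to SActive marks the tag
 * outstanding, while the tag's dhfis_bits/dmafis_bits are cleared so
 * the interrupt handler can tell which FISes for it are still pending.
 */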
2065 
2066 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2067 {
2068  struct ata_port *ap = qc->ap;
2069  struct nv_swncq_port_priv *pp = ap->private_data;
2070 
2071  if (qc->tf.protocol != ATA_PROT_NCQ)
2072  return ata_bmdma_qc_issue(qc);
2073 
2074  DPRINTK("Enter\n");
2075 
2076  if (!pp->qc_active)
2077  nv_swncq_issue_atacmd(ap, qc);
2078  else
2079  nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2080 
2081  return 0;
2082 }
2083 
2084 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2085 {
2086  u32 serror;
2087  struct ata_eh_info *ehi = &ap->link.eh_info;
2088 
2089  ata_ehi_clear_desc(ehi);
2090 
2091  /* AHCI needs SError cleared; otherwise, it might lock up */
2092  sata_scr_read(&ap->link, SCR_ERROR, &serror);
2093  sata_scr_write(&ap->link, SCR_ERROR, serror);
2094 
2095  /* analyze @fis */
2096  if (fis & NV_SWNCQ_IRQ_ADDED)
2097  ata_ehi_push_desc(ehi, "hot plug");
2098  else if (fis & NV_SWNCQ_IRQ_REMOVED)
2099  ata_ehi_push_desc(ehi, "hot unplug");
2100 
2101  ata_ehi_hotplugged(ehi);
2102 
2103  /* okay, let's hand over to EH */
2104  ehi->serror |= serror;
2105 
2106  ata_port_freeze(ap);
2107 }
2108 
2109 static int nv_swncq_sdbfis(struct ata_port *ap)
2110 {
2111  struct ata_queued_cmd *qc;
2112  struct nv_swncq_port_priv *pp = ap->private_data;
2113  struct ata_eh_info *ehi = &ap->link.eh_info;
2114  u32 sactive;
2115  u32 done_mask;
2116  u8 host_stat;
2117  u8 lack_dhfis = 0;
2118 
2119  host_stat = ap->ops->bmdma_status(ap);
2120  if (unlikely(host_stat & ATA_DMA_ERR)) {
2121  /* error when transferring data to/from memory */
2122  ata_ehi_clear_desc(ehi);
2123  ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2124  ehi->err_mask |= AC_ERR_HOST_BUS;
2125  ehi->action |= ATA_EH_RESET;
2126  return -EINVAL;
2127  }
2128 
2129  ap->ops->sff_irq_clear(ap);
2130  __ata_bmdma_stop(ap);
2131 
2132  sactive = readl(pp->sactive_block);
2133  done_mask = pp->qc_active ^ sactive;
2134 
2135  pp->qc_active &= ~done_mask;
2136  pp->dhfis_bits &= ~done_mask;
2137  pp->dmafis_bits &= ~done_mask;
2138  pp->sdbfis_bits |= done_mask;
2139  ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2140 
2141  if (!ap->qc_active) {
2142  DPRINTK("over\n");
2143  nv_swncq_pp_reinit(ap);
2144  return 0;
2145  }
2146 
2147  if (pp->qc_active & pp->dhfis_bits)
2148  return 0;
2149 
2150  if ((pp->ncq_flags & ncq_saw_backout) ||
2151  (pp->qc_active ^ pp->dhfis_bits))
2152  /* if the controller can't get a Device-to-Host register FIS,
2153  * the driver needs to reissue the command.
2154  */
2155  lack_dhfis = 1;
2156 
2157  DPRINTK("id 0x%x QC: qc_active 0x%x,"
2158  "SWNCQ:qc_active 0x%X defer_bits %X "
2159  "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2160  ap->print_id, ap->qc_active, pp->qc_active,
2161  pp->defer_queue.defer_bits, pp->dhfis_bits,
2162  pp->dmafis_bits, pp->last_issue_tag);
2163 
2164  nv_swncq_fis_reinit(ap);
2165 
2166  if (lack_dhfis) {
2167  qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2168  nv_swncq_issue_atacmd(ap, qc);
2169  return 0;
2170  }
2171 
2172  if (pp->defer_queue.defer_bits) {
2173  /* send deferral queue command */
2174  qc = nv_swncq_qc_from_dq(ap);
2175  WARN_ON(qc == NULL);
2176  nv_swncq_issue_atacmd(ap, qc);
2177  }
2178 
2179  return 0;
2180 }
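/* done_mask above, by example: if pp->qc_active = 0x7 (tags 0-2
 * outstanding) and the SActive register now reads 0x5, then
 * done_mask = 0x2 -- tag 1 has completed and is retired via
 * ata_qc_complete_multiple().
 */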
2181 
2182 static inline u32 nv_swncq_tag(struct ata_port *ap)
2183 {
2184  struct nv_swncq_port_priv *pp = ap->private_data;
2185  u32 tag;
2186 
2187  tag = readb(pp->tag_block) >> 2;
2188  return (tag & 0x1f);
2189 }
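/* The controller presumably reports the tag of the current DMA Setup
 * FIS in bits 2:6 of the per-port NCQ register; the decode shifts it
 * down and masks to the 5-bit (0-31) tag range.
 */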
2190 
2191 static void nv_swncq_dmafis(struct ata_port *ap)
2192 {
2193  struct ata_queued_cmd *qc;
2194  unsigned int rw;
2195  u8 dmactl;
2196  u32 tag;
2197  struct nv_swncq_port_priv *pp = ap->private_data;
2198 
2199  __ata_bmdma_stop(ap);
2200  tag = nv_swncq_tag(ap);
2201 
2202  DPRINTK("dma setup tag 0x%x\n", tag);
2203  qc = ata_qc_from_tag(ap, tag);
2204 
2205  if (unlikely(!qc))
2206  return;
2207 
2208  rw = qc->tf.flags & ATA_TFLAG_WRITE;
2209 
2210  /* load PRD table addr. */
2211  iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2212  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2213 
2214  /* specify data direction, triple-check start bit is clear */
2215  dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2216  dmactl &= ~ATA_DMA_WR;
2217  if (!rw)
2218  dmactl |= ATA_DMA_WR;
2219 
2220  iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2221 }
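/* Note the BMDMA direction convention: ATA_DMA_WR means "write to
 * system memory", so it is set for device reads (!rw) and cleared for
 * writes to the device.
 */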
2222 
2223 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2224 {
2225  struct nv_swncq_port_priv *pp = ap->private_data;
2226  struct ata_queued_cmd *qc;
2227  struct ata_eh_info *ehi = &ap->link.eh_info;
2228  u32 serror;
2229  u8 ata_stat;
2230 
2231  ata_stat = ap->ops->sff_check_status(ap);
2232  nv_swncq_irq_clear(ap, fis);
2233  if (!fis)
2234  return;
2235 
2236  if (ap->pflags & ATA_PFLAG_FROZEN)
2237  return;
2238 
2239  if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2240  nv_swncq_hotplug(ap, fis);
2241  return;
2242  }
2243 
2244  if (!pp->qc_active)
2245  return;
2246 
2247  if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
2248  return;
2249  ap->ops->scr_write(&ap->link, SCR_ERROR, serror);
2250 
2251  if (ata_stat & ATA_ERR) {
2252  ata_ehi_clear_desc(ehi);
2253  ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
2254  ehi->err_mask |= AC_ERR_DEV;
2255  ehi->serror |= serror;
2256  ehi->action |= ATA_EH_RESET;
2257  ata_port_freeze(ap);
2258  return;
2259  }
2260 
2261  if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2262  /* If a backout IRQ is received, the driver must
2263  * reissue the command later.
2264  */
2265  pp->ncq_flags |= ncq_saw_backout;
2266  }
2267 
2268  if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2269  pp->ncq_flags |= ncq_saw_sdb;
2270  DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2271  "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2272  ap->print_id, pp->qc_active, pp->dhfis_bits,
2273  pp->dmafis_bits, readl(pp->sactive_block));
2274  if (nv_swncq_sdbfis(ap) < 0)
2275  goto irq_error;
2276  }
2277 
2278  if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2279  /* The interrupt indicates the new command
2280  * was transmitted correctly to the drive.
2281  */
2282  pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2283  pp->ncq_flags |= ncq_saw_d2h;
2284  if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2285  ata_ehi_push_desc(ehi, "illegal fis transaction");
2286  ehi->err_mask |= AC_ERR_HSM;
2287  ehi->action |= ATA_EH_RESET;
2288  goto irq_error;
2289  }
2290 
2291  if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2292  !(pp->ncq_flags & ncq_saw_dmas)) {
2293  ata_stat = ap->ops->sff_check_status(ap);
2294  if (ata_stat & ATA_BUSY)
2295  goto irq_exit;
2296 
2297  if (pp->defer_queue.defer_bits) {
2298  DPRINTK("send next command\n");
2299  qc = nv_swncq_qc_from_dq(ap);
2300  nv_swncq_issue_atacmd(ap, qc);
2301  }
2302  }
2303  }
2304 
2305  if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2306  /* program the dma controller with appropriate PRD buffers
2307  * and start the DMA transfer for the requested command.
2308  */
2309  pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2310  pp->ncq_flags |= ncq_saw_dmas;
2311  nv_swncq_dmafis(ap);
2312  }
2313 
2314 irq_exit:
2315  return;
2316 irq_error:
2317  ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2318  ata_port_freeze(ap);
2319  return;
2320 }
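/* Rough per-command FIS flow handled above: a Device-to-Host register
 * FIS acknowledges command receipt (dhfis_bits), a DMA Setup FIS picks
 * the tag and triggers nv_swncq_dmafis() (dmafis_bits), and a Set
 * Device Bits FIS signals completion (nv_swncq_sdbfis()).  Deferred
 * commands are issued only once the bus is seen idle.
 */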
2321 
2322 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2323 {
2324  struct ata_host *host = dev_instance;
2325  unsigned int i;
2326  unsigned int handled = 0;
2327  unsigned long flags;
2328  u32 irq_stat;
2329 
2330  spin_lock_irqsave(&host->lock, flags);
2331 
2332  irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2333 
2334  for (i = 0; i < host->n_ports; i++) {
2335  struct ata_port *ap = host->ports[i];
2336 
2337  if (ap->link.sactive) {
2338  nv_swncq_host_interrupt(ap, (u16)irq_stat);
2339  handled = 1;
2340  } else {
2341  if (irq_stat) /* preserve the hotplug bits */
2342  nv_swncq_irq_clear(ap, 0xfff0);
2343 
2344  handled += nv_host_intr(ap, (u8)irq_stat);
2345  }
2346  irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2347  }
2348 
2349  spin_unlock_irqrestore(&host->lock, flags);
2350 
2351  return IRQ_RETVAL(handled);
2352 }
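/* irq_stat covers both ports: the low NV_INT_PORT_SHIFT_MCP55 (16)
 * bits belong to port 0; the shift at the bottom of the loop exposes
 * port 1's bits on the next iteration.
 */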
2353 
2354 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2355 {
2356  const struct ata_port_info *ppi[] = { NULL, NULL };
2357  struct nv_pi_priv *ipriv;
2358  struct ata_host *host;
2359  struct nv_host_priv *hpriv;
2360  int rc;
2361  u32 bar;
2362  void __iomem *base;
2363  unsigned long type = ent->driver_data;
2364 
2365  // Make sure this is a SATA controller by counting the number of bars
2366  // (NVIDIA SATA controllers will always have six bars). Otherwise,
2367  // it's an IDE controller and we ignore it.
2368  for (bar = 0; bar < 6; bar++)
2369  if (pci_resource_start(pdev, bar) == 0)
2370  return -ENODEV;
2371 
2372  ata_print_version_once(&pdev->dev, DRV_VERSION);
2373 
2374  rc = pcim_enable_device(pdev);
2375  if (rc)
2376  return rc;
2377 
2378  /* determine type and allocate host */
2379  if (type == CK804 && adma_enabled) {
2380  dev_notice(&pdev->dev, "Using ADMA mode\n");
2381  type = ADMA;
2382  } else if (type == MCP5x && swncq_enabled) {
2383  dev_notice(&pdev->dev, "Using SWNCQ mode\n");
2384  type = SWNCQ;
2385  }
2386 
2387  ppi[0] = &nv_port_info[type];
2388  ipriv = ppi[0]->private_data;
2389  rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2390  if (rc)
2391  return rc;
2392 
2393  hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2394  if (!hpriv)
2395  return -ENOMEM;
2396  hpriv->type = type;
2397  host->private_data = hpriv;
2398 
2399  /* request and iomap NV_MMIO_BAR */
2400  rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2401  if (rc)
2402  return rc;
2403 
2404  /* configure SCR access */
2405  base = host->iomap[NV_MMIO_BAR];
2406  host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2407  host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2408 
2409  /* enable SATA space for CK804 */
2410  if (type >= CK804) {
2411  u8 regval;
2412 
2413  pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2414  regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2415  pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2416  }
2417 
2418  /* init ADMA */
2419  if (type == ADMA) {
2420  rc = nv_adma_host_init(host);
2421  if (rc)
2422  return rc;
2423  } else if (type == SWNCQ)
2424  nv_swncq_host_init(host);
2425 
2426  if (msi_enabled) {
2427  dev_notice(&pdev->dev, "Using MSI\n");
2428  pci_enable_msi(pdev);
2429  }
2430 
2431  pci_set_master(pdev);
2432  return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
2433 }
2434 
2435 #ifdef CONFIG_PM
2436 static int nv_pci_device_resume(struct pci_dev *pdev)
2437 {
2438  struct ata_host *host = dev_get_drvdata(&pdev->dev);
2439  struct nv_host_priv *hpriv = host->private_data;
2440  int rc;
2441 
2442  rc = ata_pci_device_do_resume(pdev);
2443  if (rc)
2444  return rc;
2445 
2446  if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2447  if (hpriv->type >= CK804) {
2448  u8 regval;
2449 
2450  pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2451  regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2452  pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2453  }
2454  if (hpriv->type == ADMA) {
2455  u32 tmp32;
2456  struct nv_adma_port_priv *pp;
2457  /* enable/disable ADMA on the ports appropriately */
2458  pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2459 
2460  pp = host->ports[0]->private_data;
2461  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2462  tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2463  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2464  else
2465  tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
2466  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2467  pp = host->ports[1]->private_data;
2468  if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2469  tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2470  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471  else
2472  tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
2473  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2474 
2475  pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2476  }
2477  }
2478 
2479  ata_host_resume(host);
2480 
2481  return 0;
2482 }
2483 #endif
2484 
2485 static void nv_ck804_host_stop(struct ata_host *host)
2486 {
2487  struct pci_dev *pdev = to_pci_dev(host->dev);
2488  u8 regval;
2489 
2490  /* disable SATA space for CK804 */
2491  pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2492  regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2493  pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2494 }
2495 
2496 static void nv_adma_host_stop(struct ata_host *host)
2497 {
2498  struct pci_dev *pdev = to_pci_dev(host->dev);
2499  u32 tmp32;
2500 
2501  /* disable ADMA on the ports */
2502  pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2503  tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2504  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2505  NV_MCP_SATA_CFG_20_PORT1_EN |
2506  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2507 
2508  pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2509 
2510  nv_ck804_host_stop(host);
2511 }
2512 
2513 module_pci_driver(nv_pci_driver);
2514 
2515 module_param_named(adma, adma_enabled, bool, 0444);
2516 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
2517 module_param_named(swncq, swncq_enabled, bool, 0444);
2518 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
2519 module_param_named(msi, msi_enabled, bool, 0444);
2520 MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
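/* Example usage, assuming the driver is built as a module:
 *
 *   modprobe sata_nv adma=1 swncq=0 msi=1
 *
 * When built in, the same options can be passed on the kernel command
 * line as sata_nv.adma=1, sata_nv.swncq=0, sata_nv.msi=1.
 */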