#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/bitops.h>

#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.28"

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
#define IS_GEN_I(hpriv)   ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)  ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv)    ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv)     ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)    (0x20030 + ((i) << 4))
#define WINDOW_BASE(i)    (0x20034 + ((i) << 4))
#if defined(CONFIG_HAVE_CLK)
	struct clk *clk;
	struct clk **port_clks;
#endif
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);
static void mv_sff_irq_clear(struct ata_port *ap);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
static struct ata_port_operations mv5_ops = {
	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config		= mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,	/* most ops are shared with mv6_ops */
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{ .port_ops = &mv5_ops },	/* chip_504x */
	{ .port_ops = &mv5_ops },	/* chip_508x */
	{ .port_ops = &mv5_ops },	/* chip_5080 */
	{ .port_ops = &mv6_ops },	/* chip_604x */
	{ .port_ops = &mv6_ops },	/* chip_608x */
	{ .port_ops = &mv_iie_ops },	/* chip_6042 */
	{ .port_ops = &mv_iie_ops },	/* chip_7042 */
	{ .port_ops = &mv_iie_ops },	/* chip_soc */
};
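/*
 * Low-level hardware op tables: one set of PHY errata, LED, preamp and
 * reset routines per chip family (50xx, 60xx/70xx PCI, SoC, 65nm SoC).
 */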
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata	= mv5_phy_errata,
	.enable_leds	= mv5_enable_leds,
	.read_preamp	= mv5_read_preamp,
	.reset_hc	= mv5_reset_hc,
	.reset_flash	= mv5_reset_flash,
	.reset_bus	= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata	= mv_soc_65n_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
static inline unsigned int mv_hc_from_port(unsigned int port)

static inline unsigned int mv_hardport_from_port(unsigned int port)

#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;					\

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
	return mv_hc_base(base, mv_hc_from_port(port));

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
	return mv_hc_base_from_port(base, port) +

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
static inline void __iomem *mv_ap_base(struct ata_port *ap)
	return mv_port_base(mv_host_base(ap->host), ap->port_no);

static inline int mv_get_hc_count(unsigned long port_flags)

static void mv_save_cached_regs(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
	laddr = (long)addr & 0xffff;
	if (laddr >= 0x300 && laddr <= 0x33c) {
		if (laddr == 0x4 || laddr == 0xc) {

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
	u32 old_mask, new_mask;

	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		mv_write_main_irq_mask(new_mask, hpriv);

static void mv_enable_port_irqs(struct ata_port *ap, unsigned int port_bits)
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	enable_bits = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);

	mv_enable_port_irqs(ap, port_irqs);
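/*
 * Program the IRQ coalescing thresholds: interrupts are withheld until
 * either "count" command completions have occurred or "usecs" has elapsed.
 * Passing 0 for either value disables coalescing entirely.
 */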
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
	u32 coal_enable = 0;
	unsigned long flags;

	if (!usecs || !count) {

	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		hc_mmio = mv_hc_base_from_port(mmio, 0);

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
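/*
 * Enable the EDMA engine for this port if it is not already running,
 * reconfiguring it first for NCQ or non-NCQ operation as required by
 * the protocol being issued.
 */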
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
	if (want_ncq != using_ncq)

	mv_edma_cfg(ap, want_ncq, 1);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);

	if ((edma_stat & empty_idle) == empty_idle)

static int mv_stop_edma_engine(void __iomem *port_mmio)
	for (i = 10000; i > 0; i--) {

static int mv_stop_edma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);

	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
	mv_edma_cfg(ap, 0, 0);

	for (b = 0; b < bytes; ) {
		for (w = 0; b < bytes && w < 4; w++) {

#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
	void __iomem *hc_base = mv_hc_base(mmio_base,
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	start_hc = start_port = 0;
	num_ports = num_hcs = 1;
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);

	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;

		if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
		writelfl(val, addr);

static void mv6_dev_config(struct ata_device *adev)
	if (sata_pmp_attached(adev->link->ap)) {
			"NCQ disabled for command-based switching\n");

	if (ata_is_ncq(qc->tf.protocol))

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
	u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
	u32 ltmode, *old_ltmode = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
	new = old | (1 << 22);
	new = old & ~(1 << 22);

static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
	u32 new, *old = &pp->cached.unknown_rsvd;

static void mv_soc_led_blink_enable(struct ata_port *ap)
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);

static void mv_soc_led_blink_disable(struct ata_port *ap)
	for (port = 0; port < hpriv->n_ports; port++) {
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
	void __iomem *port_mmio = mv_ap_base(ap);

	mv_60x1_errata_sata25(ap, want_ncq);

	int want_fbs = sata_pmp_attached(ap);

	want_fbs &= want_ncq;

	mv_config_fbs(ap, want_ncq, want_fbs);

	mv_bmdma_enable_iie(ap, !want_edma);

	mv_soc_led_blink_enable(ap);
	mv_soc_led_blink_disable(ap);

	writelfl(cfg, port_mmio + EDMA_CFG);

static void mv_port_free_dma_mem(struct ata_port *ap)

static int mv_port_start(struct ata_port *ap)
	unsigned long flags;

	goto out_port_free_dma_mem;

	if (tag == 0 || !IS_GEN_I(hpriv)) {
		goto out_port_free_dma_mem;

	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);

static void mv_port_stop(struct ata_port *ap)
	unsigned long flags;

	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);

	if (offset + len > 0x10000)

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)

static void mv_sff_irq_clear(struct ata_port *ap)
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);

	/* from mv_check_atapi_dma(): */
	switch (scmd->cmnd[0]) {

	/* from mv_bmdma_setup(): */
	void __iomem *port_mmio = mv_ap_base(ap);

	ap->ops->sff_exec_command(ap, &qc->tf);

	/* from mv_bmdma_start(): */
	void __iomem *port_mmio = mv_ap_base(ap);
static void mv_bmdma_stop_ap(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);

	cmd &= ~ATA_DMA_START;

	mv_bmdma_stop_ap(qc->ap);

static u8 mv_bmdma_status(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);

	mv_bmdma_stop_ap(ap);

	if (qc->dev->multi_count > 7) {

	mv_rw_multi_errata_sata24(qc);

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);

	pp->crqb[in_index].sg_addr =
	pp->crqb[in_index].sg_addr_hi =
	cw = &pp->crqb[in_index].ata_cmd[0];

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);

static u8 mv_sff_check_status(struct ata_port *ap)

static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	ifctl = 0x100 | (old_ifctl & 0xf);
	for (i = 0; i < final_word; ++i)
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);

	} while (!(ifstat & 0x1000) && --timeout);

	if ((ifstat & 0x3000) != 0x1000) {

	switch (qc->tf.protocol) {
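/*
 * Command issue: EDMA-capable protocols are handed to the EDMA request
 * queue, while everything else first stops EDMA and then falls back to
 * the FIS-based or plain SFF/BMDMA issue paths.
 */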
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
	static int limit_warnings = 10;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int port_irqs;

	switch (qc->tf.protocol) {
		if (!ap->ops->bmdma_setup)
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);

			": attempting PIO w/multiple DRQ: "
			"this may fail due to h/w errata\n");

	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	return mv_qc_issue_fis(qc);
	return ata_bmdma_qc_issue(qc);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);

static void mv_pmp_error_handler(struct ata_port *ap)
	unsigned int pmp, pmp_map;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;

static int mv_req_q_empty(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	return (in_ptr == out_ptr);

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
	unsigned int old_map, new_map;

	new_map = old_map | mv_get_err_pmp_map(ap);
	if (old_map != new_map) {
		mv_pmp_eh_prep(ap, new_map & ~old_map);

		"%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",

	mv_process_crpb_entries(ap, pp);

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
		__func__, edma_err_cause, pp->pp_flags);
		return mv_handle_fbs_ncq_dev_err(ap);
		__func__, edma_err_cause, pp->pp_flags);
		return mv_handle_fbs_non_ncq_dev_err(ap);

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
	char *when = "idle";

	if (edma_was_enabled) {
		when = "EDMA enabled";
static void mv_err_intr(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	unsigned int action = 0, err_mask = 0;

	if (edma_err_cause & EDMA_ERR_DEV) {
		if (mv_handle_dev_err(ap, edma_err_cause))

	qc = mv_get_active_qc(ap);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		u32 ec = edma_err_cause &

	if (edma_err_cause & EDMA_ERR_DEV) {

		ata_ehi_hotplugged(ehi);
			"dev disconnect" : "dev connect");

	} else if (edma_err_cause & eh_freeze_mask) {
static bool mv_process_crpb_response(struct ata_port *ap,
				     struct mv_crpb *response,
				     unsigned int tag, int ncq_enabled)
	u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;

	if (!ac_err_mask(ata_status))
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
	void __iomem *port_mmio = mv_ap_base(ap);
	bool work_done = false;

	tag = ap->link.active_tag;

	if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
		done_mask |= 1 << tag;
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
	int edma_was_enabled;

	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		mv_handle_fbs_ncq_dev_err(ap);
	} else if (!edma_was_enabled) {
		ata_bmdma_port_intr(ap, qc);
		mv_unexpected_intr(ap, edma_was_enabled);

static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		unsigned int p, shift, hardport, port_cause;

		if (hardport == 0) {
			u32 port_mask, ack_irqs;

			if ((port + p) >= hpriv->n_ports)
			port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
			if (hc_cause & port_mask)
			hc_mmio = mv_hc_base_from_port(mmio, port);

		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		mv_port_intr(ap, port_cause);
	/* from mv_pci_error(): log the PCI error cause and freeze ports for recovery */
	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);

	DPRINTK("All regs @ PCI error\n");

	for (i = 0; i < host->n_ports; i++) {
		ehi = &ap->link.eh_info;
			"PCI err cause 0x%08x", err_cause);
		qc = ata_qc_from_tag(ap, ap->link.active_tag);
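/*
 * Top-level interrupt handler: under host->lock, read the main IRQ cause
 * register (briefly masking everything first when MSI is in use), treat an
 * all-ones readback as bogus/hot-unplug, and dispatch either to the PCI
 * error handler or to mv_host_intr() for per-port service.
 */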
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	mv_write_main_irq_mask(0, hpriv);

	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		handled = mv_pci_error(host, hpriv->base);
		handled = mv_host_intr(host, pending_irqs);

	spin_unlock(&host->lock);
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = sc_reg_in * sizeof(u32);

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);

	mv_reset_pci_bus(host, mmio);

	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);

	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);

#define ZERO(reg) writel(0, port_mmio + (reg))
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

#define ZERO(reg) writel(0, hc_mmio + (reg))
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);

	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);

	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		mv5_reset_hc_port(hpriv, mmio,
				  (hc * MV_PORTS_PER_HC) + port);
		mv5_reset_one_hc(hpriv, mmio, hc);

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)

	mv5_reset_flash(hpriv, mmio);
	tmp |= (1 << 5) | (1 << 6);

	for (i = 0; i < 1000; i++) {

	if ((tmp & (1 << 0)) == 0) {
	port_mmio = mv_port_base(mmio, idx);
	void __iomem *port_mmio = mv_port_base(mmio, port);

	if (fix_phy_mode2) {
		m2 &= ~((1 << 16) | (1 << 31));

	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	if (fix_phy_mode4) {
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
	port_mmio = mv_port_base(mmio, idx);
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
	void __iomem *port_mmio = mv_port_base(mmio, port);

	reg &= ~(0x3 << 27);
	reg &= ~(0x3 << 29);

	reg &= ~(0x1 << 14);
	reg &= ~(0x1 << 14);

	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);

	mv_setup_ifcfg(port_mmio, 1);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);
static void mv_pmp_select(struct ata_port *ap, int pmp)
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		int old = reg & 0xf;

		reg = (reg & ~0xf) | pmp;

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
	mv_pmp_select(link->ap, sata_srst_pmp(link));

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
	mv_pmp_select(link->ap, sata_srst_pmp(link));
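/*
 * Hard reset: reset the channel, then run COMRESET with the usual retry
 * loop.  Links that keep reporting SStatus 0x121 are dropped back to
 * Gen1 signalling (mv_setup_ifcfg(..., 0)) before retrying; afterwards
 * the cached port registers are re-saved and a basic non-EDMA
 * configuration is reprogrammed, since the reset clears it.
 */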
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
	int rc, attempts = 0, extra = 0;

	mv_reset_channel(hpriv, mmio, ap->port_no);

	const unsigned long *timing =

	if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
		mv_setup_ifcfg(mv_ap_base(ap), 0);
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
static void mv_eh_freeze(struct ata_port *ap)
	mv_enable_port_irqs(ap, 0);

static void mv_eh_thaw(struct ata_port *ap)
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);

	mv_enable_port_irqs(ap, ERR_IRQ);

static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",

static unsigned int mv_in_pcix_mode(struct ata_host *host)

static int mv_pci_cut_through_okay(struct ata_host *host)
	if (!mv_in_pcix_mode(host)) {

static void mv_60x1b2_errata_pci7(struct ata_host *host)
	if (mv_in_pcix_mode(host)) {
	/* from mv_chip_id(host, board_idx): pick hw_ops and apply per-revision errata */
	switch (board_idx) {
		hpriv->ops = &mv5xxx_ops;
			"Applying 50XXB2 workarounds to unknown rev\n");

		hpriv->ops = &mv5xxx_ops;
			"Applying B2 workarounds to unknown rev\n");

		hpriv->ops = &mv6xxx_ops;
		mv_60x1b2_errata_pci7(host);
			"Applying B2 workarounds to unknown rev\n");

			" BIOS CORRUPTS DATA on all attached drives,"
			" regardless of if/how they are configured."
			" use sectors 8-9 on \"Legacy\" drives,"
			" and avoid the final two gigabytes on"
			" all RocketRAID BIOS initialized drives.\n");

		hpriv->ops = &mv6xxx_ops;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			"Applying 60X1C0 workarounds to unknown rev\n");

		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;

		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
static int mv_init_host(struct ata_host *host)
	int rc = 0, n_hc, port, hc;

	rc = mv_chip_id(host, hpriv->board_idx);

	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,

	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
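/*
 * Program the SoC's MBUS address decode windows so the SATA DMA engines
 * can reach each populated DRAM chip-select; unused windows are cleared.
 */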
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
	for (i = 0; i < 4; i++) {

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
static int mv_platform_probe(struct platform_device *pdev)
	int n_ports = 0, irq = 0;
#if defined(CONFIG_HAVE_CLK)

	dev_err(&pdev->dev, "invalid number of resources\n");

	if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
	} else {
		mv_platform_data = pdev->dev.platform_data;
		n_ports = mv_platform_data->n_ports;
	}

	if (!host || !hpriv)

#if defined(CONFIG_HAVE_CLK)
		sizeof(struct clk *) * n_ports,
	if (!hpriv->port_clks)

		resource_size(res));

#if defined(CONFIG_HAVE_CLK)
	if (IS_ERR(hpriv->clk))
		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
	else
		clk_prepare_enable(hpriv->clk);

	for (port = 0; port < n_ports; port++) {
		sprintf(port_number, "%d", port);
		if (!IS_ERR(hpriv->port_clks[port]))
			clk_prepare_enable(hpriv->port_clks[port]);

	mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);

	rc = mv_init_host(host);

		(unsigned)MV_MAX_Q_DEPTH, host->n_ports);

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);

	for (port = 0; port < n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
	/* platform device removal: release the clocks acquired in mv_platform_probe() */
	struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)

#if defined(CONFIG_HAVE_CLK)
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);

	for (port = 0; port < host->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
	/* from mv_platform_suspend(): */
	struct ata_host *host = platform_get_drvdata(pdev);

	return ata_host_suspend(host, state);

	/* from mv_platform_resume(): */
	struct ata_host *host = platform_get_drvdata(pdev);

	mv_conf_mbus_windows(hpriv, dram);

	ret = mv_init_host(host);

	ata_host_resume(host);

#define mv_platform_suspend NULL
#define mv_platform_resume NULL

	{ .compatible = "marvell,orion-sata", },

	.probe = mv_platform_probe,
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
static int mv_pci_device_resume(struct pci_dev *pdev);

static struct pci_driver mv_pci_driver = {
	.id_table	= mv_pci_tbl,
	.probe		= mv_pci_init_one,
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= mv_pci_device_resume,
};
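/*
 * Try to enable 64-bit DMA masks, falling back to 32-bit addressing when
 * either the streaming or the consistent mask cannot be set.
 */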
static int pci_go_64(struct pci_dev *pdev)
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		"64-bit DMA enable failed\n");

	dev_err(&pdev->dev, "32-bit DMA enable failed\n");
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		"32-bit consistent DMA enable failed\n");
static void mv_print_info(struct ata_host *host)
	const char *scc_s, *gen;

	else if (scc == 0x01)

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
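/*
 * PCI probe: map the BARs, pick the board type from the device table,
 * set up DMA masks and pools, initialise the host controller, and
 * activate the libata host with the Gen-I or Gen-II sht as appropriate.
 */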
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
	if (!host || !hpriv)

	rc = pci_go_64(pdev);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

	rc = mv_init_host(host);

	if (msi && pci_enable_msi(pdev) == 0)

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

		IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
static int mv_pci_device_resume(struct pci_dev *pdev)
	struct ata_host *host = pci_get_drvdata(pdev);

	rc = ata_pci_device_do_resume(pdev);

	rc = mv_init_host(host);

	ata_host_resume(host);

static int __init mv_init(void)
	rc = pci_register_driver(&mv_pci_driver);

static void __exit mv_exit(void)