32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.0"
36 #define DRV_RELDATE "2010-10-09"
38 #include <linux/types.h>
43 #define RHINE_MSG_DEFAULT \
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
53 static int rx_copybreak;
67 static const int multicast_filter_limit = 32;
/*
 * Descriptor ring geometry and buffer sizing.
 * TX_QUEUE_LEN < TX_RING_SIZE: presumably limits how many TX ring entries
 * are used at once so the queue is stopped before the ring is full —
 * TODO confirm against the enqueue path (netif_stop_queue at orig 1713).
 */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10
79 #define RX_RING_SIZE 64
/* TX watchdog timeout in jiffies (2 seconds). */
84 #define TX_TIMEOUT (2*HZ)
/* Receive buffer size; 1536 covers a max Ethernet frame — assumes no jumbo frames. */
86 #define PKT_BUF_SZ 1536
88 #include <linux/module.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
93 #include <linux/errno.h>
96 #include <linux/pci.h>
98 #include <linux/netdevice.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
109 #include <asm/processor.h>
112 #include <asm/uaccess.h>
121 #ifdef CONFIG_VIA_RHINE_MMIO
134 MODULE_PARM_DESC(rx_copybreak,
"VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3,
"Avoid power state D3 (work-around for broken BIOSes)");
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
343 static const int mmio_verify_registers[] = {
396 #define TXDESC 0x00e08000
/*
 * Read-modify-write helpers for 8/16/32-bit device registers.
 *  *_BITS_ON(x, p)     - set the bits in mask x at register address p
 *  *_BITS_IS_ON(x, p)  - test (non-zero result) whether any bit of x is set
 *  *_BITS_OFF(x, p)    - clear the bits in mask x
 *  *_BITS_SET(x, m, p) - clear field mask m, then set bits x within it
 * All are wrapped in do { } while (0) so they behave as single statements.
 */
469 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
470 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
471 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
473 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
474 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
475 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
477 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
478 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
481 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
482 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
483 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
494 static irqreturn_t rhine_interrupt(
int irq,
void *dev_instance);
500 static const struct ethtool_ops netdev_ethtool_ops;
502 static int rhine_vlan_rx_add_vid(
struct net_device *
dev,
unsigned short vid);
503 static int rhine_vlan_rx_kill_vid(
struct net_device *
dev,
unsigned short vid);
511 for (i = 0; i < 1024; i++) {
512 bool has_mask_bits = !!(
ioread8(ioaddr + reg) &
mask);
514 if (low ^ has_mask_bits)
520 "count: %04d\n", low ?
"low" :
"high", reg, mask, i);
526 rhine_wait_bit(rp, reg, mask,
false);
531 rhine_wait_bit(rp, reg, mask,
true);
593 reason =
"Magic packet";
596 reason =
"Link went up";
599 reason =
"Link went down";
602 reason =
"Unicast packet";
605 reason =
"Multicast/broadcast packet";
610 netdev_info(dev,
"Woke system up. Reason: %s\n",
616 static void rhine_chip_reset(
struct net_device *dev)
626 netdev_info(dev,
"Reset not complete yet. Trying harder.\n");
633 rhine_wait_bit_low(rp,
ChipCmd1, Cmd1Reset);
637 netif_info(rp,
hw, dev,
"Reset %s\n", (cmd1 & Cmd1Reset) ?
638 "failed" :
"succeeded");
667 for (i = 0; i < 1024; i++) {
672 pr_info(
"%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
680 enable_mmio(pioaddr, rp->
quirks);
689 #ifdef CONFIG_NET_POLL_CONTROLLER
690 static void rhine_poll(
struct net_device *dev)
696 rhine_interrupt(irq, dev);
701 static void rhine_kick_tx_threshold(
struct rhine_private *rp)
717 "Abort %08x, frame dropped\n", status);
721 rhine_kick_tx_threshold(rp);
722 netif_info(rp, tx_err ,dev,
"Transmitter underrun, "
723 "Tx threshold now %02x\n", rp->
tx_thresh);
727 netif_info(rp, tx_err, dev,
"Tx descriptor write-back race\n");
730 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
731 rhine_kick_tx_threshold(rp);
732 netif_info(rp, tx_err, dev,
"Unspecified error. "
733 "Tx threshold now %02x\n", rp->
tx_thresh);
736 rhine_restart_tx(dev);
739 static void rhine_update_rx_crc_and_missed_errord(
struct rhine_private *rp)
758 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
766 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
770 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
772 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
773 RHINE_EVENT_NAPI_TX | \
/*
 * Interrupt events handled outside the NAPI poll path (PCI error and link
 * change) — presumably deferred to rhine_slow_event_task; confirm against
 * the interrupt handler's dispatch.
 */
775 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
/* Full set of interrupt events the driver reacts to: NAPI-path plus slow-path. */
776 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
787 status = rhine_get_events(rp);
791 work_done += rhine_rx(dev, budget);
803 if (status & RHINE_EVENT_NAPI_TX_ERR)
804 rhine_tx_err(rp, status);
808 spin_lock(&rp->
lock);
809 rhine_update_rx_crc_and_missed_errord(rp);
810 spin_unlock(&rp->
lock);
814 enable_mask &= ~RHINE_EVENT_SLOW;
818 if (work_done < budget) {
831 rhine_chip_reset(dev);
838 rhine_reload_eeprom(pioaddr, dev);
842 .ndo_open = rhine_open,
843 .ndo_stop = rhine_close,
844 .ndo_start_xmit = rhine_start_tx,
845 .ndo_get_stats = rhine_get_stats,
846 .ndo_set_rx_mode = rhine_set_rx_mode,
850 .ndo_do_ioctl = netdev_ioctl,
851 .ndo_tx_timeout = rhine_tx_timeout,
852 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
853 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
854 #ifdef CONFIG_NET_POLL_CONTROLLER
855 .ndo_poll_controller = rhine_poll,
903 name =
"Rhine III (Management Adapter)";
915 "32-bit PCI DMA addresses not supported by the card!?\n");
923 dev_err(&pdev->
dev,
"Insufficient PCI resources, aborting\n");
939 rp = netdev_priv(dev);
948 goto err_out_free_netdev;
950 ioaddr = pci_iomap(pdev, bar, io_size);
954 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
955 pci_name(pdev), io_size, memaddr);
956 goto err_out_free_res;
960 enable_mmio(pioaddr, quirks);
964 while (mmio_verify_registers[i]) {
965 int reg = mmio_verify_registers[i++];
966 unsigned char a =
inb(pioaddr+reg);
967 unsigned char b =
readb(ioaddr+reg);
971 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
981 rhine_power_init(dev);
982 rhine_hw_init(dev, pioaddr);
984 for (i = 0; i < 6; i++)
987 if (!is_valid_ether_addr(dev->
dev_addr)) {
989 netdev_err(dev,
"Invalid MAC address: %pM\n", dev->
dev_addr);
990 eth_hw_addr_random(dev);
991 netdev_info(dev,
"Using random MAC address: %pM\n",
998 phy_id =
ioread8(ioaddr + 0x6C);
1006 rp->
mii_if.mdio_read = mdio_read;
1007 rp->
mii_if.mdio_write = mdio_write;
1008 rp->
mii_if.phy_id_mask = 0x1f;
1009 rp->
mii_if.reg_num_mask = 0x1f;
1030 netdev_info(dev,
"VIA %s at 0x%lx, %pM, IRQ %d\n",
1039 pci_set_drvdata(pdev, dev);
1043 int mii_status = mdio_read(dev, phy_id, 1);
1045 mdio_write(dev, phy_id,
MII_BMCR, mii_cmd);
1046 if (mii_status != 0xffff && mii_status != 0x0000) {
1047 rp->
mii_if.advertising = mdio_read(dev, phy_id, 4);
1049 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1051 mii_status, rp->
mii_if.advertising,
1052 mdio_read(dev, phy_id, 5));
1064 netif_info(rp, probe, dev,
"No D3 power state at shutdown\n");
1072 err_out_free_netdev:
1078 static int alloc_ring(
struct net_device* dev)
1089 netdev_err(dev,
"Could not allocate DMA memory\n");
1113 static void free_ring(
struct net_device* dev)
1131 static void alloc_rbufs(
struct net_device *dev)
1147 next +=
sizeof(
struct rx_desc);
1171 static void free_rbufs(
struct net_device* dev)
1181 pci_unmap_single(rp->
pdev,
1190 static void alloc_tbufs(
struct net_device* dev)
1202 next +=
sizeof(
struct tx_desc);
1211 static void free_tbufs(
struct net_device* dev)
1222 pci_unmap_single(rp->
pdev,
1234 static void rhine_check_media(
struct net_device *dev,
unsigned int init_media)
1241 if (rp->
mii_if.full_duplex)
1249 rp->
mii_if.force_media, netif_carrier_ok(dev));
1253 static void rhine_set_carrier(
struct mii_if_info *mii)
1260 if (!netif_carrier_ok(dev))
1263 rhine_check_media(dev, 0);
1289 for (i = 0; i < 6; i++, addr++)
1308 static void rhine_set_vlan_cam(
void __iomem *ioaddr,
int idx,
u8 *addr)
1335 static void rhine_set_cam_mask(
void __iomem *ioaddr,
u32 mask)
1354 static void rhine_set_vlan_cam_mask(
void __iomem *ioaddr,
u32 mask)
1373 static void rhine_init_cam_filter(
struct net_device *dev)
1379 rhine_set_vlan_cam_mask(ioaddr, 0);
1380 rhine_set_cam_mask(ioaddr, 0);
1393 static void rhine_update_vcam(
struct net_device *dev)
1402 rhine_set_vlan_cam(ioaddr, i, (
u8 *)&vid);
1407 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1410 static int rhine_vlan_rx_add_vid(
struct net_device *dev,
unsigned short vid)
1414 spin_lock_bh(&rp->
lock);
1416 rhine_update_vcam(dev);
1417 spin_unlock_bh(&rp->
lock);
1421 static int rhine_vlan_rx_kill_vid(
struct net_device *dev,
unsigned short vid)
1425 spin_lock_bh(&rp->
lock);
1427 rhine_update_vcam(dev);
1428 spin_unlock_bh(&rp->
lock);
1438 for (i = 0; i < 6; i++)
1451 rhine_set_rx_mode(dev);
1454 rhine_init_cam_filter(dev);
1456 napi_enable(&rp->
napi);
1462 rhine_check_media(dev, 1);
1506 static int mdio_read(
struct net_device *dev,
int phy_id,
int regnum)
1512 rhine_disable_linkmon(rp);
1518 rhine_wait_bit_low(rp,
MIICmd, 0x40);
1521 rhine_enable_linkmon(rp);
1525 static void mdio_write(
struct net_device *dev,
int phy_id,
int regnum,
int value)
1530 rhine_disable_linkmon(rp);
1537 rhine_wait_bit_low(rp,
MIICmd, 0x20);
1539 rhine_enable_linkmon(rp);
1559 static int rhine_open(
struct net_device *dev)
1570 netif_dbg(rp, ifup, dev,
"%s() irq %d\n", __func__, rp->
pdev->irq);
1572 rc = alloc_ring(dev);
1579 rhine_chip_reset(dev);
1580 rhine_task_enable(rp);
1583 netif_dbg(rp, ifup, dev,
"%s() Done - status %04x MII status: %04x\n",
1587 netif_start_queue(dev);
1603 napi_disable(&rp->
napi);
1604 spin_lock_bh(&rp->
lock);
1613 rhine_chip_reset(dev);
1616 spin_unlock_bh(&rp->
lock);
1619 dev->
stats.tx_errors++;
1620 netif_wake_queue(dev);
1626 static void rhine_tx_timeout(
struct net_device *dev)
1631 netdev_warn(dev,
"Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1663 dev->
stats.tx_dropped++;
1713 netif_stop_queue(dev);
1715 netif_dbg(rp, tx_queued, dev,
"Transmit frame #%d queued in slot %d\n",
1729 static irqreturn_t rhine_interrupt(
int irq,
void *dev_instance)
1736 status = rhine_get_events(rp);
1738 netif_dbg(rp,
intr, dev,
"Interrupt, status %08x\n", status);
1743 rhine_irq_disable(rp);
1744 napi_schedule(&rp->
napi);
1748 netif_err(rp,
intr, dev,
"Something Wicked happened! %08x\n",
1769 if (txstatus & 0x8000) {
1771 "Transmit error, Tx status %08x\n", txstatus);
1772 dev->
stats.tx_errors++;
1773 if (txstatus & 0x0400)
1774 dev->
stats.tx_carrier_errors++;
1775 if (txstatus & 0x0200)
1776 dev->
stats.tx_window_errors++;
1777 if (txstatus & 0x0100)
1778 dev->
stats.tx_aborted_errors++;
1779 if (txstatus & 0x0080)
1780 dev->
stats.tx_heartbeat_errors++;
1782 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1783 dev->
stats.tx_fifo_errors++;
1790 dev->
stats.collisions += (txstatus >> 3) & 0x0F;
1792 dev->
stats.collisions += txstatus & 0x0F;
1794 (txstatus >> 3) & 0xF, txstatus & 0xF);
1796 dev->
stats.tx_packets++;
1800 pci_unmap_single(rp->
pdev,
1807 entry = (++rp->
dirty_tx) % TX_RING_SIZE;
1810 netif_wake_queue(dev);
1824 u8 *trailer = (
u8 *)skb->
data + ((data_size + 3) & ~3) + 2;
1843 int data_size = desc_status >> 16;
1845 if (desc_status & DescOwn)
1852 if ((desc_status &
RxWholePkt) != RxWholePkt) {
1854 "Oversized Ethernet frame spanned multiple buffers, "
1855 "entry %#x length %d status %08x!\n",
1859 "Oversized Ethernet frame %p vs %p\n",
1862 dev->
stats.rx_length_errors++;
1863 }
else if (desc_status &
RxErr) {
1866 "%s() Rx error %08x\n", __func__,
1868 dev->
stats.rx_errors++;
1869 if (desc_status & 0x0030)
1870 dev->
stats.rx_length_errors++;
1871 if (desc_status & 0x0048)
1872 dev->
stats.rx_fifo_errors++;
1873 if (desc_status & 0x0004)
1874 dev->
stats.rx_frame_errors++;
1875 if (desc_status & 0x0002) {
1877 spin_lock(&rp->
lock);
1878 dev->
stats.rx_crc_errors++;
1879 spin_unlock(&rp->
lock);
1890 if (pkt_len < rx_copybreak)
1891 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1893 pci_dma_sync_single_for_cpu(rp->
pdev,
1898 skb_copy_to_linear_data(skb,
1902 pci_dma_sync_single_for_device(rp->
pdev,
1909 netdev_err(dev,
"Inconsistent Rx descriptor chain\n");
1914 pci_unmap_single(rp->
pdev,
1921 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1925 if (
unlikely(desc_length & DescTag))
1926 __vlan_hwaccel_put_tag(skb, vlan_tci);
1929 dev->
stats.rx_packets++;
1931 entry = (++rp->
cur_rx) % RX_RING_SIZE;
1940 skb = netdev_alloc_skb(dev, rp->
rx_buf_sz);
1945 pci_map_single(rp->
pdev, skb->
data,
1956 static void rhine_restart_tx(
struct net_device *dev) {
1966 intr_status = rhine_get_events(rp);
1987 netif_warn(rp, tx_err, dev,
"another error occurred %08x\n",
1993 static void rhine_slow_event_task(
struct work_struct *work)
2005 intr_status = rhine_get_events(rp);
2009 rhine_check_media(dev, 0);
2014 napi_disable(&rp->
napi);
2015 rhine_irq_disable(rp);
2017 napi_enable(&rp->
napi);
2018 napi_schedule(&rp->
napi);
2028 spin_lock_bh(&rp->
lock);
2029 rhine_update_rx_crc_and_missed_errord(rp);
2030 spin_unlock_bh(&rp->
lock);
2035 static void rhine_set_rx_mode(
struct net_device *dev)
2058 rhine_set_cam(ioaddr, i, ha->
addr);
2062 rhine_set_cam_mask(ioaddr, mCAMmask);
2064 memset(mc_filter, 0,
sizeof(mc_filter));
2068 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2111 rhine_set_carrier(&rp->
mii_if);
2117 static int netdev_nway_reset(
struct net_device *dev)
2152 spin_lock_irq(&rp->
lock);
2156 spin_unlock_irq(&rp->
lock);
2171 spin_lock_irq(&rp->
lock);
2173 spin_unlock_irq(&rp->
lock);
2178 static const struct ethtool_ops netdev_ethtool_ops = {
2179 .get_drvinfo = netdev_get_drvinfo,
2180 .get_settings = netdev_get_settings,
2181 .set_settings = netdev_set_settings,
2182 .nway_reset = netdev_nway_reset,
2183 .get_link = netdev_get_link,
2184 .get_msglevel = netdev_get_msglevel,
2185 .set_msglevel = netdev_set_msglevel,
2186 .get_wol = rhine_get_wol,
2187 .set_wol = rhine_set_wol,
2195 if (!netif_running(dev))
2200 rhine_set_carrier(&rp->
mii_if);
2206 static int rhine_close(
struct net_device *dev)
2211 rhine_task_disable(rp);
2212 napi_disable(&rp->
napi);
2213 netif_stop_queue(dev);
2215 netif_dbg(rp, ifdown, dev,
"Shutting down ethercard, status was %04x\n",
2221 rhine_irq_disable(rp);
2237 struct net_device *dev = pci_get_drvdata(pdev);
2247 pci_set_drvdata(pdev,
NULL);
2250 static void rhine_shutdown (
struct pci_dev *pdev)
2252 struct net_device *dev = pci_get_drvdata(pdev);
2259 rhine_power_init(dev);
2265 spin_lock(&rp->
lock);
2291 spin_unlock(&rp->
lock);
2301 #ifdef CONFIG_PM_SLEEP
2305 struct net_device *dev = pci_get_drvdata(pdev);
2308 if (!netif_running(dev))
2311 rhine_task_disable(rp);
2312 rhine_irq_disable(rp);
2313 napi_disable(&rp->
napi);
2317 rhine_shutdown(pdev);
2322 static int rhine_resume(
struct device *device)
2325 struct net_device *dev = pci_get_drvdata(pdev);
2328 if (!netif_running(dev))
2334 rhine_power_init(dev);
2339 rhine_task_enable(rp);
2340 spin_lock_bh(&rp->
lock);
2342 spin_unlock_bh(&rp->
lock);
2350 #define RHINE_PM_OPS (&rhine_pm_ops)
2354 #define RHINE_PM_OPS NULL
2360 .id_table = rhine_pci_tbl,
2361 .probe = rhine_init_one,
2363 .shutdown = rhine_shutdown,
2385 static int __init rhine_init(
void)
2394 pr_warn(
"Broken BIOS detected, avoid_D3 enabled\n");
2399 return pci_register_driver(&rhine_driver);
2403 static void __exit rhine_cleanup(
void)