#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME		"8139cp"
#define DRV_VERSION		"1.3"
#define DRV_RELDATE		"Mar 22, 2004"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <asm/uaccess.h>
static int debug = -1;

static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit,
		  "8139cp: maximum number of filtered multicast addresses");
#define CP_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK)
#define CP_NUM_STATS		14	/* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE		64	/* size in bytes of DMA stats block */
#define CP_REGS_SIZE		(0xff + 1)
#define CP_REGS_VER		1	/* version 1 */
#define CP_RX_RING_SIZE		64
#define CP_TX_RING_SIZE		64
#define CP_RING_BYTES		\
		((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +	\
		 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +	\
		 CP_STATS_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
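
/*
 * Added note (not in the original source): the ring sizes are powers of
 * two, so NEXT_TX()/NEXT_RX() wrap an index with a cheap AND mask
 * instead of a modulo.  TX_BUFFS_AVAIL() reports at most
 * CP_TX_RING_SIZE - 1 free slots: one descriptor is always left unused
 * so that a full ring (head catching up to tail) can never be confused
 * with an empty one (head == tail).
 */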
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
#define CP_INTERNAL_PHY		32
/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH		5	/* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST		4	/* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST		6	/* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH		256	/* Early Tx threshold, in bytes */
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(6*HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU		60	/* TODO: allow lower, but pad */
#define CP_MAX_MTU		4096
	IFG		= (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
static const unsigned int cp_rx_config =
	  (RX_FIFO_THRESH << RxCfgFIFOShift) |
	  (RX_DMA_BURST << RxCfgDMAShift);
#define cpr8(reg)	readb(cp->regs + (reg))
#define cpr16(reg)	readw(cp->regs + (reg))
#define cpr32(reg)	readl(cp->regs + (reg))
#define cpw8(reg,val)	writeb((val), cp->regs + (reg))
#define cpw16(reg,val)	writew((val), cp->regs + (reg))
#define cpw32(reg,val)	writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)
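
/*
 * Added note: the _f ("flush") variants read back the register they just
 * wrote.  MMIO writes can be posted on the PCI bus, and the dummy read
 * forces the write to complete at the chip before the driver continues,
 * which matters around resets and interrupt-mask changes.
 */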
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static const struct {
	const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;
	cp->dev->stats.rx_packets++;
	cp->dev->stats.rx_bytes += skb->len;

	__vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);
	cp->dev->stats.rx_errors++;
	if (status & RxErrFrame)
		cp->dev->stats.rx_frame_errors++;
	if (status & RxErrCRC)
		cp->dev->stats.rx_crc_errors++;
	if ((status & RxErrRunt) || (status & RxErrLong))
		cp->dev->stats.rx_length_errors++;
	if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
		cp->dev->stats.rx_length_errors++;
	if (status & RxErrFIFO)
		cp->dev->stats.rx_fifo_errors++;
}
static inline unsigned int cp_rx_csum_ok (u32 status)
{
	unsigned int protocol = (status >> 16) & 0x3;
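
	/*
	 * Added note: bits 17:16 of the Rx status word hold the protocol
	 * type the chip parsed; the remainder of this helper checks the
	 * matching per-protocol checksum-failure flags before the caller
	 * claims CHECKSUM_UNNECESSARY.
	 */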
static int cp_rx_poll(struct napi_struct *napi, int budget)
{
	unsigned int rx_tail = cp->rx_tail;

	skb = cp->rx_skb[rx_tail];
		len = (status & 0x1fff) - 4;	/* strip the 4-byte FCS */
			cp_rx_err_acct(cp, rx_tail, status, len);
			dev->stats.rx_dropped++;

			cp_rx_err_acct(cp, rx_tail, status, len);

		netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
			  rx_tail, status, len);

		new_skb = netdev_alloc_skb_ip_align(dev, buflen);
		if (!new_skb) {
			dev->stats.rx_dropped++;
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);

		cp->rx_ring[rx_tail].opts2 = 0;
	spin_unlock_irqrestore(&cp->lock, flags);
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	cp = netdev_priv(dev);

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
		  status, cpr8(Cmd), cpr16(CpCmd));
	spin_lock(&cp->lock);

	/* close possible races with dev_close */
	if (unlikely(!netif_running(dev))) {
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&cp->napi)) {

	spin_unlock(&cp->lock);
		netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
			   status, pci_status);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	const int irq = cp->pdev->irq;

	disable_irq(irq);
	cp_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {

		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail];
672 "tx err, status 0x%x\n", status);
673 cp->
dev->stats.tx_errors++;
675 cp->
dev->stats.tx_window_errors++;
677 cp->
dev->stats.tx_aborted_errors++;
679 cp->
dev->stats.tx_carrier_errors++;
681 cp->
dev->stats.tx_fifo_errors++;
683 cp->
dev->stats.collisions +=
685 cp->
dev->stats.tx_packets++;
686 cp->
dev->stats.tx_bytes += skb->
len;
688 "tx done, slot %d\n", tx_tail);
		netif_wake_queue(cp->dev);
static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	unsigned long intr_flags;

		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
	mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		const struct iphdr *ip = ip_hdr(skb);
	} else {
		/* multi-fragment (scatter-gather) path */
		u32 first_len, first_eor;
		const struct iphdr *ip = ip_hdr(skb);

		first_len = skb_headlen(skb);
		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];

			len = skb_frag_size(this_frag);
			mapping = dma_map_single(&cp->pdev->dev,
						 skb_frag_address(this_frag),
						 len, PCI_DMA_TODEVICE);

			if (frag == skb_shinfo(skb)->nr_frags - 1)
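				/*
				 * Added note: the last fragment's descriptor
				 * must carry the end-of-packet marking
				 * (LastFrag, plus RingEnd when it occupies
				 * the final ring slot) so the chip knows
				 * where the frame stops.
				 */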
		txd = &cp->tx_ring[first_entry];

	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, intr_flags);
static void __cp_set_rx_mode (struct net_device *dev)
{
		mc_filter[1] = mc_filter[0] = 0xffffffff;

		mc_filter[1] = mc_filter[0] = 0xffffffff;

		mc_filter[1] = mc_filter[0] = 0;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
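
	/*
	 * Added note: the three mc_filter assignments above belong to the
	 * promiscuous, all-multicast, and normal branches respectively.
	 * In the normal case the chip uses a 64-bit multicast hash: the
	 * top six bits of the CRC-32 of each multicast address select one
	 * bit in the mc_filter[] word pair.
	 */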
static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}
static void __cp_get_stats(struct cp_private *cp)
{

	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
static void cp_stop_hw (struct cp_private *cp)

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
static void cp_enable_irq(struct cp_private *cp)
{
	cpw16_f(IntrMask, cp_intr_mask);
}

static void cp_init_hw (struct cp_private *cp)
{
	__cp_set_rx_mode(dev);
static int cp_refill_rx(struct cp_private *cp)
{
		skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);

		if (i == (CP_RX_RING_SIZE - 1))	/* last slot: mark end of ring */
static void cp_init_rings_index (struct cp_private *cp)
{
	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
	cp_init_rings_index(cp);

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	rc = cp_init_rings(cp);
static void cp_clean_rings (struct cp_private *cp)
{
		dev_kfree_skb(cp->rx_skb[i]);

		cp->dev->stats.tx_dropped++;
static void cp_free_rings (struct cp_private *cp)

static int cp_open (struct net_device *dev)
{
	const int irq = cp->pdev->irq;

	netif_dbg(cp, ifup, dev, "enabling interface\n");

	rc = cp_alloc_rings(cp);

	napi_enable(&cp->napi);

	netif_start_queue(dev);

	napi_disable(&cp->napi);
static int cp_close (struct net_device *dev)
{
	unsigned long flags;

	napi_disable(&cp->napi);

	netif_dbg(cp, ifdown, dev, "disabling interface\n");

	netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, flags);
static void cp_tx_timeout(struct net_device *dev)
{
	unsigned long flags;

	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
		    cpr8(Cmd), cpr16(CpCmd),
		    cpr16(IntrStatus), cpr16(IntrMask));

	rc = cp_init_rings(cp);

	netif_wake_queue(dev);

	spin_unlock_irqrestore(&cp->lock, flags);
static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
	unsigned long flags;

	/* check for invalid MTU, according to hardware limits */
	if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
		return -EINVAL;

	/* if network interface not up, no need for complexity */
	if (!netif_running(dev)) {
		cp_set_rxbufsize(cp);

	cp_set_rxbufsize(cp);

	rc = cp_init_rings(cp);

	spin_unlock_irqrestore(&cp->lock, flags);
static const char mii_2_8139_map[8] = {
	return location < 8 && mii_2_8139_map[location] ?
		readw(cp->regs + mii_2_8139_map[location]) : 0;
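
/*
 * Added note: the 8139C+ internal PHY is not behind a real MDIO bus.
 * mii_2_8139_map[] translates standard MII register numbers into the
 * chip's own MMIO register offsets; unmapped locations read as zero and
 * writes to them are ignored (see mdio_write() below).
 */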
static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int value)
{
	struct cp_private *cp = netdev_priv(dev);

	if (location == 0) {
		cpw8(Cfg9346, Cfg9346_Unlock);
		cpw16(BasicModeCtrl, value);
		cpw8(Cfg9346, Cfg9346_Lock);
	} else if (location < 8 && mii_2_8139_map[location])
		cpw16(mii_2_8139_map[location], value);
}
static int netdev_set_wol (struct cp_private *cp,
			   const struct ethtool_wolinfo *wol)

static void netdev_get_wol (struct cp_private *cp,
			    struct ethtool_wolinfo *wol)

static void cp_get_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ring)

static int cp_get_regs_len(struct net_device *dev)
{
	return CP_REGS_SIZE;
}

static int cp_get_sset_count (struct net_device *dev, int sset)
	unsigned long flags;

	spin_unlock_irqrestore(&cp->lock, flags);

	unsigned long flags;

	spin_unlock_irqrestore(&cp->lock, flags);
static int cp_nway_reset(struct net_device *dev)

static void cp_set_msglevel(struct net_device *dev, u32 value)
	unsigned long flags;

	if (features & NETIF_F_RXCSUM)
		cp->cpcmd |= RxChkSum;
	else
		cp->cpcmd &= ~RxChkSum;

	spin_unlock_irqrestore(&cp->lock, flags);
	unsigned long flags;

	spin_unlock_irqrestore(&cp->lock, flags);

	unsigned long flags;

	netdev_get_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);

	unsigned long flags;

	rc = netdev_set_wol (cp, wol);
	spin_unlock_irqrestore (&cp->lock, flags);
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys,
		       sizeof(ethtool_stats_keys));
		break;
static void cp_get_ethtool_stats (struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	for (i = 0; i < 1000; i++) {
		if ((cpr32(StatsAddr) & DumpStats) == 0)
			break;
		cpu_relax();
	}

	tmp_stats[i++] = cp->cp_stats.rx_frags;
static const struct ethtool_ops cp_ethtool_ops = {
	.get_drvinfo		= cp_get_drvinfo,
	.get_regs_len		= cp_get_regs_len,
	.get_sset_count		= cp_get_sset_count,
	.get_settings		= cp_get_settings,
	.set_settings		= cp_set_settings,
	.nway_reset		= cp_nway_reset,
	.get_msglevel		= cp_get_msglevel,
	.set_msglevel		= cp_set_msglevel,
	.get_regs		= cp_get_regs,
	.get_wol		= cp_get_wol,
	.set_wol		= cp_set_wol,
	.get_strings		= cp_get_strings,
	.get_ethtool_stats	= cp_get_ethtool_stats,
	.get_eeprom_len		= cp_get_eeprom_len,
	.get_eeprom		= cp_get_eeprom,
	.set_eeprom		= cp_set_eeprom,
	.get_ringparam		= cp_get_ringparam,
};
	unsigned long flags;

	if (!netif_running(dev))
		return -EINVAL;

	spin_unlock_irqrestore(&cp->lock, flags);
static int cp_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_irq(&cp->lock);

	spin_unlock_irq(&cp->lock);
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x08	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x02	/* EEPROM chip data in. */
#define EE_WRITE_0	0x00
#define EE_WRITE_1	0x02
#define EE_DATA_READ	0x01	/* EEPROM chip data out. */
#define EE_ENB		(0x80 | EE_CS)

#define eeprom_delay()	readb(ee_addr)

/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD	(4)
#define EE_WRITE_CMD	(5)
#define EE_READ_CMD	(6)
#define EE_ERASE_CMD	(7)

#define EE_EWDS_ADDR	(0)
#define EE_WRAL_ADDR	(1)
#define EE_ERAL_ADDR	(2)
#define EE_EWEN_ADDR	(3)
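
/*
 * Added note: these definitions bit-bang a 93C46/93C56-style serial
 * EEPROM through a single control register.  eeprom_cmd() below shifts
 * each command out MSB-first on EE_DATA_WRITE while toggling
 * EE_SHIFT_CLK, and eeprom_delay() is a dummy MMIO read used only as a
 * timing barrier.  The EE_EXTEND_CMD opcode carries a sub-operation
 * (EWEN, EWDS, ERAL, WRAL) in its top address bits.
 */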
#define CP_EEPROM_MAGIC		PCI_DEVICE_ID_REALTEK_8139
static void eeprom_cmd_start(void __iomem *ee_addr)

static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
{
	int i;

	/* shift the command bits out, most significant bit first */
	for (i = cmd_len - 1; i >= 0; i--) {

static void eeprom_cmd_end(void __iomem *ee_addr)
static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
			      int addr_len)
{
	int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, cmd, 3 + addr_len);
	eeprom_cmd_end(ee_addr);
}
	int read_cmd = location | (EE_READ_CMD << addr_len);

	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);

	for (i = 16; i > 0; i--) {

	eeprom_cmd_end(ee_addr);
static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
			 int addr_len)
{
	eeprom_cmd_start(ee_addr);
	eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
	eeprom_cmd(ee_addr, val, 16);
	eeprom_cmd_end(ee_addr);

	eeprom_cmd_start(ee_addr);
	for (i = 0; i < 20000; i++)
		if (readb(ee_addr) & EE_DATA_READ)
			break;
	eeprom_cmd_end(ee_addr);
}
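
/*
 * Added note: after a write command, 93Cxx parts hold the data-out pin
 * low while the internal programming cycle runs.  The loop above polls
 * EE_DATA_READ until the part signals ready (or the 20000-iteration
 * budget runs out) before deselecting the chip.
 */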
static int cp_get_eeprom_len(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	int size;

	spin_lock_irq(&cp->lock);
	size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
	spin_unlock_irq(&cp->lock);

	return size;
}
static int cp_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	unsigned int addr_len;

	spin_lock_irq(&cp->lock);

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	while (i < len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len);
		data[i++] = (u8)val;
		data[i++] = (u8)(val >> 8);
		offset++;
	}

	spin_unlock_irq(&cp->lock);
static int cp_set_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	unsigned int addr_len;

	spin_lock_irq(&cp->lock);

	if (eeprom->offset & 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	while (i < len - 1) {
		val = (u16)data[i++];
		val |= (u16)data[i++] << 8;
		write_eeprom(cp->regs, offset, val, addr_len);
		offset++;
	}

	if (i == len - 1) {
		val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
		val |= (u16)data[i];
		write_eeprom(cp->regs, offset, val, addr_len);
	}

	spin_unlock_irq(&cp->lock);
static void cp_set_d3_state (struct cp_private *cp)
{
	pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
	pci_set_power_state (cp->pdev, PCI_D3hot);
}
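
/*
 * Added note: cp_set_d3_state() arms PME# generation and then puts the
 * chip into D3hot, leaving a wake-on-LAN capable device powered down
 * but still able to wake the host.
 */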
static const struct net_device_ops cp_netdev_ops = {
	.ndo_open		= cp_open,
	.ndo_stop		= cp_close,
	.ndo_set_mac_address	= cp_set_mac_address,
	.ndo_set_rx_mode	= cp_set_rx_mode,
	.ndo_get_stats		= cp_get_stats,
	.ndo_do_ioctl		= cp_ioctl,
	.ndo_start_xmit		= cp_start_xmit,
	.ndo_tx_timeout		= cp_tx_timeout,
	.ndo_set_features	= cp_set_features,
	.ndo_change_mtu		= cp_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cp_poll_controller,
#endif
};
	unsigned int addr_len, i, pci_using_dac;
	static int version_printed;

	if (version_printed++ == 0)
		pr_info("%s", version);

		dev_err(&pdev->dev,
			"This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
			pdev->vendor, pdev->device, pdev->revision);

	dev = alloc_etherdev(sizeof(struct cp_private));
	cp = netdev_priv(dev);

	cp->mii_if.mdio_read = mdio_read;
	cp->mii_if.mdio_write = mdio_write;
	cp->mii_if.phy_id = CP_INTERNAL_PHY;
	cp->mii_if.phy_id_mask = 0x1f;
	cp->mii_if.reg_num_mask = 0x1f;
	cp_set_rxbufsize(cp);
		goto err_out_disable;
		dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
			(unsigned long long)pci_resource_len(pdev, 1));
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&

			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");

		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"No usable consistent DMA configuration, aborting\n");
		dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
			(unsigned long long)pci_resource_len(pdev, 1),
			(unsigned long long)pciaddr);
	/* read MAC address from EEPROM */
	addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
	for (i = 0; i < 3; i++)
		((__le16 *) (dev->dev_addr))[i] =
			cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
	netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
		    regs, dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	cp_set_d3_state (cp);
static void cp_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);
static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	netif_stop_queue (dev);

	spin_unlock_irqrestore (&cp->lock, flags);
static int cp_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	pci_enable_wake(pdev, PCI_D0, 0);

	cp_init_rings_index (cp);

	netif_start_queue (dev);

	spin_unlock_irqrestore (&cp->lock, flags);
static struct pci_driver cp_driver = {
	.name		= DRV_NAME,
	.id_table	= cp_pci_tbl,
	.probe		= cp_init_one,
	.remove		= cp_remove_one,
#ifdef CONFIG_PM
	.resume		= cp_resume,
	.suspend	= cp_suspend,
#endif
};
static int __init cp_init (void)
{
	return pci_register_driver(&cp_driver);
}