/* Driver identification strings reported at load time and via ethtool. */
#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"
/* Maximum number of multicast addresses filtered exactly; beyond this the
   driver presumably falls back to a hashed/all-multicast mode (see set_rx_mode
   elsewhere in this file — TODO confirm). */
static const int multicast_filter_limit = 32;

/* Copy breakpoint for copy-only-tiny-frames: received frames shorter than
   this are copied into a freshly allocated skb (0 = never copy).
   Exposed as a module parameter (see MODULE_PARM_DESC below). */
static int rx_copybreak;
/* Flow-control enable flag; on by default. NOTE(review): exact semantics
   (pause-frame negotiation?) not visible in this chunk — confirm at use site. */
static int flowctrl=1;
/* Descriptor-ring geometry.  TX_QUEUE_LEN keeps one Tx slot unused so that
   a full ring can be distinguished from an empty one. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1)
#define RX_RING_SIZE	64
/* Total byte sizes of the descriptor rings; expansions fully parenthesized
   so the macros compose safely in larger expressions. */
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))

/* Time in jiffies before concluding that the transmitter is hung. */
#define TX_TIMEOUT	(4*HZ)
/* Size of each temporary Rx buffer. */
#define PKT_BUF_SZ	1536
81 #include <linux/module.h>
82 #include <linux/kernel.h>
83 #include <linux/string.h>
85 #include <linux/errno.h>
88 #include <linux/pci.h>
89 #include <linux/netdevice.h>
93 #include <linux/bitops.h>
94 #include <asm/uaccess.h>
95 #include <asm/processor.h>
101 #include <linux/ethtool.h>
102 #include <linux/mii.h>
107 " Written by Donald Becker\n";
118 MODULE_PARM_DESC(rx_copybreak,
"Sundance Alta copy breakpoint for copy-only-tiny-frames");
198 #ifndef CONFIG_SUNDANCE_MMIO
203 { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
204 { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
205 { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
206 { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
222 {
"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {
"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {
"D-Link DFE-580TX 4 port Server Adapter"},
225 {
"D-Link DFE-530TXS FAST Ethernet Adapter"},
226 {
"D-Link DL10050-based FAST Ethernet Adapter"},
227 {
"Sundance Technology Alta"},
228 {
"IC Plus Corporation IP100A FAST Ethernet Adapter"},
/* Byte offset of the high 16 bits of a 32-bit ASIC register address. */
#define ASIC_HI_WORD(x)	((x) + 2)
/* Alignment mask for the private data area (presumably aligns netdev_private
   to a 16-byte boundary — confirm at the allocation site). */
#define PRIV_ALIGN	15
/* EEPROM word offset at which the station (MAC) address is stored —
   NOTE(review): "SA" read as "station address"; confirm against the
   EEPROM-read loop in the probe routine. */
#define EEPROM_SA_OFFSET	0x10
413 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
414 IntrDrvRqst | IntrTxDone | StatsMax | \
424 static void netdev_timer(
unsigned long data);
430 static void rx_poll(
unsigned long data);
431 static void tx_poll(
unsigned long data);
433 static void netdev_error(
struct net_device *
dev,
int intr_status);
434 static void netdev_error(
struct net_device *
dev,
int intr_status);
443 static void sundance_reset(
struct net_device *
dev,
unsigned long reset_cmd)
454 if (--countdown == 0) {
463 .ndo_open = netdev_open,
464 .ndo_stop = netdev_close,
465 .ndo_start_xmit = start_tx,
467 .ndo_set_rx_mode = set_rx_mode,
468 .ndo_do_ioctl = netdev_ioctl,
470 .ndo_change_mtu = change_mtu,
471 .ndo_set_mac_address = sundance_set_mac_addr,
493 int phy, phy_end, phy_idx = 0;
497 static int printed_version;
498 if (!printed_version++)
508 dev = alloc_etherdev(
sizeof(*np));
520 for (i = 0; i < 3; i++)
525 np = netdev_priv(dev);
538 goto err_out_cleardev;
545 goto err_out_unmap_tx;
550 np->
mii_if.mdio_read = mdio_read;
551 np->
mii_if.mdio_write = mdio_write;
552 np->
mii_if.phy_id_mask = 0x1f;
553 np->
mii_if.reg_num_mask = 0x1f;
560 pci_set_drvdata(pdev, dev);
564 goto err_out_unmap_rx;
567 dev->
name, pci_id_tbl[chip_idx].name, ioaddr,
577 if (sundance_pci_tbl[np->
chip_id].device == 0x0200) {
584 for (; phy <= phy_end && phy_idx <
MII_CNT; phy++) {
585 int phyx = phy & 0x1f;
586 int mii_status = mdio_read(dev, phyx,
MII_BMSR);
587 if (mii_status != 0xffff && mii_status != 0x0000) {
588 np->
phys[phy_idx++] = phyx;
590 if ((mii_status & 0x0040) == 0)
593 "0x%4.4x advertising %4.4x.\n",
594 dev->
name, phyx, mii_status, np->
mii_if.advertising);
600 printk(
KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
602 goto err_out_unregister;
610 if (media[card_idx] !=
NULL) {
612 if (
strcmp (media[card_idx],
"100mbps_fd") == 0 ||
613 strcmp (media[card_idx],
"4") == 0) {
615 np->
mii_if.full_duplex = 1;
616 }
else if (
strcmp (media[card_idx],
"100mbps_hd") == 0 ||
617 strcmp (media[card_idx],
"3") == 0) {
619 np->
mii_if.full_duplex = 0;
620 }
else if (
strcmp (media[card_idx],
"10mbps_fd") == 0 ||
621 strcmp (media[card_idx],
"2") == 0) {
623 np->
mii_if.full_duplex = 1;
624 }
else if (
strcmp (media[card_idx],
"10mbps_hd") == 0 ||
625 strcmp (media[card_idx],
"1") == 0) {
627 np->
mii_if.full_duplex = 0;
641 np->
mii_if.full_duplex = 1;
659 np->
speed, np->
mii_if.full_duplex ?
"Full" :
"Half");
667 sundance_reset(dev, 0x00ff << 16);
683 pci_set_drvdata(pdev,
NULL);
692 static int change_mtu(
struct net_device *dev,
int new_mtu)
694 if ((new_mtu < 68) || (new_mtu > 8191))
696 if (netif_running(dev))
/* Delay between EEPROM accesses by reading the register back; the readback
   presumably also flushes any posted PCI write — TODO confirm. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)
706 int boguscnt = 10000;
713 }
while (--boguscnt > 0);
/* MII bit-bang pacing: read the management register back to delay.
   NOTE: deliberately relies on a local 'mdio_addr' being in scope at every
   call site (historic kernel-driver idiom). */
#define mdio_delay()	ioread8(mdio_addr)
/* Bit patterns written to the MII management register when bit-banging
   MDIO frames (MDIO_EnbOutput / MDIO_Data are defined elsewhere in this file). */
#define MDIO_EnbIn		(0)
#define MDIO_WRITE0		(MDIO_EnbOutput)
#define MDIO_WRITE1		(MDIO_Data | MDIO_EnbOutput)
735 static void mdio_sync(
void __iomem *mdio_addr)
740 while (--bits >= 0) {
752 int mii_cmd = (0xf6 << 10) | (phy_id << 5) |
location;
756 mdio_sync(mdio_addr);
759 for (i = 15; i >= 0; i--) {
768 for (i = 19; i > 0; i--) {
775 return (retval>>1) & 0xffff;
778 static void mdio_write(
struct net_device *dev,
int phy_id,
int location,
int value)
782 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
786 mdio_sync(mdio_addr);
789 for (i = 31; i >= 0; i--) {
798 for (i = 2; i > 0; i--) {
812 np = netdev_priv(dev);
813 phy_id = np->
phys[0];
816 bmsr = mdio_read(dev, phy_id,
MII_BMSR);
820 }
while (--wait > 0);
824 static int netdev_open(
struct net_device *dev)
828 const int irq = np->
pci_dev->irq;
848 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
870 if (np->
pci_dev->revision >= 0x14)
872 netif_start_queue(dev);
876 spin_unlock_irqrestore(&np->
lock, flags);
882 "MAC Control %x, %4.4x %4.4x.\n",
891 np->
timer.function = netdev_timer;
900 static void check_duplex(
struct net_device *dev)
904 int mii_lpa = mdio_read(dev, np->
phys[0],
MII_LPA);
905 int negotiated = mii_lpa & np->
mii_if.advertising;
909 if (!np->
an_enable || mii_lpa == 0xffff) {
910 if (np->
mii_if.full_duplex)
917 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
918 if (np->
mii_if.full_duplex != duplex) {
922 "negotiated capability %4.4x.\n", dev->
name,
923 duplex ?
"full" :
"half", np->
phys[0], negotiated);
928 static void netdev_timer(
unsigned long data)
933 int next_tick = 10*
HZ;
952 netif_stop_queue(dev);
973 netif_queue_stopped(dev));
984 spin_unlock_irqrestore(&np->
lock, flag);
989 dev->
stats.tx_errors++;
991 netif_wake_queue(dev);
1013 ((i+1)%RX_RING_SIZE)*
sizeof(*np->
rx_ring));
1022 netdev_alloc_skb(dev, np->
rx_buf_sz + 2);
1026 skb_reserve(skb, 2);
1031 np->
rx_ring[i].frag[0].addr)) {
1046 static void tx_poll (
unsigned long data)
1089 txdesc->
frag[0].addr))
1101 !netif_queue_stopped(dev)) {
1104 netif_stop_queue (dev);
1108 "%s: Transmit frame #%d queued in slot %d.\n",
1116 dev->
stats.tx_dropped++;
1144 dev->
stats.tx_dropped++;
1177 dev->
name, intr_status);
1193 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1196 (
"%s: Transmit status is %2.2x.\n",
1197 dev->
name, tx_status);
1198 if (tx_status & 0x1e) {
1200 printk(
"%s: Transmit error status %4.4x.\n",
1201 dev->
name, tx_status);
1202 dev->
stats.tx_errors++;
1203 if (tx_status & 0x10)
1204 dev->
stats.tx_fifo_errors++;
1205 if (tx_status & 0x08)
1206 dev->
stats.collisions++;
1207 if (tx_status & 0x04)
1208 dev->
stats.tx_fifo_errors++;
1209 if (tx_status & 0x02)
1210 dev->
stats.tx_window_errors++;
1216 if (tx_status & 0x10) {
1238 hw_frame_id = (tx_status >> 8) & 0xff;
1243 if (np->
pci_dev->revision >= 0x14) {
1244 spin_lock(&np->
lock);
1251 if (sw_frame_id == hw_frame_id &&
1255 if (sw_frame_id == (hw_frame_id + 1) %
1268 spin_unlock(&np->
lock);
1270 spin_lock(&np->
lock);
1287 spin_unlock(&np->
lock);
1290 if (netif_queue_stopped(dev) &&
1293 netif_wake_queue (dev);
1297 netdev_error(dev, intr_status);
1305 static void rx_poll(
unsigned long data)
1310 int boguscnt = np->
budget;
1320 if (--boguscnt < 0) {
1323 if (!(frame_status &
DescOwn))
1325 pkt_len = frame_status & 0x1fff;
1329 if (frame_status & 0x001f4000) {
1334 dev->
stats.rx_errors++;
1335 if (frame_status & 0x00100000)
1336 dev->
stats.rx_length_errors++;
1337 if (frame_status & 0x00010000)
1338 dev->
stats.rx_fifo_errors++;
1339 if (frame_status & 0x00060000)
1340 dev->
stats.rx_frame_errors++;
1341 if (frame_status & 0x00080000)
1342 dev->
stats.rx_crc_errors++;
1343 if (frame_status & 0x00100000) {
1346 dev->
name, frame_status);
1350 #ifndef final_version
1353 ", bogus_cnt %d.\n",
1358 if (pkt_len < rx_copybreak &&
1359 (skb = netdev_alloc_skb(dev, pkt_len + 2)) !=
NULL) {
1360 skb_reserve(skb, 2);
1364 skb_copy_to_linear_data(skb, np->
rx_skbuff[entry]->data, pkt_len);
1380 entry = (entry + 1) % RX_RING_SIZE;
1400 static void refill_rx (
struct net_device *dev)
1412 skb = netdev_alloc_skb(dev, np->
rx_buf_sz + 2);
1416 skb_reserve(skb, 2);
1421 np->
rx_ring[entry].frag[0].addr)) {
1434 static void netdev_error(
struct net_device *dev,
int intr_status)
1438 u16 mii_ctl, mii_advertise, mii_lpa;
1442 if (mdio_wait_link(dev, 10) == 0) {
1445 mii_advertise = mdio_read(dev, np->
phys[0],
1448 mii_advertise &= mii_lpa;
1453 printk(
"100Mbps, full duplex\n");
1456 printk(
"100Mbps, half duplex\n");
1459 printk(
"10Mbps, full duplex\n");
1462 printk(
"10Mbps, half duplex\n");
1494 dev->
name, intr_status);
1503 unsigned long flags;
1504 u8 late_coll, single_coll, mult_coll;
1514 np->
xstats.tx_multiple_collisions += mult_coll;
1516 np->
xstats.tx_single_collisions += single_coll;
1518 np->
xstats.tx_late_collisions += late_coll;
1519 dev->
stats.collisions += mult_coll
1536 spin_unlock_irqrestore(&np->
statlock, flags);
1541 static void set_rx_mode(
struct net_device *dev)
1550 memset(mc_filter, 0xff,
sizeof(mc_filter));
1555 memset(mc_filter, 0xff,
sizeof(mc_filter));
1562 memset (mc_filter, 0,
sizeof (mc_filter));
1565 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1566 if (crc & 0x80000000) index |= 1 <<
bit;
1567 mc_filter[index/16] |= (1 << (index % 16));
1575 mc_filter[3] |= 0x0200;
1577 for (i = 0; i < 4; i++)
1582 static int __set_mac_addr(
struct net_device *dev)
1597 static int sundance_set_mac_addr(
struct net_device *dev,
void *data)
1601 if (!is_valid_ether_addr(addr->
sa_data))
1604 __set_mac_addr(dev);
1609 static const struct {
1611 } sundance_stats[] = {
1612 {
"tx_multiple_collisions" },
1613 {
"tx_single_collisions" },
1614 {
"tx_late_collisions" },
1616 {
"tx_deferred_excessive" },
1624 static int check_if_running(
struct net_device *dev)
1626 if (!netif_running(dev))
1642 spin_lock_irq(&np->
lock);
1644 spin_unlock_irq(&np->
lock);
1652 spin_lock_irq(&np->
lock);
1654 spin_unlock_irq(&np->
lock);
1658 static int nway_reset(
struct net_device *dev)
1682 static void get_strings(
struct net_device *dev,
u32 stringset,
1686 memcpy(data, sundance_stats,
sizeof(sundance_stats));
1689 static int get_sset_count(
struct net_device *dev,
int sset)
1699 static void get_ethtool_stats(
struct net_device *dev,
1706 data[i++] = np->
xstats.tx_multiple_collisions;
1707 data[i++] = np->
xstats.tx_single_collisions;
1708 data[i++] = np->
xstats.tx_late_collisions;
1709 data[i++] = np->
xstats.tx_deferred;
1710 data[i++] = np->
xstats.tx_deferred_excessive;
1711 data[i++] = np->
xstats.tx_aborted;
1712 data[i++] = np->
xstats.tx_bcasts;
1713 data[i++] = np->
xstats.rx_bcasts;
1714 data[i++] = np->
xstats.tx_mcasts;
1715 data[i++] = np->
xstats.rx_mcasts;
1719 .
begin = check_if_running,
1737 if (!netif_running(dev))
1740 spin_lock_irq(&np->
lock);
1742 spin_unlock_irq(&np->
lock);
1747 static int netdev_close(
struct net_device *dev)
1762 netif_stop_queue(dev);
1766 "Rx %4.4x Int %2.2x.\n",
1782 for (i = 2000; i > 0; i--) {
1791 for (i = 2000; i > 0; i--) {
1804 np->
tx_ring[i].frag[0].length);
1807 for (i = 0; i < 4 ; i++) {
1810 np->
rx_ring[i].frag[0].length);
1849 struct net_device *dev = pci_get_drvdata(pdev);
1861 pci_set_drvdata(pdev,
NULL);
1869 struct net_device *dev = pci_get_drvdata(pci_dev);
1871 if (!netif_running(dev))
1883 static int sundance_resume(
struct pci_dev *pci_dev)
1885 struct net_device *dev = pci_get_drvdata(pci_dev);
1888 if (!netif_running(dev))
1894 err = netdev_open(dev);
1911 .id_table = sundance_pci_tbl,
1912 .probe = sundance_probe1,
1915 .suspend = sundance_suspend,
1916 .resume = sundance_resume,
1920 static int __init sundance_init(
void)
1926 return pci_register_driver(&sundance_driver);
1929 static void __exit sundance_exit(
void)