/* Prefix all pr_*() log output from this module with the module name. */
47 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Driver identity strings, reported via ethtool/version messages. */
49 #define DRV_NAME "winbond-840"
50 #define DRV_VERSION "1.01-e"
51 #define DRV_RELDATE "Sep-11-2006"
/* Upper bound on events handled in one interrupt invocation (module param;
 * see the work_limit countdown in the interrupt handler). */
70 static int max_interrupt_work = 20;
/* Max number of filtered multicast addresses (module param). Behavior when
 * exceeded is handled in __set_rx_mode() — not fully visible here. */
73 static int multicast_filter_limit = 32;
/* Rx frames shorter than this are copied into a freshly allocated skb
 * (module param); defaults to 0, i.e. never copy. */
77 static int rx_copybreak;
/* Per-board forced duplex setting; -1 leaves duplex to autonegotiation. */
86 static int full_duplex[
MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Number of queued Tx descriptors at which the net queue is stopped. */
95 #define TX_QUEUE_LEN 10
/* Presumably the backlog level at which the queue is restarted —
 * TODO(review): confirm against the tx-done path (not visible in this chunk). */
96 #define TX_QUEUE_LEN_RESTART 5
/* Per-descriptor Tx buffer chunk limit (bytes). */
98 #define TX_BUFLIMIT (1024-128)
/* Tx FIFO is 2 KB; the limit reserves room for a 1514-byte frame
 * (standard max Ethernet frame — presumed from the constant) plus slack,
 * apparently to work around a FIFO hardware bug (per the macro name). */
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
/* Tx watchdog timeout, in jiffies. */
110 #define TX_TIMEOUT (2*HZ)
113 #include <linux/module.h>
114 #include <linux/kernel.h>
115 #include <linux/string.h>
117 #include <linux/errno.h>
120 #include <linux/pci.h>
122 #include <linux/netdevice.h>
127 #include <linux/ethtool.h>
128 #include <linux/mii.h>
129 #include <linux/rtnetlink.h>
131 #include <linux/bitops.h>
132 #include <asm/uaccess.h>
133 #include <asm/processor.h>
/* Rx buffer allocation size in bytes — large enough for a standard Ethernet
 * frame; exact padding rationale not visible in this chunk. */
140 #define PKT_BUF_SZ 1536
146 " http://www.scyld.com/network/drivers.html\n";
159 MODULE_PARM_DESC(max_interrupt_work,
"winbond-840 maximum events handled per interrupt");
161 MODULE_PARM_DESC(rx_copybreak,
"winbond-840 copy breakpoint for copy-only-tiny-frames");
162 MODULE_PARM_DESC(multicast_filter_limit,
"winbond-840 maximum number of filtered multicast addresses");
223 { 0x1050, 0x0840,
PCI_ANY_ID, 0x8153, 0, 0, 0 },
330 static void netdev_timer(
unsigned long data);
339 static void netdev_error(
struct net_device *
dev,
int intr_status);
345 static const struct ethtool_ops netdev_ethtool_ops;
349 .ndo_open = netdev_open,
350 .ndo_stop = netdev_close,
351 .ndo_start_xmit = start_tx,
353 .ndo_set_rx_mode = set_rx_mode,
354 .ndo_do_ioctl = netdev_ioctl,
380 pr_warn(
"Device %s disabled due to DMA limitations\n",
384 dev = alloc_etherdev(
sizeof(*np));
394 goto err_out_free_res;
396 for (i = 0; i < 3; i++)
403 np = netdev_priv(dev);
406 np->
drv_flags = pci_id_tbl[chip_idx].drv_flags;
409 np->
mii_if.mdio_read = mdio_read;
410 np->
mii_if.mdio_write = mdio_write;
413 pci_set_drvdata(pdev, dev);
421 np->
mii_if.full_duplex = 1;
424 "ignoring user supplied media type %d",
428 np->
mii_if.full_duplex = 1;
430 if (np->
mii_if.full_duplex)
431 np->
mii_if.force_media = 1;
440 goto err_out_cleardev;
443 pci_id_tbl[chip_idx].name, ioaddr, dev->
dev_addr, irq);
446 int phy, phy_idx = 0;
447 for (phy = 1; phy < 32 && phy_idx <
MII_CNT; phy++) {
448 int mii_status = mdio_read(dev, phy,
MII_BMSR);
449 if (mii_status != 0xffff && mii_status != 0x0000) {
455 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
456 np->
mii, phy, mii_status,
464 "MII PHY not found -- this device may not operate correctly\n");
472 pci_set_drvdata(pdev,
NULL);
/* Dummy ioread32 used purely for its timing/ordering side effect between
 * EEPROM bit-bang transitions (flushes posted MMIO writes). */
493 #define eeprom_delay(ee_addr) ioread32(ee_addr)
514 for (i = 10; i >= 0; i--) {
524 for (i = 16; i > 0; i--) {
/* Dummy ioread32 used for MDIO bit-bang timing (flushes posted MMIO writes). */
544 #define mdio_delay(mdio_addr) ioread32(mdio_addr)
/* MDIO bit patterns: drive the management data line low / high while the
 * output enable bit is set. */
551 #define MDIO_WRITE0 (MDIO_EnbOutput)
552 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
556 static void mdio_sync(
void __iomem *mdio_addr)
561 while (--bits >= 0) {
573 int mii_cmd = (0xf6 << 10) | (phy_id << 5) |
location;
576 if (mii_preamble_required)
577 mdio_sync(mdio_addr);
580 for (i = 15; i >= 0; i--) {
589 for (i = 20; i > 0; i--) {
596 return (retval>>1) & 0xffff;
599 static void mdio_write(
struct net_device *dev,
int phy_id,
int location,
int value)
603 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
606 if (location == 4 && phy_id == np->
phys[0])
609 if (mii_preamble_required)
610 mdio_sync(mdio_addr);
613 for (i = 31; i >= 0; i--) {
622 for (i = 2; i > 0; i--) {
631 static int netdev_open(
struct net_device *dev)
635 const int irq = np->
pci_dev->irq;
646 netdev_dbg(dev,
"w89c840_open() irq %d\n", irq);
648 if((i=alloc_ringdesc(dev)))
651 spin_lock_irq(&np->
lock);
654 spin_unlock_irq(&np->
lock);
656 netif_start_queue(dev);
664 np->
timer.function = netdev_timer;
/* PHY identifier of the Davicom DM9101 — presumably special-cased in the
 * link-handling code; verify at the use site (not visible in this chunk). */
672 #define MII_DAVICOM_DM9101 0x0181b800
674 static int update_link(
struct net_device *dev)
682 if (mii_reg == 0xffff)
686 if (!(mii_reg & 0x4)) {
687 if (netif_carrier_ok(dev)) {
690 "MII #%d reports no link. Disabling watchdog\n",
696 if (!netif_carrier_ok(dev)) {
699 "MII #%d link is back. Enabling watchdog\n",
719 negotiated = mii_reg & np->
mii_if.advertising;
722 fasteth = negotiated & 0x380;
724 duplex |= np->
mii_if.force_media;
726 result = np->
csr6 & ~0x20000200;
730 result |= 0x20000000;
733 "Setting %dMBit-%s-duplex based on MII#%d\n",
734 fasteth ? 100 : 10, duplex ?
"full" :
"half",
/* Iteration bound while waiting for the Rx/Tx engines to stop in
 * update_csr6() (which warns "couldn't stop rxtx" on expiry) —
 * TODO(review): confirm loop units; the wait loop is not visible here. */
739 #define RXTX_TIMEOUT 2000
740 static inline void update_csr6(
struct net_device *dev,
int new)
746 if (!netif_device_present(dev))
757 t = (csr5 >> 17) & 0x07;
760 t = (csr5 >> 20) & 0x07;
768 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
777 np->
mii_if.full_duplex = 1;
780 static void netdev_timer(
unsigned long data)
787 netdev_dbg(dev,
"Media selection timer tick, status %08x config %08x\n",
790 spin_lock_irq(&np->
lock);
791 update_csr6(dev, update_link(dev));
792 spin_unlock_irq(&np->
lock);
797 static void init_rxtx_rings(
struct net_device *dev)
877 for (i = 0; i < 6; i++)
898 #if defined (__i386__) && !defined(MODULE)
903 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
907 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
909 #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
912 #warning Processor architecture undefined
920 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
933 const int irq = np->
pci_dev->irq;
935 dev_warn(&dev->
dev,
"Transmit timed out, status %08x, resetting...\n",
954 spin_lock_irq(&np->
lock);
965 init_rxtx_rings(dev);
967 spin_unlock_irq(&np->
lock);
970 netif_wake_queue(dev);
972 np->
stats.tx_errors++;
976 static int alloc_ringdesc(
struct net_device *dev)
988 init_rxtx_rings(dev);
1025 if(entry == TX_RING_SIZE-1)
1038 spin_lock_irq(&np->
lock);
1050 netif_stop_queue(dev);
1054 spin_unlock_irq(&np->
lock);
1057 netdev_dbg(dev,
"Transmit frame #%d queued in slot %d\n",
1063 static void netdev_tx_done(
struct net_device *dev)
1072 if (tx_status & 0x8000) {
1073 #ifndef final_version
1075 netdev_dbg(dev,
"Transmit error, Tx status %08x\n",
1078 np->
stats.tx_errors++;
1079 if (tx_status & 0x0104) np->
stats.tx_aborted_errors++;
1080 if (tx_status & 0x0C80) np->
stats.tx_carrier_errors++;
1081 if (tx_status & 0x0200) np->
stats.tx_window_errors++;
1082 if (tx_status & 0x0002) np->
stats.tx_fifo_errors++;
1083 if ((tx_status & 0x0080) && np->
mii_if.full_duplex == 0)
1084 np->
stats.tx_heartbeat_errors++;
1086 #ifndef final_version
1088 netdev_dbg(dev,
"Transmit slot %d ok, Tx status %08x\n",
1092 np->
stats.collisions += (tx_status >> 3) & 15;
1093 np->
stats.tx_packets++;
1109 netif_wake_queue(dev);
1120 int work_limit = max_interrupt_work;
1123 if (!netif_device_present(dev))
1132 netdev_dbg(dev,
"Interrupt, status %04x\n", intr_status);
1146 spin_lock(&np->
lock);
1147 netdev_tx_done(dev);
1148 spin_unlock(&np->
lock);
1154 netdev_error(dev, intr_status);
1156 if (--work_limit < 0) {
1158 "Too much work at interrupt, status=0x%04x\n",
1162 spin_lock(&np->
lock);
1163 if (netif_device_present(dev)) {
1167 spin_unlock(&np->
lock);
1173 netdev_dbg(dev,
"exiting interrupt, status=%#4.4x\n",
1187 netdev_dbg(dev,
" In netdev_rx(), entry %d status %04x\n",
1188 entry, np->
rx_ring[entry].status);
1192 while (--work_limit >= 0) {
1197 netdev_dbg(dev,
" netdev_rx() status was %08x\n",
1201 if ((status & 0x38008300) != 0x0300) {
1202 if ((status & 0x38000300) != 0x0300) {
1204 if ((status & 0xffff) != 0x7fff) {
1206 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1208 np->
stats.rx_length_errors++;
1210 }
else if (status & 0x8000) {
1213 netdev_dbg(dev,
"Receive error, Rx status %08x\n",
1215 np->
stats.rx_errors++;
1216 if (status & 0x0890) np->
stats.rx_length_errors++;
1217 if (status & 0x004C) np->
stats.rx_frame_errors++;
1218 if (status & 0x0002) np->
stats.rx_crc_errors++;
1223 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1225 #ifndef final_version
1227 netdev_dbg(dev,
" netdev_rx() normal Rx pkt length %d status %x\n",
1232 if (pkt_len < rx_copybreak &&
1233 (skb = netdev_alloc_skb(dev, pkt_len + 2)) !=
NULL) {
1234 skb_reserve(skb, 2);
1238 skb_copy_to_linear_data(skb, np->
rx_skbuff[entry]->data, pkt_len);
1250 #ifndef final_version
1253 netdev_dbg(dev,
" Rx data %pM %pM %02x%02x %pI4\n",
1260 np->
stats.rx_packets++;
1263 entry = (++np->
cur_rx) % RX_RING_SIZE;
1272 skb = netdev_alloc_skb(dev, np->
rx_buf_sz);
1288 static void netdev_error(
struct net_device *dev,
int intr_status)
1294 netdev_dbg(dev,
"Abnormal event, %08x\n", intr_status);
1295 if (intr_status == 0xffffffff)
1297 spin_lock(&np->
lock);
1305 new = np->
csr6 + 0x4000;
1307 new = (np->
csr6 >> 14)&0x7f;
1312 new = (np->
csr6 & ~(0x7F << 14)) | (
new<<14);
1314 netdev_dbg(dev,
"Tx underflow, new csr6 %08x\n",
new);
1315 update_csr6(dev,
new);
1317 if (intr_status &
RxDied) {
1318 np->
stats.rx_errors++;
1322 if (netif_device_present(dev))
1327 spin_unlock(&np->
lock);
1336 spin_lock_irq(&np->
lock);
1337 if (netif_running(dev) && netif_device_present(dev))
1339 spin_unlock_irq(&np->
lock);
1353 memset(mc_filter, 0xff,
sizeof(mc_filter));
1359 memset(mc_filter, 0xff,
sizeof(mc_filter));
1364 memset(mc_filter, 0,
sizeof(mc_filter));
1370 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1379 static void set_rx_mode(
struct net_device *dev)
1382 u32 rx_mode = __set_rx_mode(dev);
1383 spin_lock_irq(&np->
lock);
1384 update_csr6(dev, (np->
csr6 & ~0x00F8) | rx_mode);
1385 spin_unlock_irq(&np->
lock);
1402 spin_lock_irq(&np->
lock);
1404 spin_unlock_irq(&np->
lock);
1414 spin_lock_irq(&np->
lock);
1416 spin_unlock_irq(&np->
lock);
1421 static int netdev_nway_reset(
struct net_device *dev)
1438 static void netdev_set_msglevel(
struct net_device *dev,
u32 value)
1443 static const struct ethtool_ops netdev_ethtool_ops = {
1444 .get_drvinfo = netdev_get_drvinfo,
1445 .get_settings = netdev_get_settings,
1446 .set_settings = netdev_set_settings,
1447 .nway_reset = netdev_nway_reset,
1448 .get_link = netdev_get_link,
1449 .get_msglevel = netdev_get_msglevel,
1450 .set_msglevel = netdev_set_msglevel,
1464 spin_lock_irq(&np->
lock);
1466 spin_unlock_irq(&np->
lock);
1470 spin_lock_irq(&np->
lock);
1472 spin_unlock_irq(&np->
lock);
1479 static int netdev_close(
struct net_device *dev)
1484 netif_stop_queue(dev);
1487 netdev_dbg(dev,
"Shutting down ethercard, status was %08x Config %08x\n",
1490 netdev_dbg(dev,
"Queue pointers were Tx %d / %d, Rx %d / %d\n",
1496 spin_lock_irq(&np->
lock);
1498 update_csr6(dev, 0);
1500 spin_unlock_irq(&np->
lock);
1529 free_rxtx_rings(np);
1537 struct net_device *dev = pci_get_drvdata(pdev);
1547 pci_set_drvdata(pdev,
NULL);
1577 struct net_device *dev = pci_get_drvdata (pdev);
1582 if (netif_running (dev)) {
1585 spin_lock_irq(&np->
lock);
1587 update_csr6(dev, 0);
1589 spin_unlock_irq(&np->
lock);
1592 netif_tx_disable(dev);
1602 free_rxtx_rings(np);
1610 static int w840_resume (
struct pci_dev *pdev)
1612 struct net_device *dev = pci_get_drvdata (pdev);
1617 if (netif_device_present(dev))
1619 if (netif_running(dev)) {
1622 "pci_enable_device failed in resume\n");
1625 spin_lock_irq(&np->
lock);
1630 init_rxtx_rings(dev);
1632 spin_unlock_irq(&np->
lock);
1634 netif_wake_queue(dev);
1648 .id_table = w840_pci_tbl,
1649 .probe = w840_probe1,
1652 .suspend = w840_suspend,
1653 .resume = w840_resume,
1657 static int __init w840_init(
void)
1660 return pci_register_driver(&w840_driver);
1663 static void __exit w840_exit(
void)