/* Driver identification strings. */
27 #define DRV_NAME "fealnx"
28 #define DRV_VERSION "2.52"
29 #define DRV_RELDATE "Sep-11-2006"
/* Module parameter: maximum events handled per interrupt before the
   handler gives up (see MODULE_PARM_DESC below). */
32 static int max_interrupt_work = 20;
/* Module parameter: maximum number of individually filtered multicast
   addresses before a broader filter is used. */
35 static int multicast_filter_limit = 32;
/* Module parameter: received frames shorter than this are copied into a
   freshly allocated skb; 0 (the default) disables copying. */
39 static int rx_copybreak;
/* Module parameter: per-card forced full-duplex; a value > 0 for a card
   forces np->mii.full_duplex in the probe path, -1 means "not set". */
47 static int full_duplex[
MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
/* Number of descriptors in the transmit and receive rings. */
58 #define TX_RING_SIZE 6
59 #define RX_RING_SIZE 12
/* Total byte size of each descriptor ring, for the DMA allocation of the
   descriptor arrays.  NOTE(review): the expansions are unparenthesized;
   harmless for the current uses but fragile next to other operators. */
60 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct fealnx_desc)
61 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct fealnx_desc)
/* Transmit watchdog timeout, in jiffies. */
65 #define TX_TIMEOUT (2*HZ)
/* Receive buffer size -- room for a maximum-size Ethernet frame. */
67 #define PKT_BUF_SZ 1536
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
75 #include <linux/errno.h>
78 #include <linux/pci.h>
79 #include <linux/netdevice.h>
83 #include <linux/mii.h>
84 #include <linux/ethtool.h>
87 #include <linux/bitops.h>
89 #include <asm/processor.h>
91 #include <asm/uaccess.h>
92 #include <asm/byteorder.h>
/* Turn a relative delay in jiffies into an absolute expiry time. */
108 #define RUN_AT(x) (jiffies + (x))
/* Descriptions for the module parameters declared above. */
119 MODULE_PARM_DESC(max_interrupt_work,
"fealnx maximum events handled per interrupt");
121 MODULE_PARM_DESC(rx_copybreak,
"fealnx copy breakpoint for copy-only-tiny-frames");
122 MODULE_PARM_DESC(multicast_filter_limit,
"fealnx maximum number of filtered multicast addresses");
153 static const struct chip_info skel_netdrv_tbl[]
__devinitconst = {
/* Bit masks for the chip's MII management register (MIIR): direction
   select plus the serial management pins -- MDO (data out), MDI (data
   in) and MDC (clock) -- driven by the MDIO helper routines. */
300 #define MASK_MIIR_MII_READ 0x00000000
301 #define MASK_MIIR_MII_WRITE 0x00000008
302 #define MASK_MIIR_MII_MDO 0x00000004
303 #define MASK_MIIR_MII_MDI 0x00000002
304 #define MASK_MIIR_MII_MDC 0x00000001
/* MII management frame headers (start-of-frame plus opcode bits),
   OR'ed with the PHY and register addresses in m80x_send_cmd_to_phy():
   data = opcode | (phyad << 7) | (regad << 2). */
307 #define OP_READ 0x6000
308 #define OP_WRITE 0x5002
/* Myson internal PHY: identifier and status-register (reg 18) bits
   used to detect the negotiated speed/duplex. */
313 #define MysonPHYID 0xd0000302
315 #define MysonPHYID0 0x0302
316 #define StatusRegister 18
317 #define SPEED100 0x0400 // bit10
318 #define FULLMODE 0x0800 // bit11
/* Seeq PHY: identifier and register-18 speed/duplex detect bits. */
324 #define SeeqPHYID0 0x0016
326 #define MIIRegister18 18
327 #define SPD_DET_100 0x80
328 #define DPLX_DET_FULL 0x40
/* Ahdoc PHY: identifier and diagnostic-register speed/duplex bits. */
333 #define AhdocPHYID0 0x0022
335 #define DiagnosticReg 18
336 #define DPLX_FULL 0x0800
337 #define Speed_100 0x0400
/* Marvell and Level One gigabit PHYs: identifiers and the MII registers
   consulted to work out the negotiated link type.  Registers 9/10 are
   the standard 1000BASE-T control/status registers; 17 is the
   vendor-specific status register. */
343 #define MarvellPHYID0 0x0141
344 #define LevelOnePHYID0 0x0013
346 #define MII1000BaseTControlReg 9
347 #define MII1000BaseTStatusReg 10
348 #define SpecificReg 17
/* 1000BASE-T full/half-duplex ability bits (and their combined mask). */
351 #define PHYAbletoPerform1000FullDuplex 0x0200
352 #define PHYAbletoPerform1000HalfDuplex 0x0100
353 #define PHY1000AbilityMask 0x300
/* Marvell specific-status register: two-bit speed field and duplex bit. */
356 #define SpeedMask 0x0c000
357 #define Speed_1000M 0x08000
358 #define Speed_100M 0x4000
360 #define Full_Duplex 0x2000
/* Level One LXT1000 encodings of the same speed/duplex status. */
363 #define LXT1000_100M 0x08000
364 #define LXT1000_1000M 0x0c000
365 #define LXT1000_Full 0x200
/* Link-up status bits; NOTE(review): two flavours, presumably one for
   the chip's own status word and one for MII status -- confirm against
   getlinkstatus()/getlinktype() usage. */
369 #define LinkIsUp2 0x00040000
372 #define LinkIsUp 0x0004
418 unsigned char phys[2];
429 static void netdev_timer(
unsigned long data);
440 static const struct ethtool_ops netdev_ethtool_ops;
445 static void stop_nic_rx(
void __iomem *ioaddr,
long crvalue)
456 static void stop_nic_rxtx(
void __iomem *ioaddr,
long crvalue)
468 .ndo_open = netdev_open,
469 .ndo_stop = netdev_close,
470 .ndo_start_xmit = start_tx,
472 .ndo_set_rx_mode = set_rx_mode,
473 .ndo_do_ioctl = mii_ioctl,
474 .ndo_tx_timeout = fealnx_tx_timeout,
485 static int card_idx = -1;
501 static int printed_version;
502 if (!printed_version++)
507 sprintf(boardname,
"fealnx%d", card_idx);
518 "region size %ld too small, aborting\n", len);
528 ioaddr = pci_iomap(pdev, bar, len);
542 for (i = 0; i < 6; ++
i)
549 np = netdev_priv(dev);
553 np->
flags = skel_netdrv_tbl[chip_id].flags;
554 pci_set_drvdata(pdev, dev);
556 np->
mii.mdio_read = mdio_read;
557 np->
mii.mdio_write = mdio_write;
558 np->
mii.phy_id_mask = 0x1f;
559 np->
mii.reg_num_mask = 0x1f;
564 goto err_out_free_dev;
572 goto err_out_free_rx;
579 int phy, phy_idx = 0;
583 int mii_status = mdio_read(dev, phy, 1);
585 if (mii_status != 0xffff && mii_status != 0x0000) {
588 "MII PHY found at address %d, status "
589 "0x%4.4x.\n", phy, mii_status);
594 data = mdio_read(dev, np->
phys[0], 2);
614 "MII PHY not found -- this device may "
615 "not operate correctly.\n");
633 np->
mii.full_duplex = 1;
637 if (card_idx <
MAX_UNITS && full_duplex[card_idx] > 0)
638 np->
mii.full_duplex = full_duplex[card_idx];
640 if (np->
mii.full_duplex) {
641 dev_info(&pdev->
dev,
"Media type forced to Full Duplex.\n");
647 data = mdio_read(dev, np->
phys[0], 9);
648 data = (data & 0xfcff) | 0x0200;
649 mdio_write(dev, np->
phys[0], 9, data);
656 np->
mii.force_media = 1;
665 goto err_out_free_tx;
668 dev->
name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
689 struct net_device *dev = pci_get_drvdata(pdev);
702 pci_set_drvdata(pdev,
NULL);
708 static ulong m80x_send_cmd_to_phy(
void __iomem *miiport,
int opcode,
int phyad,
int regad)
721 for (i = 0; i < 32; i++) {
732 data = opcode | (phyad << 7) | (regad << 2);
750 if (mask == 0x2 && opcode ==
OP_READ)
757 static int mdio_read(
struct net_device *dev,
int phyad,
int regad)
764 miir = m80x_send_cmd_to_phy(miiport,
OP_READ, phyad, regad);
792 return data & 0xffff;
796 static void mdio_write(
struct net_device *dev,
int phyad,
int regad,
int data)
803 miir = m80x_send_cmd_to_phy(miiport,
OP_WRITE, phyad, regad);
828 static int netdev_open(
struct net_device *dev)
832 const int irq = np->
pci_dev->irq;
841 for (i = 0; i < 3; i++)
843 ioaddr +
PAR0 + i*2);
872 #if defined(__i386__) && !defined(MODULE)
884 if (np->
pci_dev->device == 0x891) {
898 np->
mii.full_duplex = np->
mii.force_media;
904 netif_start_queue(dev);
917 np->
timer.function = netdev_timer;
930 static void getlinkstatus(
struct net_device *dev)
936 unsigned int i, DelayTime = 0x1000;
941 for (i = 0; i < DelayTime; ++
i) {
949 for (i = 0; i < DelayTime; ++
i) {
960 static void getlinktype(
struct net_device *dev)
1063 static void allocate_rx_buffers(
struct net_device *dev)
1071 skb = netdev_alloc_skb(dev, np->
rx_buf_sz);
1087 static void netdev_timer(
unsigned long data)
1092 int old_crvalue = np->
crvalue;
1093 unsigned int old_linkok = np->
linkok;
1094 unsigned long flags;
1105 if ((old_linkok == 0) && (np->
linkok == 1)) {
1107 if (np->
crvalue != old_crvalue) {
1108 stop_nic_rxtx(ioaddr, np->
crvalue);
1114 allocate_rx_buffers(dev);
1116 spin_unlock_irqrestore(&np->
lock, flags);
1125 static void reset_and_disable_rxtx(
struct net_device *dev)
1132 stop_nic_rxtx(ioaddr, 0);
1151 static void enable_rxtx(
struct net_device *dev)
1156 reset_rx_descriptors(dev);
1180 unsigned long flags;
1188 reset_and_disable_rxtx(dev);
1192 netif_start_queue(dev);
1196 spin_unlock_irqrestore(&np->
lock, flags);
1200 static void fealnx_tx_timeout(
struct net_device *dev)
1204 unsigned long flags;
1208 "%s: Transmit timed out, status %8.8x, resetting...\n",
1215 (
unsigned int) np->
rx_ring[i].status);
1225 reset_and_disable_rxtx(dev);
1226 reset_tx_descriptors(dev);
1229 spin_unlock_irqrestore(&np->
lock, flags);
1232 dev->
stats.tx_errors++;
1233 netif_wake_queue(dev);
1238 static void init_ring(
struct net_device *dev)
1304 unsigned long flags;
1312 #if defined(one_buffer)
1319 if (np->
pci_dev->device == 0x891)
1324 #elif defined(two_buffer)
1325 if (skb->
len > BPT) {
1342 if (np->
pci_dev->device == 0x891)
1344 next->
buffer = pci_map_single(ep->pci_dev, skb->
data + BPT,
1359 if (np->
pci_dev->device == 0x891)
1368 netif_stop_queue(dev);
1372 spin_unlock_irqrestore(&np->
lock, flags);
1379 static void reset_tx_descriptors(
struct net_device *dev)
1408 np->
tx_ring[TX_RING_SIZE - 1].next_desc_logical = &np->
tx_ring[0];
1413 static void reset_rx_descriptors(
struct net_device *dev)
1419 allocate_rx_buffers(dev);
1439 long boguscnt = max_interrupt_work;
1440 unsigned int num_tx = 0;
1443 spin_lock(&np->
lock);
1471 if (intr_status &
TUNF)
1474 if (intr_status &
CNTOVF) {
1476 dev->
stats.rx_missed_errors +=
1480 dev->
stats.rx_crc_errors +=
1484 if (intr_status & (
RI |
RBU)) {
1485 if (intr_status &
RI)
1488 stop_nic_rx(ioaddr, np->
crvalue);
1489 reset_rx_descriptors(dev);
1498 if (!(tx_control &
TXLD)) {
1501 next = np->
cur_tx->next_desc_logical;
1502 tx_status = next->
status;
1506 if (tx_status &
TXOWN)
1511 dev->
stats.tx_errors++;
1513 dev->
stats.tx_aborted_errors++;
1514 if (tx_status &
CSL)
1515 dev->
stats.tx_carrier_errors++;
1517 dev->
stats.tx_window_errors++;
1518 if (tx_status &
UDF)
1519 dev->
stats.tx_fifo_errors++;
1520 if ((tx_status &
HF) && np->
mii.full_duplex == 0)
1521 dev->
stats.tx_heartbeat_errors++;
1524 dev->
stats.tx_bytes +=
1527 dev->
stats.collisions +=
1529 dev->
stats.tx_packets++;
1532 dev->
stats.tx_bytes +=
1534 dev->
stats.tx_packets++;
1543 if (np->
cur_tx->control & TXLD) {
1555 netif_wake_queue(dev);
1562 dev->
stats.tx_errors += (data & 0xff000000) >> 24;
1563 dev->
stats.tx_aborted_errors +=
1564 (data & 0xff000000) >> 24;
1565 dev->
stats.tx_window_errors +=
1566 (data & 0x00ff0000) >> 16;
1567 dev->
stats.collisions += (data & 0x0000ffff);
1570 if (--boguscnt < 0) {
1572 "status=0x%4.4x.\n", dev->
name, intr_status);
1577 stop_nic_rxtx(ioaddr, 0);
1578 netif_stop_queue(dev);
1596 dev->
stats.rx_crc_errors +=
1605 spin_unlock(&np->
lock);
1628 if ((!((rx_status &
RXFSD) && (rx_status &
RXLSD))) ||
1630 if (rx_status & ErrorSummary) {
1633 "%s: Receive error, Rx status %8.8x.\n",
1634 dev->
name, rx_status);
1636 dev->
stats.rx_errors++;
1638 dev->
stats.rx_length_errors++;
1639 if (rx_status &
RXER)
1640 dev->
stats.rx_frame_errors++;
1641 if (rx_status &
CRC)
1642 dev->
stats.rx_crc_errors++;
1644 int need_to_reset = 0;
1647 if (rx_status & RXFSD) {
1652 while (desno <= np->really_rx_count) {
1665 if (need_to_reset == 0) {
1668 dev->
stats.rx_length_errors++;
1671 for (i = 0; i < desno; ++
i) {
1672 if (!np->
cur_rx->skbuff) {
1674 "%s: I'm scared\n", dev->
name);
1682 stop_nic_rx(ioaddr, np->
crvalue);
1683 reset_rx_descriptors(dev);
1694 #ifndef final_version
1697 " status %x.\n", pkt_len, rx_status);
1702 if (pkt_len < rx_copybreak &&
1703 (skb = netdev_alloc_skb(dev, pkt_len + 2)) !=
NULL) {
1704 skb_reserve(skb, 2);
1705 pci_dma_sync_single_for_cpu(np->
pci_dev,
1711 #if ! defined(__alpha__)
1712 skb_copy_to_linear_data(skb,
1713 np->
cur_rx->skbuff->data, pkt_len);
1717 np->
cur_rx->skbuff->data, pkt_len);
1719 pci_dma_sync_single_for_device(np->
pci_dev,
1734 dev->
stats.rx_packets++;
1742 allocate_rx_buffers(dev);
1754 if (netif_running(dev)) {
1755 dev->
stats.rx_missed_errors +=
1757 dev->
stats.rx_crc_errors +=
1766 static void set_rx_mode(
struct net_device *dev)
1769 unsigned long flags;
1772 spin_unlock_irqrestore(lp, flags);
1777 static void __set_rx_mode(
struct net_device *dev)
1785 memset(mc_filter, 0xff,
sizeof(mc_filter));
1790 memset(mc_filter, 0xff,
sizeof(mc_filter));
1795 memset(mc_filter, 0,
sizeof(mc_filter));
1799 mc_filter[bit >> 5] |= (1 <<
bit);
1804 stop_nic_rxtx(ioaddr, np->
crvalue);
1827 spin_lock_irq(&np->
lock);
1829 spin_unlock_irq(&np->
lock);
1839 spin_lock_irq(&np->
lock);
1841 spin_unlock_irq(&np->
lock);
1846 static int netdev_nway_reset(
struct net_device *dev)
1868 static const struct ethtool_ops netdev_ethtool_ops = {
1869 .get_drvinfo = netdev_get_drvinfo,
1870 .get_settings = netdev_get_settings,
1871 .set_settings = netdev_set_settings,
1872 .nway_reset = netdev_nway_reset,
1873 .get_link = netdev_get_link,
1874 .get_msglevel = netdev_get_msglevel,
1875 .set_msglevel = netdev_set_msglevel,
1883 if (!netif_running(dev))
1886 spin_lock_irq(&np->
lock);
1888 spin_unlock_irq(&np->
lock);
1894 static int netdev_close(
struct net_device *dev)
1900 netif_stop_queue(dev);
1906 stop_nic_rxtx(ioaddr, 0);
1951 .id_table = fealnx_pci_tbl,
1952 .probe = fealnx_init_one,
1956 static int __init fealnx_init(
void)
1963 return pci_register_driver(&fealnx_driver);
1966 static void __exit fealnx_exit(
void)