#define DRV_NAME "epic100"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sept 11, 2006"
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int rx_copybreak;
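/* Rx packets shorter than rx_copybreak are copied into a freshly allocated
 * skb so the full-size ring buffer can stay mapped; 0 means never copy.
 */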
#define TX_RING_SIZE 256
#define TX_QUEUE_LEN 240
#define RX_RING_SIZE 256
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
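/* TX_QUEUE_LEN caps the number of in-flight Tx descriptors and is kept below
 * TX_RING_SIZE; the TOTAL_SIZE values size the DMA allocations that back the
 * descriptor rings.
 */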
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1
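/* TX_TIMEOUT is how long the stack waits before declaring the transmitter
 * hung.  PKT_BUF_SZ is the size of each Rx ring buffer.  TX_FIFO_THRESH is
 * the number of bytes copied to the chip before transmission starts, and
 * RX_FIFO_THRESH (0-3) selects the Rx FIFO fill level at which bus-master
 * transfers begin.
 */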
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");

#define EPIC_TOTAL_SIZE 0x100
#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
#define er8(reg) ioread8(ioaddr + (reg))
#define er16(reg) ioread16(ioaddr + (reg))
#define er32(reg) ioread32(ioaddr + (reg))
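/* Register accessors: all chip accesses go through ioread/iowrite on the
 * pci_iomap()ed BAR, so the same code works whether the region is MMIO or
 * port I/O.
 */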
#define EpicRemoved 0xffffffff

#define EpicNapiEvent (TxEmpty | TxDone | \
	RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
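/* Interrupt sources listed in EpicNapiEvent are deferred to the NAPI poll
 * routine; the remaining EpicNormalEvent bits are handled directly in the
 * hard interrupt handler.
 */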
static const u16 media2miictl[16] = {
	0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0 };
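/* media2miictl maps the low four bits of dev->if_port to an MII BMCR value
 * used to force the transceiver mode; a zero entry leaves autonegotiation
 * alone.
 */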
#define PRIV_ALIGN 15

static void epic_timer(unsigned long data);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static const struct ethtool_ops netdev_ethtool_ops;
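/* net_device_ops entries: these hook the stack's open/stop/transmit/ioctl
 * paths into the driver.
 */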
	.ndo_open = epic_open,
	.ndo_stop = epic_close,
	.ndo_start_xmit = epic_start_xmit,
	.ndo_tx_timeout = epic_tx_timeout,
	.ndo_get_stats = epic_get_stats,
	.ndo_set_rx_mode = set_rx_mode,
	.ndo_do_ioctl = netdev_ioctl,
	static int card_idx = -1;
	static int printed_version;
	if (!printed_version++)
	goto err_out_disable;
	goto err_out_disable;
	dev = alloc_etherdev(sizeof(*ep));
	goto err_out_free_res;
	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	goto err_out_free_netdev;
	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;
	goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;
	goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;
	if (dev->mem_start) {
	} else if (card_idx >= 0 && card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	ep->reschedule_in_poll = 0;
	for (i = 16; i > 0; i--)
	for (i = 0; i < 3; i++)
	for (i = 0; i < 64; i++)
		i % 16 == 15 ? "\n" : "");
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;

	int phy, phy_idx = 0;
	for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
		if (mii_status != 0xffff && mii_status != 0x0000) {
			ep->phys[phy_idx++] = phy;
				"MII transceiver #%d control "
				"%4.4x status %4.4x.\n",
				phy, mdio_read(dev, phy, 0), mii_status);
	ep->mii_phy_cnt = phy_idx;
		"Autonegotiation advertising %4.4x link "
		ep->mii.advertising, mdio_read(dev, phy, 5));
	} else if (!(ep->chip_flags & NO_MII)) {
		"***WARNING***: No MII transceiver found!\n");
	ep->mii.phy_id = ep->phys[0];
	ep->mii.force_media = ep->mii.full_duplex = 1;
	dev_info(&pdev->dev, "Forced full duplex requested.\n");
	dev->if_port = ep->default_port = option;

	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	goto err_out_unmap_rx;
	dev->name, pci_id_tbl[chip_idx].name,
#define EE_SHIFT_CLK 0x04
#define EE_DATA_WRITE 0x08
#define EE_WRITE_0 0x01
#define EE_WRITE_1 0x09
#define EE_DATA_READ 0x10
#define EE_ENB (0x0001 | EE_CS)
#define eeprom_delay() er32(EECTL)
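/* Bit-banged serial EEPROM interface: EE_ENB asserts chip select, data is
 * shifted on EE_SHIFT_CLK transitions, EE_DATA_READ samples the EEPROM
 * output, and eeprom_delay() (a dummy register read) paces the clocking.
 */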
#define EE_WRITE_CMD (5 << 6)
#define EE_READ64_CMD (6 << 6)
#define EE_READ256_CMD (6 << 8)
#define EE_ERASE_CMD (7 << 6)
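/* EEPROM opcodes, pre-shifted to include the leading start bit; the read
 * opcode differs between 64-word and 256-word parts.
 */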
static inline void __epic_pci_commit(void __iomem *ioaddr)

	__epic_pci_commit(ioaddr);
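/* __epic_pci_commit() flushes posted PCI writes so the preceding register
 * writes are guaranteed to have reached the chip (effectively a dummy read
 * back from the device).
 */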
	int read_cmd = location |
	for (i = 12; i >= 0; i--) {
	for (i = 16; i > 0; i--) {

#define MII_WRITEOP 2
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
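/* MII management frame: PHY address in bits 13:9, register number in bits
 * 8:4, opcode in the low bits; the loops below poll until the chip reports
 * the operation complete.
 */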
	for (i = 400; i > 0; i--) {
	if (phy_id == 1 && location < 6 &&
	for (i = 10000; i > 0; i--) {
	napi_enable(&ep->napi);
	napi_disable(&ep->napi);
	for (i = 16; i > 0; i--)
	for (i = 0; i < 3; i++)
	if (media2miictl[dev->if_port & 15]) {
	int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
	if (mii_lpa != 0xffff) {
		ep->mii.full_duplex = 1;
		" register read of %4.4x.\n", dev->name,
		ep->mii.full_duplex ? "full" : "half",
		ep->phys[0], mii_lpa);
	netif_start_queue(dev);
		"status %4.4x %s-duplex.\n",
		ep->mii.full_duplex ? "full" : "half");
	ep->timer.function = epic_timer;
static void epic_pause(struct net_device *dev)
	netif_stop_queue(dev);

static void epic_restart(struct net_device *dev)
	for (i = 16; i > 0; i--)
	for (i = 0; i < 3; i++)
	" interrupt %4.4x.\n",
static void check_media(struct net_device *dev)
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
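/* Full duplex when the partner advertises 100BaseTX-FD (0x0100), or when
 * 10BaseT-FD (0x0040) is the only one of the shared 10/100 ability bits.
 */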
	if (ep->mii.force_media)
	if (mii_lpa == 0xffff)
	if (ep->mii.full_duplex != duplex) {
		" partner capability of %4.4x.\n", dev->name,
		ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
static void epic_timer(unsigned long data)
	int next_tick = 5*HZ;
	"IntStatus %4.4x RxStatus %4.4x.\n", dev->name,

static void epic_tx_timeout(struct net_device *dev)
	dev->stats.tx_fifo_errors++;
	dev->stats.tx_errors++;
	netif_wake_queue(dev);

static void epic_init_ring(struct net_device *dev)

	int entry, free_count;
	ctrl_word = 0x100000;
	ctrl_word = 0x140000;
	ctrl_word = 0x100000;
	ctrl_word = 0x140000;
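/* ctrl_word 0x100000 queues the descriptor without requesting a Tx-done
 * interrupt (the common case); 0x140000 also asks for an interrupt, used as
 * the free descriptor count approaches the queue limit so completed slots
 * get reclaimed promptly.
 */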
	netif_stop_queue(dev);
	spin_unlock_irqrestore(&ep->lock, flags);
	"flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,

#ifndef final_version
	if (status & 0x1050)
	if (status & 0x0008)
	if (status & 0x0040)
	if (status & 0x0010)
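/* Tx error status bits: 0x1050 marks aborted transmits, 0x0008 carrier loss,
 * 0x0040 out-of-window collisions and 0x0010 FIFO underruns; each bumps the
 * matching dev->stats error counter.
 */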
	unsigned int dirty_tx, cur_tx;
	for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
		if (likely(txstatus & 0x0001)) {
			dev->stats.collisions += (txstatus >> 8) & 15;
			dev->stats.tx_packets++;
		epic_tx_error(dev, ep, txstatus);

#ifndef final_version
	if (cur_tx - dirty_tx > TX_RING_SIZE) {
		"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
	netif_wake_queue(dev);
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
	unsigned int handled = 0;
	if (napi_schedule_prep(&ep->napi)) {
		epic_napi_irq_off(dev, ep);
	status &= ~EpicNapiEvent;
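/* Once a NAPI run has been scheduled its interrupt sources are masked and
 * stripped from 'status'; the actual Rx/Tx work happens later in the poll
 * routine.
 */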
	if (rx_work_limit > budget)
		rx_work_limit = budget;
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		if (--rx_work_limit < 0)
		if (status & 0x2006) {
			if (status & 0x2000) {
				"multiple buffers, status %4.4x!\n", dev->name, status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				dev->stats.rx_errors++;
			short pkt_len = (status >> 16) - 4;
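/* The hardware byte count in the upper 16 status bits includes the 4-byte
 * CRC, which is not passed up the stack.
 */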
			dev->name, status, pkt_len);
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);
				pci_dma_sync_single_for_cpu(ep->pci_dev,
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				pci_dma_sync_single_for_device(ep->pci_dev,
			dev->stats.rx_packets++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
		skb_reserve(skb, 2);

	dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))

	work_done += epic_rx(dev, budget);
	epic_rx_err(dev, ep);
	if (work_done < budget) {
		unsigned long flags;
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
static int epic_close(struct net_device *dev)
	netif_stop_queue(dev);
	napi_disable(&ep->napi);
	epic_disable_int(dev, ep);
		pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
		ep->rx_ring[i].bufaddr = 0xBADF00D0;	/* an invalid address */
		pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
	if (netif_running(dev)) {

static void set_rx_mode(struct net_device *dev)
	memset(mc_filter, 0xff, sizeof(mc_filter));
	memset(mc_filter, 0xff, sizeof(mc_filter));
	memset(mc_filter, 0, sizeof(mc_filter));
		unsigned int bit_nr =
		mc_filter[bit_nr >> 3] |= (1 << bit_nr);
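/* Each multicast address hashes to a single bit of the 64-bit filter; the
 * four 16-bit hash registers are then reloaded from mc_filter below.
 */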
	for (i = 0; i < 4; i++)

	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);

static int netdev_nway_reset(struct net_device *dev)

static void netdev_set_msglevel(struct net_device *dev, u32 value)

static int ethtool_begin(struct net_device *dev)
	if (!netif_running(dev)) {

static void ethtool_complete(struct net_device *dev)
	if (!netif_running(dev)) {

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.set_settings = netdev_set_settings,
	.nway_reset = netdev_nway_reset,
	.get_link = netdev_get_link,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
	.begin = ethtool_begin,
	.complete = ethtool_complete
	if (!netif_running(dev)) {
	spin_lock_irq(&np->lock);
	spin_unlock_irq(&np->lock);
	if (!netif_running(dev)) {

	struct net_device *dev = pci_get_drvdata(pdev);
	pci_set_drvdata(pdev, NULL);

	struct net_device *dev = pci_get_drvdata(pdev);
	if (!netif_running(dev))

static int epic_resume(struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	if (!netif_running(dev))

	.id_table = epic_pci_tbl,
	.probe = epic_init_one,
	.suspend = epic_suspend,
	.resume = epic_resume,

static int __init epic_init(void)
	return pci_register_driver(&epic_driver);

static void __exit epic_cleanup(void)