/* Prefix all pr_*() output with the module name. */
30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* Driver identification strings (reported via ethtool drvinfo / log banner). */
32 #define DRV_NAME "de2104x"
33 #define DRV_VERSION "0.7"
34 #define DRV_RELDATE "Mar 17, 2004"
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/netdevice.h>
42 #include <linux/pci.h>
44 #include <linux/ethtool.h>
45 #include <linux/compiler.h>
46 #include <linux/rtnetlink.h>
48 #include <linux/slab.h>
52 #include <asm/uaccess.h>
53 #include <asm/unaligned.h>
/* Module parameter: netif message-level bitmask; -1 selects the driver
 * default (NOTE(review): presumably resolved via netif_msg_init() against
 * DE_DEF_MSG_ENABLE — confirm at the point of use, not visible here). */
64 static int debug = -1;
69 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
70 defined(CONFIG_SPARC) || defined(__ia64__) || \
71 defined(__sh__) || defined(__mips__)
72 static int rx_copybreak = 1518;
74 static int rx_copybreak = 100;
77 MODULE_PARM_DESC (rx_copybreak,
"de2104x Breakpoint at which Rx packets are copied");
79 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
88 #ifndef CONFIG_DE2104X_DSL
91 #define DSL CONFIG_DE2104X_DSL
/* RX/TX descriptor ring geometry.  Both sizes must remain powers of two so
 * the NEXT_TX/NEXT_RX wraparound masks below stay valid. */
94 #define DE_RX_RING_SIZE 64
95 #define DE_TX_RING_SIZE 64
/* Total bytes for one contiguous DMA allocation holding both rings. */
96 #define DE_RING_BYTES \
97 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
98 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
/* Advance a ring index by one with power-of-two wraparound. */
99 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
100 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
/* Number of free TX slots.  One slot is deliberately kept unused so that
 * tx_head == tx_tail unambiguously means "ring empty" rather than "full". */
101 #define TX_BUFFS_AVAIL(CP) \
102 (((CP)->tx_tail <= (CP)->tx_head) ? \
103 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
104 (CP)->tx_tail - (CP)->tx_head - 1)
/* Per-packet receive buffer size in bytes. */
106 #define PKT_BUF_SZ 1536
/* Sentinel pointer values stored in tx_skb[] for TX slots that carry a
 * setup frame or a dummy descriptor instead of a real sk_buff; the cleanup
 * path must recognize these and not free them as packets. */
109 #define DE_SETUP_SKB ((struct sk_buff *) 1)
110 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
/* Size of the chip's address-filter "setup frame", in 16-bit words. */
111 #define DE_SETUP_FRAME_WORDS 96
/* Serial EEPROM geometry: 256 words of 16 bits each. */
112 #define DE_EEPROM_WORDS 256
113 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
/* Media types, used as indices into the per-media CSR tables below. */
114 #define DE_MAX_MEDIA 5
116 #define DE_MEDIA_TP_AUTO 0
117 #define DE_MEDIA_BNC 1
118 #define DE_MEDIA_AUI 2
119 #define DE_MEDIA_TP 3
120 #define DE_MEDIA_TP_FD 4
121 #define DE_MEDIA_INVALID DE_MAX_MEDIA
122 #define DE_MEDIA_FIRST 0
123 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
/* ethtool SUPPORTED_* mask covering both non-TP attachment units. */
124 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
/* Media-poll timer intervals: relaxed when link is up, aggressive when down. */
126 #define DE_TIMER_LINK (60 * HZ)
127 #define DE_TIMER_NO_LINK (5 * HZ)
/* Register window exposed through ethtool get_regs. */
129 #define DE_NUM_REGS 16
130 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
131 #define DE_REGS_VER 1
/* netdev watchdog timeout before de_tx_timeout() is invoked. */
134 #define TX_TIMEOUT (6*HZ)
/* Magic value (NOTE(review): meaning not visible in this chunk — presumably
 * an EEPROM/SROM marker for forced full duplex; confirm at point of use). */
140 #define FULL_DUPLEX_MAGIC 0x6969
252 static const u32 de_intr_mask =
/* Forward declarations for routines referenced before their definitions. */
336 static void de_clean_rings (
struct de_private *de);
/* Old-style timer callbacks taking the timer cookie as unsigned long. */
338 static void de21040_media_timer (
unsigned long data);
339 static void de21041_media_timer (
unsigned long data);
340 static unsigned int de_ok_to_advertise (
struct de_private *de,
u32 new_media);
/* Default SIA CSR13/14/15 values for the 21040, indexed by DE_MEDIA_*.
 * Zero entries correspond to media the 21040 does not support. */
362 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
363 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
364 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
/* Default SIA CSR values for the 21041, same DE_MEDIA_* indexing.  The
 * "_brk" CSR14 variant is selected for early silicon (pdev->revision
 * below 0x20, per the revision check later in this file). */
367 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
368 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
370 static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
371 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
/* MMIO register accessors.  Both require a variable named "de"
 * (struct de_private *) to be in scope at the expansion site. */
374 #define dr32(reg) ioread32(de->regs + (reg))
375 #define dw32(reg, val) iowrite32((val), de->regs + (reg))
378 static void de_rx_err_acct (
struct de_private *de,
unsigned rx_tail,
382 "rx err, slot %d status 0x%x len %d\n",
383 rx_tail, status, len);
385 if ((status & 0x38000300) != 0x0300) {
387 if ((status & 0xffff) != 0x7fff) {
389 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
396 if (status & 0x0890) de->
net_stats.rx_length_errors++;
404 unsigned rx_tail = de->
rx_tail;
413 unsigned copying_skb,
buflen;
415 skb = de->
rx_skb[rx_tail].skb;
422 len = ((status >> 16) & 0x7ff) - 4;
423 mapping = de->
rx_skb[rx_tail].mapping;
430 if (
unlikely((status & 0x38008300) != 0x0300)) {
431 de_rx_err_acct(de, rx_tail, status, len);
435 copying_skb = (len <= rx_copybreak);
438 "rx slot %d status 0x%x len %d copying? %d\n",
439 rx_tail, status, len, copying_skb);
442 copy_skb = netdev_alloc_skb(de->
dev, buflen);
451 pci_unmap_single(de->
pdev, mapping,
456 de->
rx_skb[rx_tail].mapping =
457 pci_map_single(de->
pdev, copy_skb->
data,
459 de->
rx_skb[rx_tail].skb = copy_skb;
463 skb_copy_from_linear_data(skb,
skb_put(copy_skb, len),
492 netdev_warn(de->
dev,
"rx work limit reached\n");
497 static irqreturn_t de_interrupt (
int irq,
void *dev_instance)
507 netif_dbg(de,
intr, dev,
"intr, status %08x mode %08x desc %u/%u/%u\n",
519 spin_lock(&de->
lock);
525 de_media_interrupt(de, status);
527 spin_unlock(&de->
lock);
535 "PCI bus error, status=%08x, PCI status=%04x\n",
547 while (tx_tail != tx_head) {
553 if (status & DescOwn)
556 skb = de->
tx_skb[tx_tail].skb;
562 pci_unmap_single(de->
pdev, de->
tx_skb[tx_tail].mapping,
567 pci_unmap_single(de->
pdev, de->
tx_skb[tx_tail].mapping,
573 "tx err, status 0x%x\n",
588 "tx done, slot %d\n", tx_tail);
602 netif_wake_queue(de->
dev);
613 spin_lock_irq(&de->
lock);
617 netif_stop_queue(dev);
618 spin_unlock_irq(&de->
lock);
645 netif_dbg(de, tx_queued, dev,
"tx queued, slot %d, skblen %d\n",
649 netif_stop_queue(dev);
651 spin_unlock_irq(&de->
lock);
664 static void build_setup_frame_hash(
u16 *setup_frm,
struct net_device *dev)
672 memset(hash_table, 0,
sizeof(hash_table));
673 __set_bit_le(255, hash_table);
678 __set_bit_le(index, hash_table);
681 for (i = 0; i < 32; i++) {
682 *setup_frm++ = hash_table[
i];
683 *setup_frm++ = hash_table[
i];
689 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
690 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
691 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
694 static void build_setup_frame_perfect(
u16 *setup_frm,
struct net_device *dev)
704 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
705 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
706 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
714 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
715 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
716 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
720 static void __de_set_rx_mode (
struct net_device *dev)
762 dummy_txd->
addr1 = 0;
794 netif_stop_queue(dev);
804 static void de_set_rx_mode (
struct net_device *dev)
810 __de_set_rx_mode(dev);
811 spin_unlock_irqrestore (&de->
lock, flags);
814 static inline void de_rx_missed(
struct de_private *de,
u32 rx_missed)
822 static void __de_get_stats(
struct de_private *de)
826 de_rx_missed(de, tmp);
834 spin_lock_irq(&de->
lock);
835 if (netif_running(dev) && netif_device_present(dev))
837 spin_unlock_irq(&de->
lock);
842 static inline int de_is_running (
struct de_private *de)
847 static void de_stop_rxtx (
struct de_private *de)
850 unsigned int i = 1300/100;
853 if (macmode &
RxTx) {
863 if (!de_is_running(de))
868 netdev_warn(de->
dev,
"timeout expired, stopping DMA\n");
871 static inline void de_start_rxtx (
struct de_private *de)
876 if ((macmode & RxTx) != RxTx) {
882 static void de_stop_hw (
struct de_private *de)
900 if (!netif_carrier_ok(de->
dev)) {
907 static void de_link_down(
struct de_private *de)
909 if (netif_carrier_ok(de->
dev)) {
915 static void de_set_media (
struct de_private *de)
920 if (de_is_running(de))
921 netdev_warn(de->
dev,
"chip is running while changing media!\n");
945 macmode, de->
media[media].csr13,
946 de->
media[media].csr14, de->
media[media].csr15);
951 static void de_next_media (
struct de_private *de,
const u32 *media,
952 unsigned int n_media)
956 for (i = 0; i < n_media; i++) {
957 if (de_ok_to_advertise(de, media[i])) {
964 static void de21040_media_timer (
unsigned long data)
969 unsigned int carrier;
980 if (!netif_carrier_ok(dev))
995 de_next_media(de, &next_state, 1);
998 de_next_media(de, &next_state, 1);
1003 spin_unlock_irqrestore(&de->
lock, flags);
1015 static unsigned int de_ok_to_advertise (
struct de_private *de,
u32 new_media)
1017 switch (new_media) {
1045 static void de21041_media_timer (
unsigned long data)
1050 unsigned int carrier;
1051 unsigned long flags;
1067 if (!netif_carrier_ok(dev))
1071 "%s link ok, mode %x status %x\n",
1085 unsigned int have_media = 1;
1120 static const u32 next_states[] = {
1123 de_next_media(de, next_states,
ARRAY_SIZE(next_states));
1125 static const u32 next_states[] = {
1128 de_next_media(de, next_states,
ARRAY_SIZE(next_states));
1130 static const u32 next_states[] = {
1133 de_next_media(de, next_states,
ARRAY_SIZE(next_states));
1139 spin_unlock_irqrestore(&de->
lock, flags);
1151 static void de_media_interrupt (
struct de_private *de,
u32 status)
1182 static int de_reset_mac (
struct de_private *de)
1201 for (tmp = 0; tmp < 5; tmp++) {
1211 if (status == 0xffffffff)
1216 static void de_adapter_wake (
struct de_private *de)
1223 pci_read_config_dword(de->
pdev,
PCIPM, &pmctl);
1226 pci_write_config_dword(de->
pdev,
PCIPM, pmctl);
1233 static void de_adapter_sleep (
struct de_private *de)
1241 pci_read_config_dword(de->
pdev,
PCIPM, &pmctl);
1243 pci_write_config_dword(de->
pdev,
PCIPM, pmctl);
1246 static int de_init_hw (
struct de_private *de)
1252 de_adapter_wake(de);
1256 rc = de_reset_mac(de);
1271 de_set_rx_mode(dev);
1276 static int de_refill_rx (
struct de_private *de)
1292 if (i == (DE_RX_RING_SIZE - 1))
1308 static int de_init_rings (
struct de_private *de)
1316 return de_refill_rx (de);
1319 static int de_alloc_rings (
struct de_private *de)
1325 return de_init_rings(de);
1328 static void de_clean_rings (
struct de_private *de)
1341 pci_unmap_single(de->
pdev, de->
rx_skb[i].mapping,
1343 dev_kfree_skb(de->
rx_skb[i].skb);
1352 pci_unmap_single(de->
pdev,
1357 pci_unmap_single(de->
pdev,
1369 static void de_free_rings (
struct de_private *de)
1380 const int irq = de->
pdev->irq;
1383 netif_dbg(de, ifup, dev,
"enabling interface\n");
1387 rc = de_alloc_rings(de);
1389 netdev_err(dev,
"ring allocation failure, err=%d\n", rc);
1397 netdev_err(dev,
"IRQ %d request failure, err=%d\n", irq, rc);
1401 rc = de_init_hw(de);
1403 netdev_err(dev,
"h/w init failure, err=%d\n", rc);
1404 goto err_out_free_irq;
1407 netif_start_queue(dev);
1422 unsigned long flags;
1424 netif_dbg(de, ifdown, dev,
"disabling interface\n");
1430 netif_stop_queue(dev);
1432 spin_unlock_irqrestore(&de->
lock, flags);
1437 de_adapter_sleep(de);
1441 static void de_tx_timeout (
struct net_device *dev)
1444 const int irq = de->
pdev->irq;
1446 netdev_dbg(dev,
"NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1453 spin_lock_irq(&de->
lock);
1456 netif_stop_queue(dev);
1459 spin_unlock_irq(&de->
lock);
1472 netif_wake_queue(dev);
1482 rbuf[i] =
dr32(i * 8);
1485 de_rx_missed(de, rbuf[8]);
1507 ethtool_cmd_speed_set(ecmd, 10);
1529 if (ethtool_cmd_speed(ecmd) != 10)
1547 switch (ecmd->
port) {
1587 if (netif_running(de->
dev))
1603 static int de_get_regs_len(
struct net_device *dev)
1613 spin_lock_irq(&de->
lock);
1614 rc = __de_get_settings(de, ecmd);
1615 spin_unlock_irq(&de->
lock);
1625 spin_lock_irq(&de->
lock);
1626 rc = __de_set_settings(de, ecmd);
1627 spin_unlock_irq(&de->
lock);
1639 static void de_set_msglevel(
struct net_device *dev,
u32 msglvl)
1646 static int de_get_eeprom(
struct net_device *dev,
1653 if ((eeprom->
offset != 0) || (eeprom->
magic != 0) ||
1661 static int de_nway_reset(
struct net_device *dev)
1668 if (netif_carrier_ok(de->
dev))
1685 spin_lock_irq(&de->
lock);
1686 __de_get_regs(de, data);
1687 spin_unlock_irq(&de->
lock);
1690 static const struct ethtool_ops de_ethtool_ops = {
1692 .get_drvinfo = de_get_drvinfo,
1693 .get_regs_len = de_get_regs_len,
1694 .get_settings = de_get_settings,
1695 .set_settings = de_set_settings,
1696 .get_msglevel = de_get_msglevel,
1697 .set_msglevel = de_set_msglevel,
1698 .get_eeprom = de_get_eeprom,
1699 .nway_reset = de_nway_reset,
1700 .get_regs = de_get_regs,
1710 for (i = 0; i < 6; i++) {
1711 int value, boguscnt = 100000;
1715 }
while (value < 0 && --boguscnt > 0);
1719 pr_warn(
"timeout reading 21040 MAC address byte %u\n",
1739 de->
media[
i].csr13 = t21040_csr13[
i];
1740 de->
media[
i].csr14 = t21040_csr14[
i];
1741 de->
media[
i].csr15 = t21040_csr15[
i];
1756 int read_cmd = location | (
EE_READ_CMD << addr_len);
1762 for (i = 4 + addr_len; i >= 0; i--) {
1773 for (i = 16; i > 0; i--) {
1788 unsigned i, sa_offset = 0, ofs;
1790 unsigned ee_addr_size = tulip_read_eeprom(de->
regs, 0xff, 8) & 0x40000 ? 8 : 6;
1803 #ifndef CONFIG_MIPS_COBALT
1805 for (i = 0; i < 8; i ++)
1806 if (ee_data[i] != ee_data[16+i])
1812 for (i = 0; i < 6; i ++)
1813 de->
dev->dev_addr[i] = ee_data[i + sa_offset];
1826 if ((
sizeof(ee_data) - ofs) <
1839 pr_info(
"de%d: SROM leaf offset %u, default media %s\n",
1845 de->
media[
i].csr13 = 0xffff;
1846 de->
media[
i].csr14 = 0xffff;
1847 de->
media[
i].csr15 = 0xffff;
1853 bufp = ((
void *)il) +
sizeof(*il);
1854 for (i = 0; i < il->
n_blocks; i++) {
1887 pr_info(
"de%d: media block #%u: %s",
1889 media_name[de->
media[idx].type]);
1891 bufp +=
sizeof (ib->
opts);
1897 bufp +=
sizeof(ib->
csr13) +
sizeof(ib->
csr14) +
1902 de->
media[idx].csr13,
1903 de->
media[idx].csr14,
1904 de->
media[idx].csr15);
1920 if (de->
media[i].csr13 == 0xffff)
1921 de->
media[
i].csr13 = t21041_csr13[
i];
1922 if (de->
media[i].csr14 == 0xffff) {
1925 if (de->
pdev->revision < 0x20)
1926 de->
media[
i].csr14 = t21041_csr14_brk[
i];
1928 de->
media[
i].csr14 = t21041_csr14[
i];
1930 if (de->
media[i].csr15 == 0xffff)
1931 de->
media[
i].csr15 = t21041_csr15[
i];
1941 de->
media[i].type = i;
1953 .ndo_open = de_open,
1954 .ndo_stop = de_close,
1955 .ndo_set_rx_mode = de_set_rx_mode,
1956 .ndo_start_xmit = de_start_xmit,
1957 .ndo_get_stats = de_get_stats,
1958 .ndo_tx_timeout = de_tx_timeout,
1971 unsigned long pciaddr;
1982 dev = alloc_etherdev(
sizeof(
struct de_private));
1991 de = netdev_priv(dev);
2015 goto err_out_disable;
2018 if (pdev->
irq < 2) {
2020 pr_err(
"invalid irq (%d) for pci dev %s\n",
2021 pdev->
irq, pci_name(pdev));
2029 pr_err(
"no MMIO resource for pci dev %s\n", pci_name(pdev));
2034 pr_err(
"MMIO resource (%llx) too small on pci dev %s\n",
2044 pr_err(
"Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2046 pciaddr, pci_name(pdev));
2051 de_adapter_wake(de);
2054 rc = de_reset_mac(de);
2056 pr_err(
"Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2064 de21040_get_mac_address(de);
2065 de21040_get_media_info(de);
2067 de21041_get_srom_info(de);
2076 netdev_info(dev,
"%s at %p, %pM, IRQ %d\n",
2077 de->
de21040 ?
"21040" :
"21041",
2080 pci_set_drvdata(pdev, dev);
2086 de_adapter_sleep(de);
2104 struct net_device *dev = pci_get_drvdata(pdev);
2113 pci_set_drvdata(pdev,
NULL);
2121 struct net_device *dev = pci_get_drvdata (pdev);
2125 if (netif_running (dev)) {
2126 const int irq = pdev->
irq;
2131 spin_lock_irq(&de->
lock);
2134 netif_stop_queue(dev);
2138 spin_unlock_irq(&de->
lock);
2147 de_adapter_sleep(de);
2156 static int de_resume (
struct pci_dev *pdev)
2158 struct net_device *dev = pci_get_drvdata (pdev);
2163 if (netif_device_present(dev))
2165 if (!netif_running(dev))
2168 netdev_err(dev,
"pci_enable_device failed in resume\n");
2185 .id_table = de_pci_tbl,
2186 .probe = de_init_one,
2189 .suspend = de_suspend,
2190 .resume = de_resume,
2194 static int __init de_init (
void)
2199 return pci_register_driver(&de_driver);
2202 static void __exit de_exit (
void)