#define DRV_NAME	"DL2000/TC902x-based linux driver"
#define DRV_VERSION	"v1.19"
#define DRV_RELDATE	"2007/08/12"
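/*
 * Register access helpers. Each assumes a local "void __iomem *ioaddr"
 * is in scope at the call site (set up via pci_iomap() in rio_probe1()).
 */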
#define dw32(reg, val)	iowrite32(val, ioaddr + (reg))
#define dw16(reg, val)	iowrite16(val, ioaddr + (reg))
#define dw8(reg, val)	iowrite8(val, ioaddr + (reg))
#define dr32(reg)	ioread32(ioaddr + (reg))
#define dr16(reg)	ioread16(ioaddr + (reg))
#define dr8(reg)	ioread8(ioaddr + (reg))
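/*
 * Module parameters. From the probe logic, -1 for tx_flow/rx_flow appears
 * to mean "leave flow control enabled"; copy_thresh is the copy-break size
 * in bytes (frames at or below it are copied into a fresh skb on receive);
 * rx_coalesce/rx_timeout and tx_coalesce tune how many packets are batched
 * per interrupt.
 */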
static int tx_flow = -1;
static int rx_flow = -1;
static int copy_thresh;
static int rx_coalesce = 10;
static int rx_timeout = 200;
static int tx_coalesce = 16;
#define DEFAULT_INTR	(RxDMAComplete | HostError | IntRequested | TxDMAComplete | \
			 UpdateStats | LinkEvent)
static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;
static void rio_timer (unsigned long data);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
		      u16 data);
	.ndo_open		= rio_open,
	.ndo_start_xmit		= start_xmit,
	.ndo_stop		= rio_close,
	.ndo_set_rx_mode	= set_multicast,
	.ndo_do_ioctl		= rio_ioctl,
	.ndo_tx_timeout		= rio_tx_timeout,
	.ndo_change_mtu		= change_mtu,
	static int version_printed;

	if (!version_printed++)

	goto err_out_disable;
	dev = alloc_etherdev (sizeof (*np));

	np = netdev_priv(dev);

	ioaddr = pci_iomap(pdev, 0, 0);
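	/*
	 * The mapping above covers BAR 0 (I/O space); the remap below to
	 * BAR 1 (memory space) is presumably taken only when the driver is
	 * built for memory-mapped access, as in the full source.
	 */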
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
	if (media[card_idx] != NULL) {
		if (strcmp (media[card_idx], "auto") == 0 ||
		    strcmp (media[card_idx], "autosense") == 0 ||
		    strcmp (media[card_idx], "0") == 0) {
		} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			   strcmp (media[card_idx], "4") == 0) {
		} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
			   strcmp (media[card_idx], "3") == 0) {
		} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
			   strcmp (media[card_idx], "2") == 0) {
		} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
			   strcmp (media[card_idx], "1") == 0) {
		} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
			   strcmp (media[card_idx], "6") == 0) {
		} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
			   strcmp (media[card_idx], "5") == 0) {
	if (jumbo[card_idx] != 0) {

	if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
		dev->mtu = mtu[card_idx];

	np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		   vlan[card_idx] : 0;
	pci_set_drvdata (pdev, dev);

	goto err_out_iounmap;

	goto err_out_unmap_tx;

	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	mii_set_media_pcs (dev);

	if (np->speed == 1000)

	goto err_out_unmap_rx;

	       "rx_coalesce:\t%d packets\n"
	       "rx_timeout: \t%d ns\n",
	int i, phy_found = 0;
	np = netdev_priv(dev);
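	/*
	 * Scan all 32 MII addresses from the top down; a BMSR (register 1)
	 * reading of all ones or all zeros means no PHY answered there.
	 */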
	for (i = 31; i >= 0; i--) {
		int mii_status = mii_read (dev, i, 1);
		if (mii_status != 0xffff && mii_status != 0x0000) {

	for (i = 0; i < 128; i++)

	for (i = 0; i < 6; i++)

	psib = (u8 *) sromdata;

	if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {

	j = (next - i > 255) ? 255 : next - i;
	const int irq = np->pdev->irq;

	for (i = 0; i < 6; i++)

	np->timer.function = rio_timer;

	netif_start_queue (dev);
rio_timer (unsigned long data)

	int next_tick = 1 * HZ;

	skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);

	       "%s: Still unable to re-allocate Rx skbuff.#%d\n",

	spin_unlock_irqrestore (&np->rx_lock, flags);
			      ((i + 1) % TX_RING_SIZE) *

			      ((i + 1) % RX_RING_SIZE) *
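	/*
	 * The modulo arithmetic above chains each descriptor's next pointer
	 * to the following entry and wraps the last one back to the first,
	 * so the TX and RX rings form closed circles.
	 */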
		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);

			    "%s: alloc_list: allocate Rx buffer error! ",
	u64 tfc_vlan_tag = 0;

	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE

	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
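	/*
	 * (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE counts the
	 * descriptors still in flight and stays correct after the
	 * free-running indices wrap: e.g. with a 256-entry ring,
	 * cur_tx = 2 and old_tx = 250 gives (2 - 250 + 256) % 256 = 8.
	 */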
rio_interrupt (int irq, void *dev_instance)

	int cnt = max_intrloop;
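	/*
	 * cnt bounds the service loop below: after max_intrloop passes the
	 * handler gives up and returns, so an interrupt storm cannot pin
	 * the CPU inside the ISR.
	 */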
		if (int_status == 0 || --cnt < 0)
			break;

		receive_packet (dev);

		if (tx_status & 0x01)
			tx_error (dev, tx_status);

		rio_free_tx (dev, 1);

		rio_error (dev, int_status);
	unsigned long flag = 0;

	while (entry != np->cur_tx) {

		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),

		entry = (entry + 1) % TX_RING_SIZE;

	spin_unlock_irqrestore(&np->tx_lock, flag);

	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
		netif_wake_queue (dev);
tx_error (struct net_device *dev, int tx_status)

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;

	/* Transmit underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;

		/* Wait for the reset to complete */
		for (i = 50; i > 0; i--) {

		rio_free_tx (dev, 1);

	/* Late collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;

		for (i = 50; i > 0; i--) {
	/* Maximum collisions: the two counters are build-time alternatives */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
		pkt_len = frame_status & 0xffff;

		/* each counter below is guarded by its own frame_status bit test */
			np->stats.rx_errors++;
				np->stats.rx_length_errors++;
				np->stats.rx_crc_errors++;
				np->stats.rx_frame_errors++;
				np->stats.rx_fifo_errors++;
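		/*
		 * Copy-break: frames larger than copy_thresh keep their ring
		 * buffer (it is unmapped and handed up the stack); smaller
		 * frames are copied into a freshly allocated skb so the large
		 * ring buffer can be reused immediately.
		 */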
		if (pkt_len > copy_thresh) {
			pci_unmap_single (np->pdev,

		} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
			pci_dma_sync_single_for_cpu(np->pdev,

			skb_copy_to_linear_data (skb,

			pci_dma_sync_single_for_device(np->pdev,

		/* checksum verified by hardware on newer chip revisions */
		if (np->pdev->revision >= 0x0c &&

		entry = (entry + 1) % RX_RING_SIZE;
	while (entry != np->cur_rx) {

		skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);

			    "%s: receive_packet: "
			    "Unable to re-allocate Rx skbuff.#%d\n",

		entry = (entry + 1) % RX_RING_SIZE;
rio_error (struct net_device *dev, int int_status)

	if (mii_wait_link (dev, 10) == 0) {

		mii_get_media_pcs (dev);

	if (np->speed == 1000)

		dev->name, int_status);
	unsigned int stat_reg;
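	/*
	 * The chip's statistics registers presumably clear on read, so each
	 * value read below is folded into the software running totals rather
	 * than copied over them.
	 */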
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;
	/* registers 0x100-0x150 hold the remaining hardware statistics */
	for (i = 0x100; i <= 0x150; i += 4)

	for (i = 0x100; i <= 0x150; i += 4)
change_mtu (struct net_device *dev, int new_mtu)

	if ((new_mtu < 68) || (new_mtu > max)) {
	hash_table[0] = hash_table[1] = 0;
	/* always accept the 802.3x flow-control multicast address
	   (hash index 0x39, i.e. bit 25 of the high word) */
	hash_table[1] |= 0x02000000;
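	/*
	 * Hash index = the six most-significant CRC bits, bit-reversed.
	 * The index selects one bit of the 64-bit table kept as two 32-bit
	 * words: e.g. a CRC of 0xE0000000 has top bits 111000, reversed
	 * 000111 = index 7, which sets bit 7 of hash_table[0].
	 */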
		for (bit = 0; bit < 6; bit++)
			if (crc & (1 << (31 - bit)))
				index |= (1 << bit);
		hash_table[index / 32] |= (1 << (index % 32));
		ethtool_cmd_speed_set(cmd, np->speed);

		ethtool_cmd_speed_set(cmd, -1);

	if (np->speed == 1000) {
		printk (KERN_WARNING
			"Cannot disable autonegotiation at 1000Mbps; "
			"falling back to manual 100Mbps full duplex.\n");
	switch (ethtool_cmd_speed(cmd)) {

	.get_settings	= rio_get_settings,
	.set_settings	= rio_set_settings,
	.get_link	= rio_get_link,
	miidata->phy_id = phy_addr;

		mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
#define EEP_READ	0x0200
#define EEP_BUSY	0x8000

/* the dummy register read provides the bit-bang hold time and flushes
   posted bus writes */
#define mii_delay()	dr8(PhyCtrl)
	for (i = len - 1; i >= 0; i--) {
		mii_sendbit (dev, data & (1 << i));
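/*
 * The two routines below bit-bang IEEE 802.3 clause-22 MDIO frames:
 * a 32-bit preamble of ones, then ST(01), OP(10 = read, 01 = write),
 * a 5-bit PHY address, a 5-bit register address, a turnaround, and
 * 16 data bits, MSB first.
 */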
mii_read (struct net_device *dev, int phy_addr, int reg_num)

	/* preamble */
	mii_send_bits (dev, 0xffffffff, 32);

	/* ST(01) + OP(10) + PHY address + register address */
	cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
	mii_send_bits (dev, cmd, 14);

	/* turnaround: the PHY drives zero; anything else is an error */
	if (mii_getbit (dev))

	for (i = 0; i < 16; i++) {
		retval |= mii_getbit (dev);

	return (retval >> 1) & 0xffff;
mii_write (struct net_device *dev, int phy_addr, int reg_num,
	   u16 data)

	/* preamble */
	mii_send_bits (dev, 0xffffffff, 32);

	/* ST(01) + OP(01) + PHY address + register address + TA(10) + data */
	cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
	mii_send_bits (dev, cmd, 32);
	np = netdev_priv(dev);

	do {
		bmsr = mii_read (dev, phy_addr, MII_BMSR);

	} while (--wait > 0);
	np = netdev_priv(dev);

	bmsr = mii_read (dev, phy_addr, MII_BMSR);

	mii_read (dev, phy_addr, MII_LPA);
	np = netdev_priv(dev);

	bmsr = mii_read (dev, phy_addr, MII_BMSR);

		  ~(ADVERTISE_100FULL | ADVERTISE_10FULL |
		    ADVERTISE_100HALF | ADVERTISE_10HALF |

		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		bmcr = mii_read (dev, phy_addr, MII_BMCR);

		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		mii_write (dev, phy_addr, MII_BMCR, bmcr);
		if (np->speed == 100) {

		} else if (np->speed == 10) {

		/* manually configure 1000BASE-T master/slave
		   (value bit cleared = slave) */
		mscr |= MII_MSCR_CFG_ENABLE;
		mscr &= ~MII_MSCR_CFG_VALUE;

		mii_write (dev, phy_addr, MII_BMCR, bmcr);
	np = netdev_priv(dev);

	bmsr = mii_read (dev, phy_addr, PCS_BMSR);

	if (!(bmsr & BMSR_ANEGCOMPLETE)) {

	negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
	np = netdev_priv(dev);

	esr = mii_read (dev, phy_addr, PCS_ESR);

		 ~PCS_ANAR_FULL_DUPLEX;

		mii_write (dev, phy_addr, MII_BMCR, bmcr);

		mii_write (dev, phy_addr, MII_BMCR, bmcr);

	mii_write (dev, phy_addr, MII_BMCR, bmcr);
	netif_stop_queue (dev);

		pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),

		dev_kfree_skb (skb);

		pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),

		dev_kfree_skb (skb);
rio_remove1 (struct pci_dev *pdev)

	struct net_device *dev = pci_get_drvdata (pdev);

	pci_set_drvdata (pdev, NULL);
	.id_table	= rio_pci_tbl,
	.probe		= rio_probe1,

	return pci_register_driver(&rio_driver);
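/*
 * Loading example (hypothetical values, assuming the module is built as
 * dl2k.ko; the parameter names match the module parameters declared above):
 *
 *   modprobe dl2k media=auto mtu=1500 tx_coalesce=16 rx_coalesce=10
 */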