#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>

static int debug = -1;
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);

static void c2_print_macaddr(struct net_device *netdev)
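
/*
 * c2_tx_ring_alloc(): chain the host TX descriptors (and their adapter-side
 * TXP descriptors) into a circular ring; the last element wraps back to the
 * start of the ring.
 */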
        elem = tx_ring->start;
        txp_desc = mmio_txp_ring;
        for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
                elem->ht_desc = tx_desc;
                if (i == tx_ring->count - 1) {
                        elem->next = tx_ring->start;
                        tx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        tx_desc->next_offset =
                                base + (i + 1) * sizeof(*tx_desc);
                }
        }
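
/*
 * c2_rx_ring_alloc(): the RX ring is chained the same way, one element per
 * host RX descriptor.
 */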
        elem = rx_ring->start;
        rxp_desc = mmio_rxp_ring;
        for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
                elem->ht_desc = rx_desc;
                if (i == rx_ring->count - 1) {
                        elem->next = rx_ring->start;
                        rx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        rx_desc->next_offset =
                                base + (i + 1) * sizeof(*rx_desc);
                }
        }
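
/*
 * Allocate a fresh receive skb for one ring element and DMA-map it so the
 * adapter can fill it.
 */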
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
                pr_debug("%s: out of memory for receive\n",
                         c2_port->netdev->name);

        mapaddr = pci_map_single(c2dev->pcidev, skb->data, maplen,
                                 PCI_DMA_FROMDEVICE);

        rx_desc->len = maplen;
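
/* Walk the whole RX ring once, giving every element a receive buffer. */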
static int c2_rx_fill(struct c2_port *c2_port)

        elem = rx_ring->start;
        do {
                if (c2_rx_alloc(c2_port, elem)) {
                        ret = 1;
                        break;
                }
        } while ((elem = elem->next) != rx_ring->start);
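
/* Release the receive buffers currently held by the RX ring elements. */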
static void c2_rx_clean(struct c2_port *c2_port)

        elem = rx_ring->start;

                dev_kfree_skb(elem->skb);

        } while ((elem = elem->next) != rx_ring->start);
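
/*
 * Reclaim the TX ring under tx_lock: anything still outstanding is freed and
 * counted as dropped, then the transmit queue is restarted.
 */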
static void c2_tx_clean(struct c2_port *c2_port)

        elem = tx_ring->start;

                c2_port->netdev->stats.tx_dropped++;

                c2_tx_free(c2_port->c2dev, elem);

        } while ((elem = elem->next) != tx_ring->start);

        netif_wake_queue(c2_port->netdev);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
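
/*
 * TX completion path: free descriptors the adapter has finished with and
 * wake the queue if it had been stopped.
 */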
static void c2_tx_interrupt(struct net_device *netdev)

        struct c2_port *c2_port = netdev_priv(netdev);
                pr_debug("%s: tx done slot %3Zu status 0x%x len "
                         "%5u bytes\n",
                         netdev->name, elem - tx_ring->start,
                         txp_htxd.flags, txp_htxd.len);

                c2_tx_free(c2dev, elem);
        if (netif_queue_stopped(netdev)
            && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(netdev);
        spin_unlock(&c2_port->tx_lock);
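
/*
 * A receive completed with a bad status or length: log it, reset the skb so
 * the element can be reposted to the adapter, and count the drop.
 */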
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)

            rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {

                pr_debug(" rx_desc : %p\n", rx_desc);

                         elem - c2_port->rx_ring.start);

                pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr,
                         (void *) __pa((unsigned long) rxp_hdr));

        elem->skb->data = elem->skb->head;
        skb_reset_tail_pointer(elem->skb);

        memset(elem->skb->data, 0, sizeof(*rxp_hdr));

        c2_port->netdev->stats.rx_dropped++;
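
/*
 * RX completion path: for each completed element, replace its buffer, unmap
 * the old one, and pass the received frame up the stack.
 */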
static void c2_rx_interrupt(struct net_device *netdev)

        struct c2_port *c2_port = netdev_priv(netdev);

                buflen = rxp_hdr->len;

                    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
                        c2_rx_error(c2_port, elem);

                if (c2_rx_alloc(c2_port, elem)) {
                        c2_rx_error(c2_port, elem);
                pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
                                 PCI_DMA_FROMDEVICE);
                skb->data += sizeof(*rxp_hdr);
                skb_set_tail_pointer(skb, buflen);

                netdev->stats.rx_packets++;

        spin_unlock_irqrestore(&c2dev->lock, flags);
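
/*
 * Interrupt handler: when the adapter signals network activity, run the RX
 * and TX completion paths.
 */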
        unsigned int netisr0, dmaisr;

                c2_rx_interrupt(c2dev->netdev);
                c2_tx_interrupt(c2dev->netdev);
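
/*
 * Bring-up path: size the receive buffers, allocate and fill the RX and TX
 * descriptor rings, then start the transmit queue.
 */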
        struct c2_port *c2_port = netdev_priv(netdev);

        struct in_device *in_dev;
        size_t rx_size, tx_size;

        unsigned int netimr0;

        c2_set_rxbufsize(c2_port);

        c2_port->mem_size = tx_size + rx_size;

                pr_debug("Unable to allocate memory for "
                         "host descriptor rings\n");

             c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,

                pr_debug("Unable to create RX ring\n");

        if (c2_rx_fill(c2_port)) {
                pr_debug("Unable to fill RX ring\n");

        if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
                                    c2_port->dma + rx_size,

                pr_debug("Unable to create TX ring\n");
        for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
             i++, elem++) {
        netif_start_queue(netdev);

        in_dev = in_dev_get(netdev);
        IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);

        c2_rx_clean(c2_port);
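
/*
 * Shutdown path: reap any TX completions still pending, stop the queue, and
 * clean both descriptor rings.
 */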
        struct c2_port *c2_port = netdev_priv(netdev);

        pr_debug("%s: disabling interface\n",
                 netdev->name);

        c2_tx_interrupt(netdev);

        netif_stop_queue(netdev);

        c2_tx_clean(c2_port);
        c2_rx_clean(c2_port);
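
/* Quiesce the adapter hardware as part of resetting the port. */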
static void c2_reset(struct c2_port *c2_port)

                pr_debug("c2_reset: failed to quiesce the hardware!\n");

        cur_rx &= ~C2_PCI_HRX_QUI;
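
/*
 * c2_xmit_frame(): DMA-map the skb head and any page fragments, hand the
 * descriptors to the adapter, and stop the queue when the TX ring runs out
 * of free slots.
 */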
        struct c2_port *c2_port = netdev_priv(netdev);

                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&c2_port->tx_lock, flags);

                pr_debug("%s: Tx ring full when queue awake!\n",
                         netdev->name);
        maplen = skb_headlen(skb);

        netdev->stats.tx_packets++;

        if (skb_shinfo(skb)->nr_frags) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

                        maplen = skb_frag_size(frag);
                        mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
                                                   0, maplen, DMA_TO_DEVICE);
                        netdev->stats.tx_packets++;

        c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

                netif_stop_queue(netdev);

                pr_debug("%s: transmit queue full\n",
                         netdev->name);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
static void c2_tx_timeout(struct net_device *netdev)

        struct c2_port *c2_port = netdev_priv(netdev);

        c2_tx_clean(c2_port);
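
/*
 * ndo_change_mtu handler: record the new MTU and restart the interface if it
 * is currently running.
 */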
static int c2_change_mtu(struct net_device *netdev, int new_mtu)

        netdev->mtu = new_mtu;

        if (netif_running(netdev)) {
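
/* net_device_ops hooks wiring the handlers above into the core stack. */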
        .ndo_start_xmit		= c2_xmit_frame,
        .ndo_tx_timeout		= c2_tx_timeout,
        .ndo_change_mtu		= c2_change_mtu,
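
/*
 * c2_devinit(): allocate the net_device for the port, attach it to the
 * adapter, and check the MAC address reported by the hardware.
 */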
        struct c2_port *c2_port = NULL;
        struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

                pr_debug("c2_port etherdev alloc failed");

        c2_port = netdev_priv(netdev);

        c2_port->c2dev = c2dev;

        if (!is_valid_ether_addr(netdev->dev_addr)) {

        c2_print_macaddr(netdev);
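
/*
 * PCI probe path: validate BAR0/BAR2/BAR4, verify the boot-loader magic and
 * firmware version words, then set up the adapter and its net_device.
 */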
        unsigned long reg0_start, reg0_flags, reg0_len;
        unsigned long reg2_start, reg2_flags, reg2_len;
        unsigned long reg4_start, reg4_flags, reg4_len;
        unsigned kva_map_size;

        pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
        pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
        pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

            !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
1008 "Unable to remap adapter PCI registers in BAR4\n");
1014 for (i = 0; i <
sizeof(c2_magic); i++) {
1017 "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
1018 "utility to update your boot loader\n",
1019 i + 1,
sizeof(c2_magic),
1032 "[fw=%u, c2=%u], Adapter not claimed\n",
                printk(KERN_ERR PFX
                       "Downlevel Firmware level. You should be using "
                       "the OpenIB device support kit. "
                       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
        memset(c2dev, 0, sizeof(*c2dev));

               pci_name(pcidev), pcidev->irq);

        pci_set_drvdata(pcidev, c2dev);

        if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {

        netif_stop_queue(netdev);

        c2_print_macaddr(netdev);
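
/* PCI remove path: tear down the adapter state and clear the driver data. */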
        struct c2_dev *c2dev = pci_get_drvdata(pcidev);

        pci_set_drvdata(pcidev, NULL);

        .id_table = c2_pci_table,
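
/* Module entry points simply register and unregister the PCI driver. */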
static int __init c2_init_module(void)
{
        return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)