65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
95 static void bdx_set_ethtool_ops(struct net_device *netdev);
101 static void print_hw_id(struct pci_dev *pdev)
103 struct pci_nic *nic = pci_get_drvdata(pdev);
104 u16 pci_link_status = 0;
111 nic->port_num == 1 ? "" : ", 2-Port");
112 pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
119 static void print_fw_id(struct pci_nic *nic)
126 netdev_info(ndev, "%s, Port %c\n",
135 #define bdx_enable_interrupts(priv) \
136 do { WRITE_REG(priv, regIMR, IR_RUN); } while (0)
137 #define bdx_disable_interrupts(priv) \
138 do { WRITE_REG(priv, regIMR, 0); } while (0)
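/*
 * Interrupt gating is a single register write: regIMR appears to be the
 * interrupt mask register, IR_RUN the set of sources enabled in normal
 * operation, and 0 masks everything. The do { } while (0) wrapper keeps
 * the macros safe inside unbraced if/else bodies.
 */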
164 pr_err("pci_alloc_consistent failed\n");
201 static void bdx_link_changed(struct bdx_priv *priv)
206 if (netif_carrier_ok(priv->ndev)) {
207 netif_stop_queue(priv->ndev);
209 netdev_err(priv->ndev, "Link Down\n");
212 if (!netif_carrier_ok(priv->ndev)) {
213 netif_wake_queue(priv->ndev);
215 netdev_err(priv->ndev, "Link Up\n");
223 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
228 bdx_link_changed(priv);
231 netdev_err(priv->ndev, "PCI-E Link Fault\n");
234 netdev_err(priv->ndev, "PCI-E Time Out\n");
255 struct bdx_priv *priv = netdev_priv(ndev);
266 bdx_isr_extra(priv, isr);
269 if (likely(napi_schedule_prep(&priv->napi))) {
296 bdx_tx_cleanup(priv);
297 work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget);
298 if ((work_done < budget) ||
300 DBG("rx poll is done. backing to isr-driven\n");
322 static int bdx_fw_load(struct bdx_priv *priv)
334 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
337 for (i = 0; i < 200; i++) {
352 netdev_err(priv->ndev, "firmware loading failed\n");
354 DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
360 DBG("%s: firmware loading success\n", priv->ndev->name);
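/*
 * Firmware download reuses the Tx descriptor path: the image is pushed
 * through the TxD fifo with bdx_tx_push_desc_safe(), then the loop above
 * polls the chip (up to 200 iterations) until its init status indicates
 * the engine has started. On timeout the VPC/VIC/INIT_STATUS registers
 * are dumped for debugging.
 */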
370 DBG("mac0=%x mac1=%x mac2=%x\n",
381 DBG("mac0=%x mac1=%x mac2=%x\n",
391 static int bdx_hw_start(struct bdx_priv *priv)
397 bdx_link_changed(priv);
422 bdx_restore_mac(priv->ndev, priv);
427 #define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
441 static void bdx_hw_stop(struct bdx_priv *priv)
448 netif_stop_queue(priv->ndev);
466 for (i = 0; i < 70; i++, mdelay(10))
472 pr_err("HW reset failed\n");
476 static int bdx_hw_reset(struct bdx_priv *priv)
481 if (priv->port == 0) {
490 for (i = 0; i < 70; i++, mdelay(10))
496 pr_err("HW reset failed\n");
500 static int bdx_sw_reset(struct bdx_priv *priv)
514 for (i = 0; i < 50; i++) {
520 netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
549 DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
555 static int bdx_reset(struct bdx_priv *priv)
558 RET((priv->pdev->device == 0x3009)
560 : bdx_sw_reset(priv));
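/*
 * bdx_reset() picks the reset method by PCI device ID: 0x3009 gets the
 * full hardware reset, everything else the software reset. The HW reset
 * paths above poll completion for up to 70 x 10 ms; the SW reset polls
 * 50 times, then warns and continues anyway.
 */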
579 priv = netdev_priv(ndev);
581 napi_disable(&priv->napi);
608 priv = netdev_priv(ndev);
610 if (netif_running(ndev))
611 netif_stop_queue(priv->ndev);
613 if ((rc = bdx_tx_init(priv)) ||
614 (rc = bdx_rx_init(priv)) ||
615 (rc = bdx_fw_load(priv)))
618 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
620 rc = bdx_hw_start(priv);
624 napi_enable(&priv->napi);
626 print_fw_id(priv->nic);
643 struct bdx_priv *priv = netdev_priv(ndev);
649 DBG("jiffies=%ld cmd=%d\n", jiffies, cmd);
653 pr_err("can't copy from user\n");
656 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
665 error = bdx_range_check(priv, data[1]);
669 DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2],
671 error = copy_to_user(ifr->ifr_data, data, sizeof(data));
677 error = bdx_range_check(priv, data[1]);
681 DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]);
690 static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
694 RET(bdx_ioctl_priv(ndev, ifr, cmd));
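/*
 * Judging by the debug output above, the private ioctl is a raw register
 * peek/poke interface: data[0] is the opcode, data[1] the register
 * offset, data[2] the value. bdx_range_check() validates the offset
 * before READ_REG/WRITE_REG is issued, and reads are copied back to user
 * space with copy_to_user().
 */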
709 struct bdx_priv *priv = netdev_priv(ndev);
713 DBG2("vid=%d value=%d\n", (int)vid, enable);
715 pr_err("invalid VID: %u (> 4096)\n", vid);
721 DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
726 DBG2("new val %x\n", val);
738 __bdx_vlan_rx_vid(ndev, vid, 1);
747 static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
749 __bdx_vlan_rx_vid(ndev, vid, 0);
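/*
 * VLAN filtering uses an on-chip bit table: each VID selects one bit in
 * a bank of 32-bit registers (the reg/val/bit values logged above), and
 * add/kill differ only in whether that bit is set or cleared, hence the
 * shared __bdx_vlan_rx_vid(ndev, vid, enable) helper.
 */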
760 static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
764 if (new_mtu == ndev->mtu)
769 netdev_err(ndev, "mtu %d is less than minimal %d\n",
775 if (netif_running(ndev)) {
782 static void bdx_setmulti(struct net_device *ndev)
784 struct bdx_priv *priv = netdev_priv(ndev);
826 val |= (1 << (hash % 32));
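/* Multicast filtering is by imperfect hash: each address hashes to one
 * bit (bit 'hash % 32' in one of a small bank of 32-bit hash registers),
 * so unrelated addresses that share a bit are also accepted. */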
840 static int bdx_set_mac(struct net_device *ndev, void *p)
842 struct bdx_priv *priv = netdev_priv(ndev);
851 bdx_restore_mac(ndev, priv);
855 static int bdx_read_mac(struct bdx_priv *priv)
857 u16 macAddress[3], i;
866 for (i = 0; i < 3; i++) {
867 priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
868 priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
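/* The MAC address is read from the chip as three 16-bit words; each word
 * is stored high byte first (dev_addr[i * 2] gets bits 15:8), i.e. the
 * address sits in the registers in big-endian byte order. */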
873 static u64 bdx_read_l2stat(struct bdx_priv *priv, int reg)
883 static void bdx_update_stats(struct bdx_priv *priv)
886 u64 *stats_vector = (u64 *) stats;
893 for (i = 0; i < 12; i++) {
894 stats_vector[i] = bdx_read_l2stat(priv, addr);
900 for (; i < 16; i++) {
901 stats_vector[i] = bdx_read_l2stat(priv, addr);
907 for (; i < 19; i++) {
908 stats_vector[i] = bdx_read_l2stat(priv, addr);
914 for (; i < 23; i++) {
915 stats_vector[i] = bdx_read_l2stat(priv, addr);
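/*
 * Hardware statistics land in struct bdx_stats, read here as a flat u64
 * vector. The counters sit in several contiguous register blocks
 * (indices 0-11, 12-15, 16-18, 19-22), which is why each loop resumes
 * from the previous i while 'addr' presumably skips the gaps between
 * blocks.
 */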
922 static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
924 static void print_rxfd(struct rxf_desc *rxfd);
930 static void bdx_rxdb_destroy(struct rxdb *db)
935 static struct rxdb *bdx_rxdb_create(int nelem)
941 + (nelem * sizeof(int))
942 + (nelem * sizeof(struct rx_map)));
944 db->stack = (int *)(db + 1);
948 for (i = 0; i < nelem; i++)
949 db->stack[i] = nelem - i - 1;
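/*
 * The rx skb database is one allocation laid out as: struct rxdb, then a
 * stack of free element indices (db->stack points just past the struct),
 * then the rx_map array itself. The stack is pre-filled with every
 * index, so element alloc/free are O(1) push/pop operations.
 */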
956 static inline int bdx_rxdb_alloc_elem(struct rxdb *db)
962 static inline void *bdx_rxdb_addr_elem(struct rxdb *db, int n)
968 static inline int bdx_rxdb_available(struct rxdb *db)
973 static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
1001 static int bdx_rx_init(struct bdx_priv *priv)
1022 netdev_err(priv->ndev, "Rx init failed\n");
1038 DBG("total=%d free=%d busy=%d\n", db->nelem, bdx_rxdb_available(db),
1039 db->nelem - bdx_rxdb_available(db));
1040 while (bdx_rxdb_available(db) > 0) {
1041 i = bdx_rxdb_alloc_elem(db);
1042 dm = bdx_rxdb_addr_elem(db, i);
1045 for (i = 0; i < db->nelem; i++) {
1046 dm = bdx_rxdb_addr_elem(db, i);
1048 pci_unmap_single(priv->pdev,
1049 dm->dma, f->m.pktsz,
1051 dev_kfree_skb(dm->skb);
1062 static void bdx_rx_free(struct bdx_priv *priv)
1066 bdx_rx_free_skbs(priv, &priv->rxf_fifo0);
1067 bdx_rxdb_destroy(priv->rxdb);
1070 bdx_fifo_free(priv, &priv->rxf_fifo0.m);
1071 bdx_fifo_free(priv, &priv->rxd_fifo0.m);
1102 dno = bdx_rxdb_available(db) - 1;
1106 pr_err("NO MEM: netdev_alloc_skb failed\n");
1111 idx = bdx_rxdb_alloc_elem(db);
1112 dm = bdx_rxdb_addr_elem(db, idx);
1113 dm->dma = pci_map_single(priv->pdev,
1114 skb->data, f->m.pktsz,
1117 rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1126 delta = f->m.wptr - f->m.memsz;
1130 memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1131 DBG("wrapped descriptor\n");
1137 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1148 DBG("%s: vlan rcv vlan '%x' vtag '%x'\n",
1167 DBG("priv=%p rxdd=%p\n", priv, rxdd);
1170 DBG("db=%p f=%p\n", db, f);
1171 dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1174 rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
1183 delta = f->m.wptr - f->m.memsz;
1187 memcpy(f->m.va, f->m.va + f->m.memsz, delta);
1188 DBG("wrapped descriptor\n");
1207 static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1228 size = f->m.wptr - f->m.rptr;
1230 size = f->m.memsz + size;
1234 rxdd = (struct rxd_desc *)(f->m.va + f->m.rptr);
1241 print_rxdd(rxdd, rxd_val1, len, rxd_vlan);
1249 f->m.rptr += tmp_len;
1251 tmp_len = f->m.rptr - f->m.memsz;
1253 f->m.rptr = tmp_len;
1255 DBG("wrapped desc rptr=%d tmp_len=%d\n",
1256 f->m.rptr, tmp_len);
1257 memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
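/*
 * RxD read-side wrap: when rptr advances past memsz it is rewound, and
 * the wrapped head of the fifo is copied into the spare area past memsz,
 * apparently so the remaining fields of a descriptor straddling the
 * boundary can still be dereferenced contiguously via the original
 * pointer.
 */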
1263 ndev->stats.rx_errors++;
1264 bdx_recycle_skb(priv, rxdd);
1270 dm = bdx_rxdb_addr_elem(db, rxdd->va_lo);
1277 pci_dma_sync_single_for_cpu(priv->pdev,
1278 dm->dma, rxf_fifo->m.pktsz,
1281 bdx_recycle_skb(priv, rxdd);
1284 pci_unmap_single(priv->pdev,
1285 dm->dma, rxf_fifo->m.pktsz,
1287 bdx_rxdb_free_elem(db, rxdd->va_lo);
1297 skb_checksum_none_assert(skb);
1301 NETIF_RX_MUX(priv, rxd_val1, rxd_vlan, skb);
1303 if (++done >= max_done)
1310 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1312 bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
1320 static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1323 DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
1332 static void print_rxfd(struct rxf_desc *rxfd)
1334 DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
1335 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1383 static inline int bdx_tx_db_size(struct txdb *db)
1387 taken = db->size + 1 + taken;
1389 return db->size - taken;
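/* Free space in the tx database: 'taken' is wptr - rptr, fixed up by
 * size + 1 when negative. The ring holds size + 1 slots with size
 * usable, apparently the usual one-empty-slot convention for telling
 * full from empty; what remains is size - taken. */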
1397 static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
1416 static inline void bdx_tx_db_inc_rptr(struct txdb *db)
1419 __bdx_tx_db_ptr_next(db, &db->rptr);
1426 static inline void bdx_tx_db_inc_wptr(struct txdb *db)
1428 __bdx_tx_db_ptr_next(db, &db->wptr);
1440 static int bdx_tx_db_init(struct txdb *d, int sz_type)
1442 int memsz = FIFO_SIZE * (1 << (sz_type + 1));
1453 d->size = memsz / sizeof(struct tx_map) - 1;
1454 d->end = d->start + d->size + 1;
1467 static void bdx_tx_db_close(struct txdb *d)
1504 int nr_frags = skb_shinfo(skb)->nr_frags;
1507 db->wptr->len = skb_headlen(skb);
1508 db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
1513 DBG("=== pbl len: 0x%x ================\n", pbl->len);
1514 DBG("=== pbl pa_lo: 0x%x ================\n", pbl->pa_lo);
1515 DBG("=== pbl pa_hi: 0x%x ================\n", pbl->pa_hi);
1516 bdx_tx_db_inc_wptr(db);
1518 for (i = 0; i < nr_frags; i++) {
1521 frag = &skb_shinfo(skb)->frags[i];
1522 db->wptr->len = skb_frag_size(frag);
1523 db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag,
1524 0, skb_frag_size(frag),
1531 bdx_tx_db_inc_wptr(db);
1535 db->wptr->len = -txd_sizes[nr_frags].bytes;
1537 bdx_tx_db_inc_wptr(db);
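/* A negative len marks the end-of-packet entry in the tx database: its
 * magnitude is the number of TxD fifo bytes the packet consumed, with
 * the skb pointer stored in the same entry. bdx_tx_cleanup() below walks
 * entries while len > 0 (DMA fragments), then recovers the byte count
 * from the negative entry. */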
1543 static void __init init_txd_sizes(void)
1550 lwords = 7 + (i * 3);
1553 txd_sizes[i].qwords = lwords >> 1;
1554 txd_sizes[i].bytes = lwords << 2;
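/* Descriptor size per fragment count: 7 longwords of header plus 3 per
 * pbl (scatter) entry, then expressed both in quadwords for the hardware
 * field and in bytes for fifo accounting. Precomputing the table keeps
 * this arithmetic off the hot xmit path. */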
1560 static int bdx_tx_init(struct bdx_priv *priv)
1577 #ifdef BDX_DELAY_WPTR
1583 netdev_err(priv->ndev, "Tx init failed\n");
1593 static inline int bdx_tx_space(struct bdx_priv *priv)
1599 fsize = f->m.rptr - f->m.wptr;
1601 fsize = f->m.memsz + fsize;
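/* Same wrap-aware arithmetic as bdx_tx_db_size(), but on the TxD fifo
 * itself: rptr - wptr goes negative once wptr has passed rptr, and
 * adding memsz yields the free byte count. */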
1619 struct bdx_priv *priv = netdev_priv(ndev);
1621 int txd_checksum = 7;
1623 int txd_vlan_id = 0;
1627 int nr_frags = skb_shinfo(skb)->nr_frags;
1630 unsigned long flags;
1634 if (!spin_trylock(&priv->tx_lock)) {
1636 DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
1643 txdd = (struct txd_desc *)(f->m.va + f->m.wptr);
1647 if (skb_shinfo(skb)->gso_size) {
1648 txd_mss = skb_shinfo(skb)->gso_size;
1650 DBG("skb %p skb len %d gso size = %d\n", skb, skb->len,
1664 (txd_sizes[nr_frags].qwords, txd_checksum, txd_vtag,
1665 txd_lgsnd, txd_vlan_id));
1666 DBG("=== TxD desc =====================\n");
1667 DBG("=== w1: 0x%x ================\n", txdd->txd_val1);
1668 DBG("=== w2: mss 0x%x len 0x%x\n", txdd->mss, txdd->length);
1670 bdx_tx_map_skb(priv, skb, txdd);
1675 f->m.wptr += txd_sizes[nr_frags].bytes;
1676 len = f->m.wptr - f->m.memsz;
1681 memcpy(f->m.va, f->m.va + f->m.memsz, len);
1686 priv->tx_level -= txd_sizes[nr_frags].bytes;
1688 #ifdef BDX_DELAY_WPTR
1694 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
1699 f->m.wptr & TXF_WPTR_WR_PTR);
1707 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
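/*
 * A reading of the branches above: with BDX_DELAY_WPTR the doorbell is
 * written immediately only while the fifo is filling up; otherwise wptr
 * updates are batched and flushed once enough packets accumulate,
 * trading a little latency for fewer MMIO writes on the hot path.
 */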
1713 ndev->stats.tx_packets++;
1717 DBG("%s: %s: TX Q STOP level %d\n",
1719 netif_stop_queue(ndev);
1722 spin_unlock_irqrestore(&priv->tx_lock, flags);
1733 static void bdx_tx_cleanup(struct bdx_priv *priv)
1743 while (f->m.wptr != f->m.rptr) {
1745 f->m.rptr &= f->m.size_mask;
1752 pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1754 bdx_tx_db_inc_rptr(db);
1755 } while (db->rptr->len > 0);
1756 tx_level -= db->rptr->len;
1760 bdx_tx_db_inc_rptr(db);
1764 BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
1765 WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
1772 #ifdef BDX_DELAY_WPTR
1776 priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
1781 netif_carrier_ok(priv->ndev) &&
1783 DBG("%s: %s: TX Q WAKE level %d\n",
1785 netif_wake_queue(priv->ndev);
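/* Cleanup also wakes the queue: once enough TxD space is reclaimed and
 * the carrier is up, a stopped queue is restarted, pairing with the
 * netif_stop_queue() call in bdx_tx_transmit(). */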
1794 static void bdx_tx_free_skbs(struct bdx_priv *priv)
1801 pci_unmap_page(priv->pdev, db->rptr->addr.dma,
1804 dev_kfree_skb(db->rptr->addr.skb);
1805 bdx_tx_db_inc_rptr(db);
1811 static void bdx_tx_free(struct bdx_priv *priv)
1814 bdx_tx_free_skbs(priv);
1815 bdx_fifo_free(priv, &priv->txd_fifo0.m);
1816 bdx_fifo_free(priv, &priv->txf_fifo0.m);
1817 bdx_tx_db_close(&priv->txdb);
1831 static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
1834 int i = f->m.memsz - f->m.wptr;
1840 memcpy(f->m.va + f->m.wptr, data, size);
1843 memcpy(f->m.va + f->m.wptr, data, i);
1844 f->m.wptr = size - i;
1845 memcpy(f->m.va, data + i, f->m.wptr);
1847 WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
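/*
 * bdx_tx_push_desc() writes 'size' raw bytes at wptr, splitting the copy
 * in two when only i bytes remain before the fifo end: the first chunk
 * fills up to memsz, the remainder continues from the fifo start, and
 * the final doorbell write makes the data visible to the chip.
 */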
1859 static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1868 int avail = bdx_tx_space(priv) - 8;
1870 if (timer++ > 300) {
1871 DBG("timeout while writing desc to TxD fifo\n");
1877 avail = min(avail, size);
1878 DBG("about to push %d bytes starting %p size %d\n", avail,
1880 bdx_tx_push_desc(priv, data, avail);
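/* The "safe" variant is for use before interrupts are running (firmware
 * load): it busy-waits for fifo space, pushing the buffer in chunks of
 * whatever is available (minus a small safety margin) and giving up
 * after 300 polls. */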
1888 .ndo_open = bdx_open,
1889 .ndo_stop = bdx_close,
1890 .ndo_start_xmit = bdx_tx_transmit,
1892 .ndo_do_ioctl = bdx_ioctl,
1893 .ndo_set_rx_mode = bdx_setmulti,
1894 .ndo_change_mtu = bdx_change_mtu,
1895 .ndo_set_mac_address = bdx_set_mac,
1896 .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
1897 .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
1923 unsigned long pciaddr;
1938 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
1939 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
1942 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
1943 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
1944 pr_err("No usable DMA configuration, aborting\n");
1959 pr_err("no MMIO resource\n");
1965 pr_err("MMIO resource (%x) too small\n", regionSize);
1972 pr_err("ioremap failed\n");
1976 if (pdev->irq < 2) {
1978 pr_err("invalid irq (%d)\n", pdev->irq);
1981 pci_set_drvdata(pdev, nic);
1983 if (pdev->device == 0x3014)
1990 bdx_hw_reset_direct(nic->regs);
1995 err = pci_enable_msi(pdev);
1997 pr_err("Can't enable msi. error is %d\n", err);
2001 DBG("HW does not support MSI\n");
2005 for (port = 0; port < nic->port_num; port++) {
2006 ndev = alloc_etherdev(sizeof(struct bdx_priv));
2015 bdx_set_ethtool_ops(ndev);
2032 priv = nic->priv[port] = netdev_priv(ndev);
2044 DBG("HW statistics not supported\n");
2071 if (bdx_read_mac(priv)) {
2072 pr_err("load MAC address failed\n");
2078 pr_err("register_netdev failed\n");
2082 netif_stop_queue(ndev);
2113 "FrameSequenceErrors",
2117 "InFrameAlignErrors",
2149 struct bdx_priv *priv = netdev_priv(netdev);
2180 struct bdx_priv *priv = netdev_priv(netdev);
2204 struct bdx_priv *priv = netdev_priv(netdev);
2233 struct bdx_priv *priv = netdev_priv(netdev);
2253 if ((rx_coal > 0x7FFF) || (tx_coal > 0x7FFF) ||
2254 (rx_max_coal > 0xF) || (tx_max_coal > 0xF))
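/* The coalescing limits track the register layout: the rx/tx interval
 * fields allow 15 bits (max 0x7FFF) and the max-frames fields 4 bits
 * (max 0xF), so anything larger is rejected. */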
2272 static inline int bdx_rx_fifo_size_to_packets(int rx_size)
2278 static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2291 struct bdx_priv *priv = netdev_priv(netdev);
2308 struct bdx_priv *priv = netdev_priv(netdev);
2312 for (; rx_size < 4; rx_size++) {
2313 if (bdx_rx_fifo_size_to_packets(rx_size) >= ring->rx_pending)
2319 for (; tx_size < 4; tx_size++) {
2320 if (bdx_tx_fifo_size_to_packets(tx_size) >= ring->tx_pending)
2339 if (netif_running(netdev)) {
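/* set_ringparam rounds up: fifo sizes are encoded as a small exponent
 * (0..3), and each loop above picks the smallest encoding whose packet
 * capacity covers the requested rx_pending/tx_pending. */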
2351 static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2353 switch (stringset) {
2355 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2364 static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2366 struct bdx_priv *priv = netdev_priv(netdev);
2368 switch (stringset) {
2384 static void bdx_get_ethtool_stats(struct net_device *netdev,
2387 struct bdx_priv *priv = netdev_priv(netdev);
2392 bdx_update_stats(priv);
2403 static void bdx_set_ethtool_ops(struct net_device *netdev)
2405 static const struct ethtool_ops bdx_ethtool_ops = {
2407 .get_drvinfo = bdx_get_drvinfo,
2409 .get_coalesce = bdx_get_coalesce,
2410 .set_coalesce = bdx_set_coalesce,
2411 .get_ringparam = bdx_get_ringparam,
2412 .set_ringparam = bdx_set_ringparam,
2413 .get_strings = bdx_get_strings,
2414 .get_sset_count = bdx_get_sset_count,
2415 .get_ethtool_stats = bdx_get_ethtool_stats,
2432 struct pci_nic *nic = pci_get_drvdata(pdev);
2436 for (port = 0; port < nic->port_num; port++) {
2451 pci_set_drvdata(pdev, NULL);
2459 .id_table = bdx_pci_tbl,
2467 static void __init print_driver_id(void)
2473 static int __init bdx_module_init(void)
2478 RET(pci_register_driver(&bdx_pci_driver));
2483 static void __exit bdx_module_exit(void)