#include <linux/module.h>

#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

#define VMXNET3_MAX_DEVICES 10

static int enable_mq = 1;
static int irq_share_mode;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_enable_intr(adapter, i);
	adapter->shared->devRead.intrConf.intrCtrl &=
				cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);

	adapter->shared->devRead.intrConf.intrCtrl |=
				cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
	for (i = 0; i < adapter->intr.num_intrs; i++)
		vmxnet3_disable_intr(adapter, i);
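/*
 * Two interrupt gates are in play here: vmxnet3_enable_intr() /
 * vmxnet3_disable_intr() act on individual vectors, while intrCtrl in the
 * shared devRead area is a device-wide mask consulted before any interrupt
 * is generated; VMXNET3_IC_DISABLE_ALL is the all-sources flag used for
 * that global mask.
 */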
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	if (!netif_carrier_ok(adapter->netdev))
		vmxnet3_tq_start(&adapter->tx_queue[i], adapter);
	if (netif_carrier_ok(adapter->netdev))
		vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
	vmxnet3_ack_events(adapter, events);
		vmxnet3_check_link(adapter, true);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	if (adapter->tqd_start[i].status.stopped)
		"%s: tq[%d] error 0x%x\n",
	if (adapter->rqd_start[i].status.stopped)
		"%s: rq[%d] error 0x%x\n",
#ifdef __BIG_ENDIAN_BITFIELD

static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
				struct Vmxnet3_RxDesc *dstDesc)

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
			       struct Vmxnet3_TxDesc *dstDesc)

	for (i = 2; i > 0; i--) {

	u32 *src = (u32 *)srcDesc;
	u32 *dst = (u32 *)dstDesc;
#ifdef __BIG_ENDIAN_BITFIELD

# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
			VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
			txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
			VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
			VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
			VMXNET3_TCD_GEN_SIZE)
# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
			VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
			(dstrcd) = (tmp); \
			vmxnet3_RxCompToCPU((rcd), (tmp)); \
		} while (0)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
			(dstrxd) = (tmp); \
			vmxnet3_RxDescToCPU((rxd), (tmp)); \
		} while (0)

#else /* __BIG_ENDIAN_BITFIELD */

# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD */
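/*
 * Why the two variants above exist: the device lays descriptors out as
 * little-endian 32-bit words, and C makes no portable guarantee about
 * bitfield ordering. On big-endian hosts the fields are therefore pulled
 * out of byte-swapped __le32 words with explicit shift/size arguments
 * (get_bitfield32), and rx descriptors are first copied into a CPU-order
 * scratch copy ("tmp") before use. On little-endian hosts the in-ring
 * descriptor can be read through its bitfields directly.
 */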
	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
				&gdesc->tcd), tq, adapter->pdev,
		vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);

	if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
		     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
		     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
		     netif_carrier_ok(adapter->netdev))) {
		vmxnet3_tq_wake(tq, adapter);

		vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
	for (i = 0; i < tq->tx_ring.size; i++) {

		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);

	for (i = 0; i < tq->tx_ring.size; i++)

	vmxnet3_tq_destroy(tq, adapter);

		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
	int num_allocated = 0;

	while (num_allocated <= num_to_alloc) {
				rbi->skb = dev_alloc_skb(rbi->len +
							 NET_IP_ALIGN);
					rq->stats.rx_buf_alloc_failure++;
					rq->stats.rx_buf_alloc_failure++;
		if (num_allocated == num_to_alloc)
		vmxnet3_cmd_ring_adv_next2fill(ring);

		"alloc_rx_buf: %d allocated, next2fill %u, next2comp %u, uncommitted %u\n",
		num_allocated, ring->next2fill,

	return num_allocated;
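/*
 * Note the two distinct rx_buf_alloc_failure bumps above: the refill loop
 * handles both buffer flavors used by the two rx rings (skb buffers
 * allocated with dev_alloc_skb() and page buffers), and a failed
 * allocation of either kind ends the loop early, which is why the caller
 * gets back the count actually allocated.
 */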
		skb_shinfo(skb)->nr_frags;
	__skb_frag_set_page(frag, rbi->page);
	skb_frag_size_set(frag, rcd->len);
	skb_shinfo(skb)->nr_frags++;
	unsigned long buf_offset;

		"txd[%u]: 0x%Lx 0x%x 0x%x\n",
	vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

				skb->data + buf_offset, buf_size,
		"txd[%u]: 0x%Lx 0x%x 0x%x\n",
	vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		len = skb_frag_size(frag);
		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
						 buf_offset, buf_size,
						 DMA_TO_DEVICE);
		"txd[%u]: 0x%llx %u %u\n",
	vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
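/*
 * Mapping order in this function, as the fragments above suggest: the
 * (already copied) header lands in the data ring, the remainder of the
 * linear part is mapped from skb->data + buf_offset, then each page
 * fragment is mapped with skb_frag_dma_map(); every mapped piece consumes
 * one tx descriptor, advanced via vmxnet3_cmd_ring_adv_next2fill().
 */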
		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
	const struct iphdr *iph = ip_hdr(skb);
			tq->stats.oversized_hdr++;
		"copy %u bytes to dataRing[%u]\n",

vmxnet3_prepare_tso(struct sk_buff *skb,
		    struct vmxnet3_tx_ctx *ctx)
	struct tcphdr *tcph = tcp_hdr(skb);
		struct iphdr *iph = ip_hdr(skb);
		struct ipv6hdr *iph = ipv6_hdr(skb);

static int txd_estimate(const struct sk_buff *skb)
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
#ifdef __BIG_ENDIAN_BITFIELD

	count = txd_estimate(skb);

		ctx.mss = skb_shinfo(skb)->gso_size;
		if (skb_header_cloned(skb)) {
				tq->stats.drop_tso++;
			tq->stats.copy_skb_header++;
		vmxnet3_prepare_tso(skb, &ctx);
			if (skb_linearize(skb) != 0) {
				tq->stats.drop_too_many_frags++;
			tq->stats.linearized++;

	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
		tq->stats.tx_ring_full++;
			"tx queue stopped on %s, next2comp %u next2fill %u\n",
			adapter->netdev->name,
		vmxnet3_tq_stop(tq, adapter);
		spin_unlock_irqrestore(&tq->tx_lock, flags);

	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
			tq->stats.drop_hdr_inspect_err++;
			goto unlock_drop_pkt;

	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

#ifdef __BIG_ENDIAN_BITFIELD
	gdesc = &tempTxDesc;

		gdesc->txd.msscof = ctx.mss;
		le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
			     gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
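/*
 * The expression above is a ceiling division: (skb->len - hlen + mss - 1)
 * / mss is the number of MSS-sized packets the device will cut this TSO
 * frame into, so txNumDeferred is credited per wire packet rather than per
 * skb. The non-TSO path below credits exactly 1 for the same reason.
 */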
		gdesc->txd.msscof = 0;
		le32_add_cpu(&tq->shared->txNumDeferred, 1);

#ifdef __BIG_ENDIAN_BITFIELD
		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
	spin_unlock_irqrestore(&tq->tx_lock, flags);
		tq->shared->txNumDeferred = 0;

		tq->stats.drop_oversized_hdr++;
	spin_unlock_irqrestore(&tq->tx_lock, flags);
	tq->stats.drop_total++;
	return vmxnet3_tq_xmit(skb,
			       &adapter->tx_queue[skb->queue_mapping],
			       adapter, netdev);

	if (gdesc->rcd.csum) {
		skb_checksum_none_assert(skb);
		skb_checksum_none_assert(skb);

		rq->stats.drop_err++;
		rq->stats.drop_fcs++;
	rq->stats.drop_total++;
	static const u32 rxprod_reg[2] = {
		VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
	};
	bool skip_page_frags = false;
#ifdef __BIG_ENDIAN_BITFIELD
		if (num_rxd >= quota) {
		ring = rq->rx_ring + ring_idx;
			vmxnet3_rx_error(rq, rcd, ctx, adapter);
				"rxRing[%u][%u] 0 length\n",
			skip_page_frags = false;
			if (new_skb == NULL) {
				rq->stats.rx_buf_alloc_failure++;
				rq->stats.drop_total++;
				skip_page_frags = true;
				rbi->skb->data, rbi->len,
			if (skip_page_frags)
					rq->stats.rx_buf_alloc_failure++;
					dev_kfree_skb(ctx->skb);
					skip_page_frags = true;
				pci_unmap_page(adapter->pdev,
				vmxnet3_append_frag(ctx->skb, rcd, rbi);
				rbi->page = new_page;
			vmxnet3_rx_csum(adapter, skb,
			__vlan_hwaccel_put_tag(skb, rcd->tci);

		num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
		ring = rq->rx_ring + ring_idx;
		while (num_to_alloc) {
			vmxnet3_cmd_ring_adv_next2fill(ring);
				rxprod_reg[ring_idx] + rq->qid * 8,
		vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
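/*
 * Refill and doorbell batching: freed descriptors are re-filled in a
 * tight loop and only then is the ring's producer register written
 * (rxprod_reg[ring_idx] selects RXPROD or RXPROD2, offset by qid * 8 per
 * queue), so one register write covers a whole batch of new buffers
 * rather than one write per descriptor.
 */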
	for (ring_idx = 0; ring_idx < 2; ring_idx++) {
		for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
#ifdef __BIG_ENDIAN_BITFIELD
				  &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
				pci_unmap_single(adapter->pdev, rxd->addr,
				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
				pci_unmap_page(adapter->pdev, rxd->addr,
		rq->rx_ring[ring_idx].next2fill =
			rq->rx_ring[ring_idx].next2comp = 0;

		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);

	for (i = 0; i < 2; i++) {
		for (j = 0; j < rq->rx_ring[i].size; j++)

	for (i = 0; i < 2; i++) {

	for (i = 0; i < rq->rx_ring[0].size; i++) {

	for (i = 0; i < rq->rx_ring[1].size; i++) {

	for (i = 0; i < 2; i++) {

	if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
				    adapter) == 0) {
	vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);

		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
				"initialize rx queue %i\n",
				adapter->netdev->name, i);

	for (i = 0; i < 2; i++) {
			adapter->netdev->name, i);

		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
				"%s: failed to create rx queue %i\n",
				adapter->netdev->name, i);
	int rcd_done = 0, i;

	vmxnet3_process_events(adapter);
		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],

	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
	if (rxd_done < budget) {
		vmxnet3_enable_all_intrs(rx_queue->adapter);

vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
	vmxnet3_tq_tx_complete(tq, adapter);
	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
	if (rxd_done < budget) {
		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
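/*
 * NAPI contract visible above: if fewer descriptors than the budget were
 * processed, polling is complete for now and the interrupt source is
 * re-armed; under MSI-X the rx-only poller re-enables just its own
 * completion-ring vector, while the shared-vector path re-enables all
 * interrupts.
 */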
#ifdef CONFIG_PCI_MSI

vmxnet3_msix_tx(int irq, void *data)
	vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
			vmxnet3_tq_tx_complete(txq, adapter);
		vmxnet3_tq_tx_complete(tq, adapter);
	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);

vmxnet3_msix_rx(int irq, void *data)
	vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
	napi_schedule(&rq->napi);

vmxnet3_msix_event(int irq, void *data)
	vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
	if (adapter->shared->ecr)
		vmxnet3_process_events(adapter);
	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
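/*
 * Vector layout implied by these handlers: with MSI-X each tx queue and
 * each rx queue gets its own completion-ring vector, plus one dedicated
 * event vector (event_intr_idx). Tx completions are handled directly in
 * hard-irq context, while rx work is deferred to NAPI via napi_schedule().
 */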
	vmxnet3_disable_all_intrs(adapter);
	napi_schedule(&adapter->rx_queue[0].napi);

#ifdef CONFIG_NET_POLL_CONTROLLER
	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
	vmxnet3_enable_all_intrs(adapter);
#ifdef CONFIG_PCI_MSI
			adapter->netdev->name, vector);
			intr->msix_entries[vector].vector,
			adapter->netdev->name, vector);
			"Failed to request irq for MSIX, %s, "
		adapter->tx_queue[i].comp_ring.intr_idx
				adapter->netdev->name, vector);
				adapter->netdev->name, vector);
			err = request_irq(intr->msix_entries[vector].vector,
		adapter->rx_queue[i].comp_ring.intr_idx = vector++;
			adapter->netdev->name, vector);
		err = request_irq(intr->msix_entries[vector].vector,
				  vmxnet3_msix_event, 0,

#ifdef CONFIG_PCI_MSI
			":%d\n", adapter->netdev->name, intr->type, err);

		adapter->intr.event_intr_idx = 0;
			adapter->tx_queue[i].comp_ring.intr_idx = 0;
		adapter->rx_queue[0].comp_ring.intr_idx = 0;

			"allocated\n", adapter->netdev->name, intr->type,

	switch (intr->type) {
#ifdef CONFIG_PCI_MSI
		free_irq(intr->msix_entries[vector++].vector,
		free_irq(intr->msix_entries[vector++].vector,
		free_irq(intr->msix_entries[vector].vector,
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;

	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
	unsigned long flags;
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	unsigned long flags;
		&adapter->shared->devRead.rxFilterConf;
		u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
		vmxnet3_restore_vlan(adapter);
		new_table = vmxnet3_copy_mc(netdev);
			", setting ALL_MULTI\n", netdev->name);
	if (new_mode != rxConf->rxMode) {
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	memset(shared, 0, sizeof(*shared));

	devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
				VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
		*((u32 *)&devRead->misc.driverInfo.gos));

	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
		rssConf->indTable[i] = ethtool_rxfh_indir_default(
			i, adapter->num_rx_queues);

	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
				     VMXNET3_IMM_AUTO;
	for (i = 0; i < adapter->intr.num_intrs; i++)
		devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
	devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;

	vmxnet3_restore_vlan(adapter);
	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
	unsigned long flags;

	dev_dbg(&adapter->netdev->dev,
		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes %u %u %u\n",
		adapter->netdev->name,
		adapter->rx_queue[0].rx_ring[0].size,
		adapter->rx_queue[0].rx_ring[1].size);

	vmxnet3_tq_init_all(adapter);
	err = vmxnet3_rq_init_all(adapter);
			adapter->netdev->name, err);
	err = vmxnet3_request_irqs(adapter);
			adapter->netdev->name, err);
	vmxnet3_setup_driver_shared(adapter);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
			adapter->netdev->name, ret);
				       adapter->rx_queue[i].rx_ring[0].next2fill);
				       (i * VMXNET3_REG_ALIGN)),
				       adapter->rx_queue[i].rx_ring[1].next2fill);
	vmxnet3_set_mc(adapter->netdev);
	vmxnet3_check_link(adapter, true);
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	unsigned long flags;
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	unsigned long flags;
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_disable_all_intrs(adapter);
		napi_disable(&adapter->rx_queue[i].napi);
	netif_tx_disable(adapter->netdev);
	vmxnet3_tq_cleanup_all(adapter);
	vmxnet3_rq_cleanup_all(adapter);
	vmxnet3_free_irqs(adapter);
	tmp = (mac[5] << 8) | mac[4];

vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
	vmxnet3_write_mac_addr(adapter, addr->sa_data);

vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
	unsigned long mmio_start, mmio_len;
			pci_name(pdev), err);
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
				"for adapter %s\n", pci_name(pdev));
			"%s\n", pci_name(pdev));
			"error %d\n", pci_name(pdev), err);
	size_t sz, i, ring0_size, ring1_size, comp_size;

	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
	ring0_size = (ring0_size + sz - 1) / sz * sz;
	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
	comp_size = ring0_size + ring1_size;
		rq->rx_ring[0].size = ring0_size;
		rq->rx_ring[1].size = ring1_size;
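/*
 * (ring0_size + sz - 1) / sz * sz is the usual round-up-to-a-multiple
 * idiom: it bumps the requested ring size up to the next multiple of the
 * per-packet buffer granularity sz, so a packet's set of buffers never
 * straddles a ring wrap. E.g. with sz = 3 and a requested size of 8,
 * (8 + 2) / 3 * 3 = 9.
 */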
		       u32 rx_ring_size, u32 rx_ring2_size)

	tq->tx_ring.size = tx_ring_size;
	err = vmxnet3_tq_create(tq, adapter);
	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
	vmxnet3_adjust_rx_ring_size(adapter);
	err = vmxnet3_rq_create(rq, adapter);
			"queues. Aborting.\n");
	adapter = netdev_priv(netdev);
		napi_enable(&adapter->rx_queue[i].napi);

vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
	netdev->mtu = new_mtu;
	if (netif_running(netdev)) {
		vmxnet3_adjust_rx_ring_size(adapter);
		err = vmxnet3_rq_create_all(adapter);
				" error %d. Closing it.\n", netdev->name, err);
				"Closing it\n", netdev->name, err);

	netdev_info(adapter->netdev,
		    "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
		    dma64 ? " highDMA" : "");
	mac[4] = tmp & 0xff;
	mac[5] = (tmp >> 8) & 0xff;
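/*
 * The MAC address travels through two device registers (low and high
 * halves); bytes 4 and 5 sit in the second register, which is why they
 * are packed into and unpacked from a single u32 as (mac[5] << 8) |
 * mac[4] here and in vmxnet3_write_mac_addr() above.
 */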
#ifdef CONFIG_PCI_MSI
	int err = 0, vector_threshold;

	while (vectors >= vector_threshold) {
		} else if (err < 0) {
			netdev_err(adapter->netdev,
				   "Failed to enable MSI-X, error: %d\n", err);
		} else if (err < vector_threshold) {
			netdev_err(adapter->netdev,
				   "Failed to enable %d MSI-X, trying %d instead\n",
				   vectors, vector_threshold);
			vectors = vector_threshold;
	netdev_info(adapter->netdev,
		    "Number of MSI-X interrupts which can be allocated is lower than the minimum threshold required.\n");
	unsigned long flags;
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	adapter->intr.type = cfg & 0x3;
	adapter->intr.mask_mode = (cfg >> 2) & 0x3;

#ifdef CONFIG_PCI_MSI
		adapter->intr.num_intrs += 1;
		adapter->intr.num_intrs = (adapter->intr.num_intrs >
				VMXNET3_LINUX_MIN_MSIX_VECT
				? adapter->intr.num_intrs :
				VMXNET3_LINUX_MIN_MSIX_VECT);
		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
			adapter->intr.msix_entries[vector].entry = vector;
		err = vmxnet3_acquire_msix_vectors(adapter,
						   adapter->intr.num_intrs);
			adapter->intr.num_intrs =
			netdev_info(adapter->netdev,
				    "Failed to enable MSI-X, error %d. Limiting #rx queues to 1, try MSI.\n",

	err = pci_enable_msi(adapter->pdev);
		adapter->intr.num_intrs = 1;

	adapter->intr.num_intrs = 1;
vmxnet3_tx_timeout(struct net_device *netdev)
	netif_wake_queue(adapter->netdev);

	if (netif_running(adapter->netdev)) {
vmxnet3_probe_device(struct pci_dev *pdev,
		     const struct pci_device_id *id)
	.ndo_stop = vmxnet3_close,
	.ndo_start_xmit = vmxnet3_xmit_frame,
	.ndo_set_mac_address = vmxnet3_set_mac_addr,
	.ndo_change_mtu = vmxnet3_change_mtu,
	.ndo_tx_timeout = vmxnet3_tx_timeout,
	.ndo_set_rx_mode = vmxnet3_set_mc,
	.ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = vmxnet3_netpoll,

			   max(num_tx_queues, num_rx_queues));
		   num_tx_queues, num_rx_queues);
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
		goto err_alloc_shared;
		goto err_alloc_queue_desc;
	if (adapter->rss_conf == NULL) {
	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
		" %s\n", ver, pci_name(pdev));
			"adapter %s\n", ver, pci_name(pdev));
	vmxnet3_declare_features(adapter, dma64);
	vmxnet3_alloc_intr_resources(adapter);
		adapter->rss = true;
		adapter->rss = false;
	vmxnet3_read_mac_addr(adapter, mac);
			       vmxnet3_poll_rx_only, 64);
	vmxnet3_check_link(adapter, false);
	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->rss_conf);
err_alloc_queue_desc:
	pci_set_drvdata(pdev, NULL);
vmxnet3_remove_device(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	vmxnet3_free_intr_resources(adapter);
	vmxnet3_free_pci_resources(adapter);
	kfree(adapter->rss_conf);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct in_device *in_dev;
	struct in_ifaddr *ifa;
	unsigned long flags;

	if (!netif_running(netdev))
		napi_disable(&adapter->rx_queue[i].napi);
	vmxnet3_disable_all_intrs(adapter);
	vmxnet3_free_irqs(adapter);
	vmxnet3_free_intr_resources(adapter);
	netif_tx_stop_all_queues(netdev);

	memset(pmConf, 0, sizeof(*pmConf));

		in_dev = in_dev_get(netdev);
		ifa = (struct in_ifaddr *)in_dev->ifa_list;
		pmConf->filters[i].maskSize =
			(pmConf->filters[i].patternSize - 1) / 8 + 1;
		arpreq = (u8 *)(ahdr + 1);
		arpreq += 2 * ETH_ALEN + sizeof(u32);
		*(u32 *)arpreq = ifa->ifa_address;

		pmConf->filters[i].mask[0] = 0x00;
		pmConf->filters[i].mask[1] = 0x30;
		pmConf->filters[i].mask[2] = 0x30;
		pmConf->filters[i].mask[3] = 0x00;
		pmConf->filters[i].mask[4] = 0xC0;
		pmConf->filters[i].mask[5] = 0x03;
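/*
 * Each mask byte above covers eight bytes of the wake pattern, one bit
 * per pattern byte: the bits set in mask[1], mask[2], mask[4] and mask[5]
 * select the bytes the device must match in the pattern built above (the
 * EtherType, the ARP header fields, and the target IP written into
 * arpreq), so an ARP request for our address wakes the host.
 */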
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
vmxnet3_resume(struct device *device)
	unsigned long flags;
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!netif_running(netdev))
	memset(pmConf, 0, sizeof(*pmConf));
	pci_enable_wake(pdev, PCI_D0, 0);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
	vmxnet3_alloc_intr_resources(adapter);
	vmxnet3_request_irqs(adapter);
		napi_enable(&adapter->rx_queue[i].napi);
	vmxnet3_enable_all_intrs(adapter);

static const struct dev_pm_ops vmxnet3_pm_ops = {
	.resume = vmxnet3_resume,

	.id_table = vmxnet3_pciid_table,
	.probe = vmxnet3_probe_device,
	.driver.pm = &vmxnet3_pm_ops,
vmxnet3_init_module(void)
	return pci_register_driver(&vmxnet3_driver);

vmxnet3_exit_module(void)