#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"

static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,

if (direction == -1) {

index = ((16 * (queue & 1)) + (8 * direction));

ivar &= ~(0xFF << index);
ivar |= (msix_vector << index);

if (tx_buffer_info->dma) {

tx_buffer_info->dma = 0;

if (tx_buffer_info->skb) {
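/*
 * Tx descriptor sizing: a single descriptor carries at most
 * IXGBE_MAX_DATA_PER_TXD (16KB) of data, TXD_USE_COUNT() gives the number
 * of descriptors a buffer of S bytes needs, and DESC_NEEDED is the per-skb
 * descriptor budget used when checking for ring space.
 */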
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

static void ixgbevf_tx_timeout(struct net_device *netdev);

unsigned int i, eop, count = 0;

(count < tx_ring->count)) {
bool cleaned = false;

for ( ; !cleaned; count++) {

cleaned = (i == eop);
skb = tx_buffer_info->skb;

if (cleaned && skb) {
unsigned int segs, bytecount;

segs = skb_shinfo(skb)->gso_segs ?: 1;

bytecount = ((segs - 1) * skb_headlen(skb)) +

total_packets += segs;
total_bytes += bytecount;

ixgbevf_unmap_and_free_tx_resource(tx_ring,

tx_desc->wb.status = 0;

if (i == tx_ring->count)
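/* wake the Tx queue once at least this many descriptors are free again */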
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)

if (__netif_subqueue_stopped(tx_ring->netdev,

netif_wake_subqueue(tx_ring->netdev,

u64_stats_update_begin(&tx_ring->syncp);

u64_stats_update_end(&tx_ring->syncp);

q_vector->tx.total_packets += total_packets;

return count < tx_ring->count;

__vlan_hwaccel_put_tag(skb, tag);

static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,

skb_checksum_none_assert(skb);

while (cleaned_count--) {

skb = netdev_alloc_skb_ip_align(rx_ring->netdev,

if (i == rx_ring->count)

ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,

int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;

skb = rx_buffer_info->skb;

if (rx_buffer_info->dma) {

rx_buffer_info->dma = 0;

if (i == rx_ring->count)

ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

total_rx_bytes += skb->len;

u32 header_fixup_len = skb_headlen(skb);
if (header_fixup_len < 14)

ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

rx_desc->wb.upper.status_error = 0;

ixgbevf_alloc_rx_buffers(adapter, rx_ring,

ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

u64_stats_update_begin(&rx_ring->syncp);

u64_stats_update_end(&rx_ring->syncp);
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
bool clean_complete = true;

clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

if (q_vector->rx.count > 1)
	per_ring_budget = max(budget/q_vector->rx.count, 1);

per_ring_budget = budget;

clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,

if (adapter->rx_itr_setting & 1)
	ixgbevf_set_itr(q_vector);

ixgbevf_irq_enable_queues(adapter, 1 << q_vector->v_idx);

int v_idx = q_vector->v_idx;

int q_vectors, v_idx;

for (v_idx = 0; v_idx < q_vectors; v_idx++) {

q_vector = adapter->q_vector[v_idx];

ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

if (q_vector->tx.ring && !q_vector->rx.ring) {

if (adapter->tx_itr_setting == 1)

q_vector->itr = adapter->tx_itr_setting;

if (adapter->rx_itr_setting == 1)

q_vector->itr = adapter->rx_itr_setting;

adapter->eims_enable_mask |= 1 << v_idx;

ixgbevf_write_eitr(q_vector);

ixgbevf_set_ivar(adapter, -1, 1, v_idx);

adapter->eims_other = 1 << v_idx;
adapter->eims_enable_mask |= adapter->eims_other;
u8 itr_setting = ring_container->itr;

timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us;

switch (itr_setting) {

if (bytes_perint > 10)

if (bytes_perint > 20)

else if (bytes_perint <= 10)

if (bytes_perint <= 20)

ring_container->itr = itr_setting;

u32 new_itr = q_vector->itr;

ixgbevf_update_itr(q_vector, &q_vector->tx);
ixgbevf_update_itr(q_vector, &q_vector->rx);

current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

switch (current_itr) {

if (new_itr != q_vector->itr) {
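/* blend the new ITR value with the previous one so the interrupt rate changes gradually */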
new_itr = (10 * new_itr * q_vector->itr) / ((9 * new_itr) + q_vector->itr);

q_vector->itr = new_itr;

ixgbevf_write_eitr(q_vector);

hw->mac.get_link_status = 1;
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)

if (q_vector->rx.ring || q_vector->tx.ring)
	napi_schedule(&q_vector->napi);

a->rx_ring[r_idx].next = q_vector->rx.ring;
q_vector->rx.ring = &a->rx_ring[r_idx];
q_vector->rx.count++;

a->tx_ring[t_idx].next = q_vector->tx.ring;

q_vector->tx.count++;

static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)

int rxr_idx = 0, txr_idx = 0;

for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
	map_vector_to_rxq(adapter, v_start, rxr_idx);

for (; txr_idx < txr_remaining; v_start++, txr_idx++)
	map_vector_to_txq(adapter, v_start, txr_idx);

for (i = v_start; i < q_vectors; i++) {

for (j = 0; j < rqpv; j++) {
	map_vector_to_rxq(adapter, i, rxr_idx);

for (i = v_start; i < q_vectors; i++) {

for (j = 0; j < tqpv; j++) {
	map_vector_to_txq(adapter, i, txr_idx);

for (vector = 0; vector < q_vectors; vector++) {

if (q_vector->tx.ring && q_vector->rx.ring) {
	 "%s-%s-%d", netdev->name, "TxRx", ri++);
} else if (q_vector->rx.ring) {
	 "%s-%s-%d", netdev->name, "rx", ri++);
} else if (q_vector->tx.ring) {
	 "%s-%s-%d", netdev->name, "tx", ti++);

err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
		  q_vector->name, q_vector);

"request_irq failed for MSIX interrupt "

goto free_queue_irqs;

&ixgbevf_msix_other, 0, netdev->name, adapter);

"request_irq for msix_other failed: %d\n", err);
goto free_queue_irqs;
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)

for (i = 0; i < q_vectors; i++) {

q_vector->rx.ring = NULL;

q_vector->rx.count = 0;
q_vector->tx.count = 0;

err = ixgbevf_request_msix_irqs(adapter);

"request_irq failed, Error %d\n", err);

for (; i >= 0; i--) {

if (!adapter->q_vector[i]->rx.ring &&

ixgbevf_reset_q_vectors(adapter);

static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)

u32 i, j, tdlen, txctrl;

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)

adapter->rx_ring[i].rx_buf_len = rx_buf_len;

ixgbevf_set_rx_buffer_len(adapter);

ixgbevf_configure_srrctl(adapter, j);

if (!hw->mac.ops.set_vfta)

err = hw->mac.ops.set_vfta(hw, vid, 0, true);

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)

if (hw->mac.ops.set_vfta)
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);

pr_err("Too many unicast filters - No Space\n");

hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);

hw->mac.ops.set_uc_addr(hw, 0, NULL);

static void ixgbevf_set_rx_mode(struct net_device *netdev)

if (hw->mac.ops.update_mc_addr_list)
	hw->mac.ops.update_mc_addr_list(hw, netdev);

ixgbevf_write_uc_addr_list(netdev);

for (q_idx = 0; q_idx < q_vectors; q_idx++) {
	q_vector = adapter->q_vector[q_idx];
	napi_enable(&q_vector->napi);

for (q_idx = 0; q_idx < q_vectors; q_idx++) {
	q_vector = adapter->q_vector[q_idx];
	napi_disable(&q_vector->napi);

ixgbevf_set_rx_mode(netdev);

ixgbevf_restore_vlan(adapter);

ixgbevf_configure_tx(adapter);
ixgbevf_configure_rx(adapter);

ixgbevf_alloc_rx_buffers(adapter, ring,
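/* number of times RXDCTL.ENABLE is polled before the queue enable is reported as failed */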
#define IXGBE_MAX_RX_DESC_POLL 10

static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,

int j = adapter->rx_ring[rxr].reg_idx;

if (k >= IXGBE_MAX_RX_DESC_POLL) {
	hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d not set within the polling period\n", rxr);

ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
			(adapter->rx_ring[rxr].count - 1));

if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
	adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - adapter->stats.base_vfgprc;
	adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - adapter->stats.base_vfgptc;
	adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - adapter->stats.base_vfgorc;
	adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - adapter->stats.base_vfgotc;
	adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - adapter->stats.base_vfmprc;

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)

adapter->stats.last_vfgorc |=

adapter->stats.last_vfgotc |=

adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
int err = 0, idx = 0;

txdctl |= (8 << 16);

for (i = 0; i < num_rx_rings; i++) {

ixgbevf_rx_desc_queue_enable(adapter, i);

ixgbevf_configure_msix(adapter);

if (hw->mac.ops.set_rar) {
	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

ixgbevf_napi_enable_all(adapter);

netif_tx_start_all_queues(netdev);

ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);

hw->mac.get_link_status = 1;

ixgbevf_negotiate_api(adapter);

ixgbevf_configure(adapter);

ixgbevf_up_complete(adapter);

ixgbevf_irq_enable(adapter);

for (i = 0; i < rx_ring->count; i++) {

if (rx_buffer_info->dma) {

rx_buffer_info->dma = 0;

if (rx_buffer_info->skb) {

dev_kfree_skb(this);

for (i = 0; i < tx_ring->count; i++) {

ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)

ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);

static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)

ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);

netif_tx_disable(netdev);

netif_tx_stop_all_queues(netdev);

ixgbevf_irq_disable(adapter);

ixgbevf_napi_disable_all(adapter);

if (!pci_channel_offline(adapter->pdev))

ixgbevf_clean_all_tx_rings(adapter);
ixgbevf_clean_all_rx_rings(adapter);
if (hw->mac.ops.reset_hw(hw))
	hw_dbg(hw, "PF still resetting\n");

hw->mac.ops.init_hw(hw);

if (is_valid_ether_addr(adapter->hw.mac.addr)) {

static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,

int err, vector_threshold;

while (vectors >= vector_threshold) {

if (vectors < vector_threshold) {

"Unable to allocate MSI-X interrupts\n");

goto err_tx_ring_allocation;

goto err_rx_ring_allocation;

err_rx_ring_allocation:

err_tx_ring_allocation:

static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)

for (vector = 0; vector < v_budget; vector++)

ixgbevf_acquire_msix_vectors(adapter, v_budget);

err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

int q_idx, num_q_vectors;

for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {

q_vector->v_idx = q_idx;

adapter->q_vector[q_idx] = q_vector;

q_vector = adapter->q_vector[q_idx];

int q_idx, num_q_vectors;

for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {

if (q_idx < napi_vectors)

static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)

static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)

ixgbevf_set_num_queues(adapter);

err = ixgbevf_set_interrupt_capability(adapter);

"Unable to setup interrupt capabilities\n");
goto err_set_interrupt;

err = ixgbevf_alloc_q_vectors(adapter);

hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
goto err_alloc_q_vectors;

err = ixgbevf_alloc_queues(adapter);

pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;

hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",

ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
ixgbevf_reset_interrupt_capability(adapter);

static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)

ixgbevf_free_q_vectors(adapter);
ixgbevf_reset_interrupt_capability(adapter);
hw->mbx.ops.init_params(hw);

err = hw->mac.ops.reset_hw(hw);

"PF still in reset state, assigning new address\n");
eth_hw_addr_random(adapter->netdev);

adapter->netdev->addr_len);

err = hw->mac.ops.init_hw(hw);

pr_err("init_shared_code failed: %d\n", err);

adapter->netdev->addr_len);
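/*
 * The VF statistics registers are 32 bits wide (36 bits for the byte
 * counters) and wrap around; these helpers fold each new hardware reading
 * into a 64-bit software counter, adding the rollover whenever the current
 * reading is smaller than the previous one.
 */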
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
	u32 current_counter = IXGBE_READ_REG(hw, reg); \
	if (current_counter < last_counter) \
		counter += 0x100000000LL; \
	last_counter = current_counter; \
	counter &= 0xFFFFFFFF00000000LL; \
	counter |= current_counter; \

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
	u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
	u64 current_counter = (current_counter_msb << 32) | \
			      current_counter_lsb; \
	if (current_counter < last_counter) \
		counter += 0x1000000000LL; \
	last_counter = current_counter; \
	counter &= 0xFFFFFFF000000000LL; \
	counter |= current_counter; \

adapter->stats.vfgprc);

adapter->stats.vfgptc);

adapter->stats.last_vfgorc, adapter->stats.vfgorc);

adapter->stats.last_vfgotc, adapter->stats.vfgotc);

adapter->stats.vfmprc);
static void ixgbevf_watchdog(unsigned long data)

goto watchdog_short_circuit;

if (qv->rx.ring || qv->tx.ring)

watchdog_short_circuit:

static void ixgbevf_tx_timeout(struct net_device *netdev)

static void ixgbevf_watchdog_task(struct work_struct *work)

if (hw->mac.ops.check_link) {

need_reset = hw->mac.ops.check_link(hw, &link_speed,

netif_tx_stop_all_queues(netdev);

if (!netif_carrier_ok(netdev)) {
	hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",

	netif_tx_wake_all_queues(netdev);

if (netif_carrier_ok(netdev)) {
	hw_dbg(&adapter->hw, "NIC Link is Down\n");

	netif_tx_stop_all_queues(netdev);

ixgbevf_clean_tx_ring(adapter, tx_ring);

static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)

hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");

static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)

"Allocation for Tx Queue %u failed\n", i);

if (!rx_ring->desc) {
	"Unable to allocate memory for the receive descriptor ring\n");

static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)

"Allocation for Rx Queue %u failed\n", i);

ixgbevf_clean_rx_ring(adapter, rx_ring);

static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)

static int ixgbevf_open(struct net_device *netdev)

pr_err("Unable to start - perhaps the PF Driver isn't "

goto err_setup_reset;

ixgbevf_negotiate_api(adapter);

err = ixgbevf_setup_all_tx_resources(adapter);

err = ixgbevf_setup_all_rx_resources(adapter);

ixgbevf_configure(adapter);

ixgbevf_map_rings_to_vectors(adapter);

ixgbevf_up_complete(adapter);

err = ixgbevf_request_irq(adapter);

ixgbevf_irq_enable(adapter);

ixgbevf_free_irq(adapter);

ixgbevf_free_all_rx_resources(adapter);

ixgbevf_free_all_tx_resources(adapter);

static int ixgbevf_close(struct net_device *netdev)

ixgbevf_free_irq(adapter);

ixgbevf_free_all_tx_resources(adapter);
ixgbevf_free_all_rx_resources(adapter);
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,

if (!skb_is_gso(skb))

if (skb_header_cloned(skb)) {

struct iphdr *iph = ip_hdr(skb);

} else if (skb_is_gso_v6(skb)) {
	ipv6_hdr(skb)->payload_len = 0;
	tcp_hdr(skb)->check =

			      &ipv6_hdr(skb)->daddr,

l4len = tcp_hdrlen(skb);

*hdr_len = skb_transport_offset(skb) + l4len;

vlan_macip_lens = skb_network_header_len(skb);

ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,

u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;

vlan_macip_lens |= skb_network_header_len(skb);

l4_hdr = ip_hdr(skb)->protocol;

vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;

"partial checksum but proto=%x!\n",

mss_l4len_idx = tcp_hdrlen(skb) <<

mss_l4len_idx = sizeof(struct sctphdr) <<

mss_l4len_idx = sizeof(struct udphdr) << IXGBE_ADVTXD_L4LEN_SHIFT;

"partial checksum but l4 proto=%x!\n",

ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,

unsigned int total = skb->len;

unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

len = min(skb_headlen(skb), total);

if (i == tx_ring->count)

for (f = 0; f < nr_frags; f++) {

frag = &skb_shinfo(skb)->frags[f];
len = min((unsigned int)skb_frag_size(frag), total);

tx_buffer_info->dma = skb_frag_dma_map(tx_ring->dev, frag,

			      tx_buffer_info->dma))

if (i == tx_ring->count)

i = tx_ring->count - 1;

dev_err(tx_ring->dev, "TX DMA map failed\n");

tx_buffer_info->dma = 0;

while (count >= 0) {

i += tx_ring->count;

ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);

static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)

u32 olinfo_status = 0, cmd_type_len = 0;

tx_desc->read.cmd_type_len =

if (i == tx_ring->count)

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)

return __ixgbevf_maybe_stop_tx(tx_ring, size);
unsigned int tx_flags = 0;
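/*
 * When PAGE_SIZE exceeds IXGBE_MAX_DATA_PER_TXD, a single page-sized
 * fragment can span more than one descriptor, so the fragments are walked
 * individually here instead of simply adding nr_frags to the count.
 */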
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD

tx_ring = &adapter->tx_ring[r_idx];

#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)

count += skb_shinfo(skb)->nr_frags;

if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {

tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);

else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))

ixgbevf_tx_queue(tx_ring, tx_flags,
		 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),

static int ixgbevf_set_mac(struct net_device *netdev, void *p)

if (!is_valid_ether_addr(addr->sa_data))

if (hw->mac.ops.set_rar)
	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)

if ((new_mtu < 68) || (max_frame > max_possible_frame))

hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

netdev->mtu = new_mtu;

if (netif_running(netdev))
struct net_device *netdev = pci_get_drvdata(pdev);

if (netif_running(netdev)) {

	ixgbevf_free_irq(adapter);
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

ixgbevf_clear_interrupt_scheme(adapter);

static int ixgbevf_resume(struct pci_dev *pdev)

dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");

err = ixgbevf_init_interrupt_scheme(adapter);

dev_err(&pdev->dev, "Cannot initialize interrupts\n");

if (netif_running(netdev)) {
	err = ixgbevf_open(netdev);

static void ixgbevf_shutdown(struct pci_dev *pdev)

start = u64_stats_fetch_begin_bh(&ring->syncp);

} while (u64_stats_fetch_retry_bh(&ring->syncp, start));

start = u64_stats_fetch_begin_bh(&ring->syncp);

} while (u64_stats_fetch_retry_bh(&ring->syncp, start));

.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
.ndo_start_xmit = ixgbevf_xmit_frame,
.ndo_set_rx_mode = ixgbevf_set_rx_mode,
.ndo_get_stats64 = ixgbevf_get_stats,
.ndo_set_mac_address = ixgbevf_set_mac,
.ndo_change_mtu = ixgbevf_change_mtu,
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
static void ixgbevf_assign_netdev_ops(struct net_device *dev)

static int cards_found;
int err, pci_using_dac;

"configuration, aborting\n");

dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);

goto err_alloc_etherdev;

pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);

adapter->netdev = netdev;
adapter->pdev = pdev;

ixgbevf_assign_netdev_ops(netdev);

err = ixgbevf_sw_init(adapter);

if (!is_valid_ether_addr(netdev->dev_addr)) {
	pr_err("invalid MAC address\n");

err = ixgbevf_init_interrupt_scheme(adapter);

if (hw->mac.ops.get_bus_info)
	hw->mac.ops.get_bus_info(hw);

ixgbevf_init_last_counter_stats(adapter);

hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");

ixgbevf_clear_interrupt_scheme(adapter);

ixgbevf_reset_interrupt_capability(adapter);

struct net_device *netdev = pci_get_drvdata(pdev);

if (netdev->reg_state == NETREG_REGISTERED)

ixgbevf_clear_interrupt_scheme(adapter);
ixgbevf_reset_interrupt_capability(adapter);

hw_dbg(&adapter->hw, "Remove complete\n");
struct net_device *netdev = pci_get_drvdata(pdev);

if (netif_running(netdev))

struct net_device *netdev = pci_get_drvdata(pdev);

"Cannot re-enable PCI device after reset.\n");

static void ixgbevf_io_resume(struct pci_dev *pdev)

struct net_device *netdev = pci_get_drvdata(pdev);

if (netif_running(netdev))

.error_detected = ixgbevf_io_error_detected,
.slot_reset = ixgbevf_io_slot_reset,
.resume = ixgbevf_io_resume,

.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.suspend = ixgbevf_suspend,
.resume = ixgbevf_resume,
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler

static int __init ixgbevf_init_module(void)

pr_info("%s - version %s\n", ixgbevf_driver_string,

pr_info("%s\n", ixgbevf_copyright);

ret = pci_register_driver(&ixgbevf_driver);

static void __exit ixgbevf_exit_module(void)

char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)

return adapter->netdev->name;