#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2012 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
#ifdef CONFIG_IXGBE_DCA
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
static unsigned int allow_unsupported_sfp;
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
	switch (reginfo->ofs) {
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
		for (i = 0; i < 64; i++)
	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		for (j = 0; j < 8; j++)
		"trans_start last_rx\n");
	pr_info("%-15s %016lX %016lX %016lX\n",
	pr_info(" Register Name   Value\n");
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	if (!netdev || !netif_running(netdev))
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
		tx_ring = adapter->tx_ring[n];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
		goto rx_ring_summary;
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("------------------------------------\n");
		pr_info("T [desc] [address 63:0 ] "
			"[PlPOIdStDDt Ln] [bi->dma ] "
			"leng ntw timestamp bi->skb\n");
		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X] %016llX %016llX %016llX"
				" %04X %p %016llX %p", i,
					tx_buffer->skb->data,
	pr_info("Queue [NTU] [NTC]\n");
		pr_info("------------------------------------\n");
		pr_info("------------------------------------\n");
		pr_info("R [desc] [ PktBuf A0] "
			"[ HeadBuf DD] [bi->dma ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc] [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			u0 = (struct my_u0 *)rx_desc;
				"%016llX ---------------- %p", i,
					rx_buffer_info->skb);
				"%016llX %016llX %p", i,
					rx_buffer_info->skb);
			    rx_buffer_info->dma) {
						ixgbe_rx_bufsz(rx_ring), true);
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
	switch (hw->mac.type) {
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		if (direction == -1) {
			index = ((queue & 1) * 8);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
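/*
 * A worked example of the IVAR packing above: each 32-bit IVAR register
 * holds four 8-bit vector entries.  On 82598, direction 0 / queue 5 gives
 * index = ((0 * 64 + 5) >> 2) & 0x1F = 1 and byte lane 8 * (5 & 3) = 8,
 * so the vector lands in bits 15:8 of IVAR(1).  On 82599/X540 the Rx and
 * Tx causes of a queue pair get separate byte lanes selected by
 * (16 * (queue & 1)) + (8 * direction).
 */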
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
	switch (adapter->hw.mac.type) {
		mask = (qmask & 0xFFFFFFFF);
		mask = (qmask >> 32);
	if (tx_buffer->skb) {
static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
	switch (hw->mac.type) {
			  &adapter->tx_ring[i]->state);
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
		ixgbe_update_xoff_rx_lfc(adapter);
	switch (hw->mac.type) {
	return ring->stats.packets;
	return (head < tail) ?
		tail - head : (tail + ring->count - head);
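/*
 * The pending-descriptor math above handles ring wrap: with a 512-entry
 * ring, head == 500 and tail == 10 yields 10 + 512 - 500 = 22 descriptors
 * still outstanding; when the ring has not wrapped it is simply
 * tail - head.
 */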
static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	if ((tx_done_old == tx_done) && tx_pending) {
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
		ixgbe_service_event_schedule(adapter);
	unsigned int budget = q_vector->tx.work_limit;
		total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
		while (tx_desc != eop_desc) {
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_packets += total_packets;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
		ixgbe_tx_timeout_reset(adapter);
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		if (__netif_subqueue_stopped(tx_ring->netdev,
			netif_wake_subqueue(tx_ring->netdev,
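/*
 * Waking the queue only once TX_WAKE_THRESHOLD descriptors (two maximal
 * packets' worth) are free gives the restart path some hysteresis, so a
 * just-woken queue is unlikely to stop again on the very next frame.
 */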
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
	switch (hw->mac.type) {
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
	switch (hw->mac.type) {
	if (q_vector->cpu == cpu)
		ixgbe_update_tx_dca(adapter, ring, cpu);
		ixgbe_update_rx_dca(adapter, ring, cpu);
	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		ixgbe_update_dca(adapter->q_vector[i]);
static int __ixgbe_notify_dca(struct device *dev, void *data)
	unsigned long event = *(unsigned long *)data;
		ixgbe_setup_dca(adapter);
static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
	skb_checksum_none_assert(skb);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
				 bi->skb, ixgbe_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_rx_page_failed++;
		rx_ring->rx_stats.alloc_rx_page_failed++;
	i -= rx_ring->count;
		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
			i -= rx_ring->count;
		rx_desc->read.hdr_addr = 0;
	} while (cleaned_count);
	i += rx_ring->count;
		ixgbe_release_rx_desc(rx_ring, i);
static unsigned int ixgbe_get_headlen(unsigned char *data,
				      unsigned int max_len)
	unsigned char *network;
		protocol = hdr.eth->h_proto;
		protocol = hdr.vlan->h_vlan_encapsulated_proto;
		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
		hlen = (hdr.network[0] & 0x0F) << 2;
		if (hlen < sizeof(struct iphdr))
			return hdr.network - data;
		nexthdr = hdr.ipv4->protocol;
		hdr.network += hlen;
		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
		hlen = (hdr.network[12] & 0xF0) >> 2;
		if (hlen < sizeof(struct tcphdr))
			return hdr.network - data;
		hdr.network += hlen;
	if ((hdr.network - data) < max_len)
		return hdr.network - data;
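/*
 * The header-length decodes above follow the on-wire formats: the low
 * nibble of the first IP octet is the IHL in 32-bit words (0x45 yields
 * 5 << 2 = 20 bytes), and the high nibble of TCP octet 12 is the data
 * offset in words, so (hdr.network[12] & 0xF0) >> 2 maps 0x50 to a
 * 20-byte TCP header.
 */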
static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
	ixgbe_set_rsc_gso_size(rx_ring, skb);
static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
	ixgbe_update_rsc_stats(rx_ring, skb);
	ixgbe_rx_hash(rx_ring, rx_desc, skb);
	ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
		__vlan_hwaccel_put_tag(skb, vid);
static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
	ntc = (ntc < rx_ring->count) ? ntc : 0;
		__le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
			IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
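/*
 * For RSC descriptors the hardware reports how many receive buffers it
 * coalesced; append_cnt grows by rsc_cnt - 1 because the first buffer is
 * the head of the skb rather than an appended fragment.
 */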
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
	unsigned int pull_len;
	va = skb_frag_address(frag);
	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
	skb_frag_size_sub(frag, pull_len);
	skb->tail += pull_len;
static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
		IXGBE_CB(skb)->page_released = false;
					      ixgbe_rx_bufsz(rx_ring),
static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
	if (unlikely(ixgbe_test_staterr(rx_desc,
	if (skb_is_nonlinear(skb))
		ixgbe_pull_tail(rx_ring, skb);
	if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
		__skb_put(skb, pad_len);
static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
	new_buff->dma = old_buff->dma;
					 ixgbe_rx_bufsz(rx_ring),
static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
	struct page *page = rx_buffer->page;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
				   ixgbe_rx_bufsz(rx_ring);
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
#if (PAGE_SIZE < 8192)
	if (unlikely(page_count(page) != 1))
	page = rx_buffer->page;
	skb = rx_buffer->skb;
#if L1_CACHE_BYTES < 128
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			ixgbe_dma_sync_frag(rx_ring, skb);
					      ixgbe_rx_bufsz(rx_ring),
	if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
		ixgbe_reuse_rx_page(rx_ring, rx_buffer);
	} else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
		IXGBE_CB(skb)->page_released = true;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	unsigned int mss = 0;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
		if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
		skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
		if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
		if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
		total_rx_bytes += skb->len;
		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
			if (ddp_bytes > 0) {
					mss = rx_ring->netdev->mtu -
				total_rx_bytes += ddp_bytes;
		ixgbe_rx_skb(q_vector, skb);
	} while (likely(budget));
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
		u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
		q_vector = adapter->q_vector[v_idx];
			ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
			ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
		if (q_vector->tx.ring && !q_vector->rx.ring) {
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = adapter->tx_itr_setting;
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = adapter->rx_itr_setting;
	switch (adapter->hw.mac.type) {
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	u8 itr_setting = ring_container->itr;
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us;
	switch (itr_setting) {
		if (bytes_perint > 10)
		if (bytes_perint > 20)
		else if (bytes_perint <= 10)
		if (bytes_perint <= 20)
	ring_container->itr = itr_setting;
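/*
 * bytes_perint is bytes per microsecond of interrupt interval, i.e. MB/s,
 * so the 10 and 20 thresholds above roughly partition traffic into
 * latency bands: below ~10 MB/s stay at the lowest-latency (highest
 * interrupt rate) setting, above ~20 MB/s fall through to bulk latency.
 */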
	int v_idx = q_vector->v_idx;
	switch (adapter->hw.mac.type) {
		itr_reg |= (itr_reg << 16);
	u32 new_itr = q_vector->itr;
	ixgbe_update_itr(q_vector, &q_vector->tx);
	ixgbe_update_itr(q_vector, &q_vector->rx);
	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
	switch (current_itr) {
	if (new_itr != q_vector->itr) {
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);
		q_vector->itr = new_itr;
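/*
 * The damping above is exponential smoothing in interrupt-rate space:
 * 1/new_itr works out to 0.9 * (1/old_itr) + 0.1 * (1/target_itr), so
 * the ITR only moves a fraction of the way toward its new target on
 * each update instead of jumping.
 */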
static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
		if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
			hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
		if (!(eicr & IXGBE_EICR_GPI_SDP0))
	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
		e_crit(probe, "Fan has stopped, replace the adapter\n");
static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
	switch (adapter->hw.mac.type) {
		if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) &&
			ixgbe_service_event_schedule(adapter);
	e_crit(drv,
	       "Network adapter has been stopped because it has over heated. "
	       "Restart the computer. If the problem persists, "
	       "power off the system and replace the adapter\n");
		ixgbe_service_event_schedule(adapter);
		ixgbe_service_event_schedule(adapter);
		ixgbe_service_event_schedule(adapter);
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
	switch (hw->mac.type) {
		mask = (qmask & 0xFFFFFFFF);
		mask = (qmask >> 32);
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
	switch (hw->mac.type) {
		mask = (qmask & 0xFFFFFFFF);
		mask = (qmask >> 32);
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
	switch (adapter->hw.mac.type) {
	switch (adapter->hw.mac.type) {
#ifdef CONFIG_IXGBE_PTP
		ixgbe_irq_enable_queues(adapter, ~0);
static irqreturn_t ixgbe_msix_other(int irq, void *data)
	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);
	switch (hw->mac.type) {
			e_info(link, "Received unrecoverable ECC Err, please "
			       "reboot\n");
				int reinit_count = 0;
					ixgbe_service_event_schedule(adapter);
		ixgbe_check_sfp_event(adapter, eicr);
		ixgbe_check_overtemp_event(adapter, eicr);
	ixgbe_check_fan_failure(adapter, eicr);
#ifdef CONFIG_IXGBE_PTP
		ixgbe_irq_enable(adapter, false, false);
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);
	int per_ring_budget;
	bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
		ixgbe_update_dca(q_vector);
		clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
		per_ring_budget = budget;
		clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
	if (!clean_complete)
	ixgbe_set_itr(q_vector);
	ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
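/*
 * When several Rx rings share this vector the NAPI budget is split
 * evenly; the max(budget / nr_rings, 1) above guarantees every ring can
 * clean at least one descriptor per poll even with a small budget.
 */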
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		if (q_vector->tx.ring && q_vector->rx.ring) {
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
		} else if (q_vector->rx.ring) {
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
				 "%s-%s-%d", netdev->name, "tx", ti++);
		err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
				  q_vector->name, q_vector);
			e_err(probe, "request_irq failed for MSIX interrupt "
			      "Error: %d\n", err);
			goto free_queue_irqs;
			irq_set_affinity_hint(entry->vector,
			  ixgbe_msix_other, 0, netdev->name, adapter);
		e_err(probe, "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
		irq_set_affinity_hint(adapter->msix_entries[vector].vector,
static irqreturn_t ixgbe_intr(int irq, void *data)
		ixgbe_irq_enable(adapter, true, true);
	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);
	switch (hw->mac.type) {
		ixgbe_check_sfp_event(adapter, eicr);
		if (eicr & IXGBE_EICR_ECC)
			e_info(link, "Received unrecoverable ECC err, please "
			       "reboot\n");
		ixgbe_check_overtemp_event(adapter, eicr);
	ixgbe_check_fan_failure(adapter, eicr);
#ifdef CONFIG_IXGBE_PTP
	if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
	napi_schedule(&q_vector->napi);
		ixgbe_irq_enable(adapter, false, false);
		err = ixgbe_request_msix_irqs(adapter);
				  netdev->name, adapter);
				  netdev->name, adapter);
		e_err(probe, "request_irq failed, Error %d\n", err);
	for (vector = 0; vector < adapter->num_q_vectors; vector++) {
		if (!q_vector->rx.ring && !q_vector->tx.ring)
		irq_set_affinity_hint(entry->vector, NULL);
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
	switch (adapter->hw.mac.type) {
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);
	e_info(hw, "Legacy interrupt IVAR setup done\n");
		txdctl |= (1 << 16);
		txdctl |= (8 << 16);
	txdctl |= (1 << 8) |
		e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
	u8 tcs = netdev_get_num_tc(adapter->netdev);
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
	ixgbe_setup_mtqc(adapter);
static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
#ifdef CONFIG_IXGBE_DCB
static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
				      0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
				      0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 mrqc = 0, reta = 0;
	for (i = 0; i < 10; i++)
	for (i = 0, j = 0; i < 128; i++, j++) {
		reta = (reta << 8) | (j * 0x11);
	u8 tcs = netdev_get_num_tc(adapter->netdev);
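/*
 * The 128-entry RSS redirection table is written four entries per 32-bit
 * register.  Multiplying the queue index by 0x11 appears to mirror the
 * 4-bit index into both nibbles of each byte (j == 3 stores 0x33).
 */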
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
		      "the polling period\n", reg_idx);
		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
		      "the polling period\n", reg_idx);
	ixgbe_configure_srrctl(adapter, ring);
	ixgbe_configure_rscctl(adapter, ring);
		rxdctl &= ~0x3FFFFF;
	ixgbe_rx_desc_queue_enable(adapter, ring);
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
	u32 reg_offset, vf_shift;
	u32 gcr_ext, vmdctl;
	vf_shift = VMDQ_P(0) % 32;
	reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
	hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
	for (i = 0; i < adapter->num_vfs; i++) {
		if (!adapter->vfinfo[i].spoofchk_enabled)
static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
	    (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
	switch (hw->mac.type) {
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
	ixgbe_setup_psrtype(adapter);
	ixgbe_setup_rdrxctl(adapter);
	ixgbe_setup_mrqc(adapter);
	ixgbe_set_rx_buffer_len(adapter);
	hw->mac.ops.enable_rx_dma(hw, rxctrl);
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
	switch (hw->mac.type) {
static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
	switch (hw->mac.type) {
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
	ixgbe_vlan_rx_add_vid(adapter->netdev, 0);
		ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
	unsigned int rar_entries = hw->mac.num_rar_entries - 1;
	if (!hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
	for (; rar_entries > 0 ; rar_entries--)
		hw->mac.ops.clear_rar(hw, rar_entries);
		ixgbe_vlan_filter_disable(adapter);
	hw->mac.ops.update_mc_addr_list(hw, netdev);
		ixgbe_vlan_filter_enable(adapter);
	count = ixgbe_write_uc_addr_list(netdev);
		ixgbe_vlan_strip_enable(adapter);
		ixgbe_vlan_strip_disable(adapter);
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
		napi_enable(&adapter->q_vector[q_idx]->napi);
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
		napi_disable(&adapter->q_vector[q_idx]->napi);
#ifdef CONFIG_IXGBE_DCB
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
		netif_set_gso_max_size(adapter->netdev, 65536);
	netif_set_gso_max_size(adapter->netdev, 32768);
		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#define IXGBE_ETH_FRAMING 20
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
	    (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
		tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
	switch (hw->mac.type) {
	marker = rx_pba - kb;
		e_warn(drv, "Packet Buffer(%i) can not provide enough "
			    "headroom to support flow control. "
			    "Decrease MTU or number of traffic classes\n", pb);
	switch (hw->mac.type) {
static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
	int num_tc = netdev_get_num_tc(adapter->netdev);
	hw->fc.low_water = ixgbe_lpbthresh(adapter);
	for (i = 0; i < num_tc; i++) {
		hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
		if (hw->fc.low_water > hw->fc.high_water[i])
			hw->fc.low_water = 0;
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
	u8 tc = netdev_get_num_tc(adapter->netdev);
	ixgbe_pbthresh_setup(adapter);
static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
	ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
	ixgbe_configure_dcb(adapter);
	ixgbe_configure_virtualization(adapter);
	ixgbe_restore_vlan(adapter);
	switch (hw->mac.type) {
		hw->mac.ops.disable_rx_buff(hw);
		ixgbe_fdir_filter_restore(adapter);
	switch (hw->mac.type) {
		hw->mac.ops.enable_rx_buff(hw);
	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
	switch (hw->phy.type) {
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
	bool negotiation, link_up = false;
	if (hw->mac.ops.check_link)
		ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
	if (hw->mac.ops.setup_link)
		ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
	switch (hw->mac.type) {
	switch (adapter->hw.mac.type) {
static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
	ixgbe_get_hw_control(adapter);
	ixgbe_setup_gpie(adapter);
		ixgbe_configure_msix(adapter);
		ixgbe_configure_msi_and_legacy(adapter);
	if (hw->mac.ops.enable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
		hw->mac.ops.enable_tx_laser(hw);
	ixgbe_napi_enable_all(adapter);
	if (ixgbe_is_sfp(hw)) {
		ixgbe_sfp_link_config(adapter);
		err = ixgbe_non_sfp_link_config(hw);
			e_err(probe, "link_config FAILED %d\n", err);
	ixgbe_irq_enable(adapter, true, true);
			e_crit(drv, "Fan has stopped, replace the adapter\n");
	netif_tx_start_all_queues(adapter->netdev);
	ixgbe_configure(adapter);
	ixgbe_up_complete(adapter);
	err = hw->mac.ops.init_hw(hw);
		e_dev_err("master disable timed out\n");
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated with "
			   "your hardware. If you are experiencing problems "
			   "please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	if (hw->mac.san_mac_rar_index)
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
	for (i = 0; i < rx_ring->count; i++) {
		if (rx_buffer->skb) {
			if (IXGBE_CB(skb)->page_released) {
					       ixgbe_rx_bufsz(rx_ring),
				IXGBE_CB(skb)->page_released = false;
		if (rx_buffer->page)
				       ixgbe_rx_pg_order(rx_ring));
static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
	for (i = 0; i < tx_ring->count; i++) {
	netdev_tx_reset_queue(txring_txq(tx_ring));
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);
	ixgbe_irq_disable(adapter);
	ixgbe_napi_disable_all(adapter);
		for (i = 0 ; i < adapter->num_vfs; i++)
			adapter->vfinfo[i].clear_to_send = false;
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
	switch (hw->mac.type) {
	if (!pci_channel_offline(adapter->pdev))
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
		hw->mac.ops.disable_tx_laser(hw);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);
#ifdef CONFIG_IXGBE_DCA
	ixgbe_setup_dca(adapter);
static void ixgbe_tx_timeout(struct net_device *netdev)
	ixgbe_tx_timeout_reset(adapter);
#ifdef CONFIG_IXGBE_DCB
	switch (hw->mac.type) {
#ifdef CONFIG_IXGBE_DCB
#ifdef CONFIG_IXGBE_DCB
	switch (hw->mac.type) {
			tc = &adapter->dcb_cfg.tc_config[j];
	tc = &adapter->dcb_cfg.tc_config[0];
	adapter->dcb_cfg.pfc_mode_enable = false;
	ixgbe_pbthresh_setup(adapter);
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;
#ifdef CONFIG_PCI_IOV
	adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
		e_dev_err("EEPROM initialization failed\n");
	int orig_node = dev_to_node(dev);
		numa_node = tx_ring->q_vector->numa_node;
	set_dev_node(dev, numa_node);
		set_dev_node(dev, orig_node);
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
	int orig_node = dev_to_node(dev);
		numa_node = rx_ring->q_vector->numa_node;
	set_dev_node(dev, numa_node);
		set_dev_node(dev, orig_node);
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
	ixgbe_clean_tx_ring(tx_ring);
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
		if (adapter->tx_ring[i]->desc)
	ixgbe_clean_rx_ring(rx_ring);
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
		if (adapter->rx_ring[i]->desc)
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
static int ixgbe_open(struct net_device *netdev)
	err = ixgbe_setup_all_tx_resources(adapter);
	err = ixgbe_setup_all_rx_resources(adapter);
	ixgbe_configure(adapter);
	err = ixgbe_request_irq(adapter);
		goto err_set_queues;
	err = netif_set_real_num_rx_queues(netdev,
		goto err_set_queues;
	ixgbe_up_complete(adapter);
	ixgbe_free_irq(adapter);
	ixgbe_free_all_rx_resources(adapter);
	ixgbe_free_all_tx_resources(adapter);
static int ixgbe_close(struct net_device *netdev)
	ixgbe_free_irq(adapter);
	ixgbe_fdir_filter_exit(adapter);
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);
	ixgbe_release_hw_control(adapter);
static int ixgbe_resume(struct pci_dev *pdev)
		e_dev_err("Cannot enable PCI device from suspend\n");
	if (!err && netif_running(netdev))
		err = ixgbe_open(netdev);
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
	if (netif_running(netdev)) {
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	if (hw->mac.ops.enable_tx_laser &&
	    (hw->phy.multispeed_fiber ||
		hw->mac.ops.enable_tx_laser(hw);
	switch (hw->mac.type) {
	*enable_wake = !!wufc;
	ixgbe_release_hw_control(adapter);
	retval = __ixgbe_shutdown(pdev, &wake);
static void ixgbe_shutdown(struct pci_dev *pdev)
	__ixgbe_shutdown(pdev, &wake);
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		bytes += rx_ring->stats.bytes;
		restart_queue += tx_ring->tx_stats.restart_queue;
		bytes += tx_ring->stats.bytes;
	for (i = 0; i < 8; i++) {
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		switch (hw->mac.type) {
	for (i = 0; i < 16; i++) {
	hwstats->gprc -= missed_rx;
	ixgbe_update_xoff_received(adapter);
	switch (hw->mac.type) {
		for (i = 0; i < 16; i++)
		if (adapter->fcoe.ddp_pool) {
				noddp += ddp_pool->noddp;
	hwstats->bprc += bprc;
		hwstats->mprc -= bprc;
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->ptc64 -= xon_off_tot;
	netdev->stats.multicast = hwstats->mprc;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_missed_errors = total_mpc;
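/*
 * The MAC counts XON/XOFF pause frames as transmitted packets, so the
 * subtractions above remove lxon + lxoff from the good, multicast and
 * 64-byte Tx counters to keep the reported stats limited to data frames.
 */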
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
				&(adapter->tx_ring[i]->state));
		e_err(probe, "failed to finish FDIR re-initialization, "
		      "ignored adding FDIR ATR filters\n");
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
	if (netif_carrier_ok(adapter->netdev)) {
			if (qv->rx.ring || qv->tx.ring)
				eics |= ((u64)1 << i);
		ixgbe_irq_rearm_queues(adapter, eics);
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
	bool link_up = adapter->link_up;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		hw->mac.ops.fc_enable(hw);
		ixgbe_set_rx_drop_en(adapter);
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
	bool flow_rx, flow_tx;
	if (netif_carrier_ok(netdev))
	switch (hw->mac.type) {
#ifdef CONFIG_IXGBE_PTP
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
	       ((flow_rx && flow_tx) ? "RX/TX" :
		(flow_tx ? "TX" : "None"))));
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
	if (!netif_carrier_ok(netdev))
#ifdef CONFIG_IXGBE_PTP
	e_info(drv, "NIC Link is Down\n");
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
	int some_tx_pending = 0;
	if (!netif_carrier_ok(adapter->netdev)) {
				some_tx_pending = 1;
		if (some_tx_pending) {
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
	ixgbe_watchdog_update_link(adapter);
		ixgbe_watchdog_link_is_up(adapter);
		ixgbe_watchdog_link_is_down(adapter);
	ixgbe_spoof_check(adapter);
	ixgbe_watchdog_flush_tx(adapter);
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
	err = hw->phy.ops.identify_sfp(hw);
		err = hw->phy.ops.reset(hw);
		err = hw->mac.ops.setup_sfp(hw);
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported "
			  "SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a "
			  "supported module.\n");
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
#ifdef CONFIG_PCI_IOV
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
	for (vf = 0; vf < adapter->num_vfs; vf++) {
		ciaa = (vf << 16) | 0x80000000;
			netdev_err(netdev, "VF %d Hung DMA\n", vf);
			ciaa = (vf << 16) | 0x80000000;
static void ixgbe_service_timer(unsigned long data)
	unsigned long next_event_offset;
		next_event_offset = HZ / 10;
		next_event_offset = HZ * 2;
#ifdef CONFIG_PCI_IOV
		goto normal_timer_service;
		ixgbe_check_for_bad_vf(adapter);
			next_event_offset = HZ / 50;
normal_timer_service:
	ixgbe_service_event_schedule(adapter);
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	ixgbe_reset_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);
#ifdef CONFIG_IXGBE_PTP
	ixgbe_service_event_complete(adapter);
static int ixgbe_tso(struct ixgbe_ring *tx_ring,
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	if (!skb_is_gso(skb))
	if (skb_header_cloned(skb)) {
		struct iphdr *iph = ip_hdr(skb);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
					 &ipv6_hdr(skb)->daddr,
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	vlan_macip_lens = skb_network_header_len(skb);
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
		vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ip_hdr(skb)->protocol;
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
				 "partial checksum but proto=%x!\n",
			mss_l4len_idx = tcp_hdrlen(skb) <<
			mss_l4len_idx = sizeof(struct sctphdr) <<
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
				 "partial checksum but l4 proto=%x!\n",
			  type_tucmd, mss_l4len_idx);
static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
#ifdef CONFIG_IXGBE_PTP
					 u32 tx_flags, unsigned int paylen)
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
	tx_desc->read.olinfo_status = olinfo_status;
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
	unsigned int size = skb_headlen(skb);
	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbe_tx_cmd_type(tx_flags);
			tx_desc->read.cmd_type_len =
			if (i == tx_ring->count) {
			tx_desc->read.olinfo_status = 0;
		if (i == tx_ring->count) {
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
		tx_desc->read.olinfo_status = 0;
	tx_desc->read.cmd_type_len = cmd_type;
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
	if (i == tx_ring->count)
	dev_err(tx_ring->dev, "TX DMA map failed\n");
		if (tx_buffer == first)
static void ixgbe_atr(struct ixgbe_ring *ring,
	unsigned char *network;
	hdr.network = skb_network_header(first->skb);
	th = tcp_hdr(first->skb);
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
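/*
 * The ATR signature filter only carries a 32-bit IP field, so an IPv6
 * flow is XOR-folded above: all eight 32-bit words of the source and
 * destination addresses collapse into common.ip.
 */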
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
	if (likely(ixgbe_desc_unused(tx_ring) < size))
static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
	return __ixgbe_maybe_stop_tx(tx_ring, size);
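/*
 * ixgbe_maybe_stop_tx() is the inline fast path: one comparison when
 * descriptors are plentiful.  Only the slow path stops the subqueue,
 * re-checks the free count once the stopped state is visible, and
 * restarts the queue if descriptors were reclaimed in the meantime.
 */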
	int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
	__be16 protocol = vlan_get_protocol(skb);
		return skb_tx_hash(dev, skb);
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
	count += skb_shinfo(skb)->nr_frags;
	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
	skb_tx_timestamp(skb);
#ifdef CONFIG_IXGBE_PTP
#ifdef CONFIG_PCI_IOV
		tx_flags |= (skb->priority & 0x7) <<
	if (skb_header_cloned(skb) &&
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
	tso = ixgbe_tso(tx_ring, first, &hdr_len);
		ixgbe_tx_csum(tx_ring, first);
		ixgbe_atr(tx_ring, first);
	ixgbe_tx_map(tx_ring, first, hdr_len);
static int ixgbe_set_mac(struct net_device *netdev, void *p)
	if (!is_valid_ether_addr(addr->sa_data))
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
	if (prtad != hw->phy.mdio.prtad)
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
	if (prtad != hw->phy.mdio.prtad)
	return hw->phy.ops.write_reg(hw, addr, devad, value);
#ifdef CONFIG_IXGBE_PTP
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
	if (is_valid_ether_addr(hw->mac.san_addr)) {
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
	if (is_valid_ether_addr(mac->san_addr)) {
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ixgbe_netpoll(struct net_device *netdev)
			ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
		ixgbe_intr(adapter->pdev->irq, netdev);
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
#ifdef CONFIG_IXGBE_DCB
static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
		netdev_set_prio_tc_map(dev, prio, tc);
	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
	     tc < MAX_TRAFFIC_CLASS))
	if (netif_running(dev))
		netdev_set_num_tc(dev, tc);
		ixgbe_set_prio_tc_map(adapter);
		netdev_reset_tc(dev);
		adapter->dcb_cfg.pfc_mode_enable = false;
	ixgbe_validate_rtr(adapter, tc);
	if (netif_running(dev))
	if (netif_running(netdev))
static int ixgbe_set_features(struct net_device *netdev,
	bool need_reset = false;
	} else if ((changed ^ features) & NETIF_F_LRO) {
		e_info(probe, "rx-usecs set too low, "
	case NETIF_F_NTUPLE:
		if (netdev_get_num_tc(netdev) > 1)
		ixgbe_vlan_strip_enable(adapter);
		ixgbe_vlan_strip_disable(adapter);
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			     const unsigned char *addr,
		pr_info("%s: FDB only supports static addresses\n",
	if (is_unicast_ether_addr(addr)) {
	} else if (is_multicast_ether_addr(addr)) {
static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
			     const unsigned char *addr)
		pr_info("%s: FDB only supports static addresses\n",
	if (is_unicast_ether_addr(addr))
	else if (is_multicast_ether_addr(addr))
static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_get_stats64	= ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
	.ndo_setup_tc		= ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_fdb_del		= ixgbe_ndo_fdb_del,
	.ndo_fdb_dump		= ixgbe_ndo_fdb_dump,
	int is_wol_supported = 0;
	switch (device_id) {
		switch (subdevice_id) {
			if (hw->bus.func != 0)
			is_wol_supported = 1;
			is_wol_supported = 1;
		is_wol_supported = 1;
		    (hw->bus.func == 0))) {
			is_wol_supported = 1;
	return is_wol_supported;
	static int cards_found;
	int i, err, pci_using_dac;
	unsigned int dcb_max = 0;
			"No usable DMA configuration, aborting\n");
			"pci_request_selected_regions failed 0x%x\n", err);
#ifdef CONFIG_IXGBE_DCB
		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
		dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
				IXGBE_MAX_FCOE_INDICES);
	indices = max_t(unsigned int, dcb_max, indices);
	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
		goto err_alloc_etherdev;
	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	if (!(eec & (1 << 8)))
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;
	err = ixgbe_sw_init(adapter);
	switch (adapter->hw.mac.type) {
		e_crit(probe, "Fan has stopped, replace the adapter\n");
	if (allow_unsupported_sfp)
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
		e_dev_err("failed to load because an unsupported SFP+ "
			  "module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported "
#ifdef CONFIG_PCI_IOV
	ixgbe_enable_sriov(adapter, ii);
			   NETIF_F_HW_VLAN_RX |
	switch (adapter->hw.mac.type) {
#ifdef CONFIG_IXGBE_DCB
	if (hw->mac.ops.get_device_caps) {
		hw->mac.ops.get_device_caps(hw, &device_caps);
	if (pci_using_dac) {
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		    (unsigned long) adapter);
#ifdef CONFIG_IXGBE_PTP
	hw->mac.ops.get_bus_info(hw);
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   hw->mac.type, hw->phy.type, part_str);
		e_dev_warn("PCI-Express bandwidth available for this card is "
			   "not sufficient for optimal performance.\n");
		e_dev_warn("For optimal performance a x8 PCI-Express slot "
	err = hw->mac.ops.start_hw(hw);
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	if (hw->mac.ops.disable_tx_laser &&
	    ((hw->phy.multispeed_fiber) ||
		hw->mac.ops.disable_tx_laser(hw);
#ifdef CONFIG_IXGBE_DCA
		ixgbe_setup_dca(adapter);
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
	ixgbe_add_sanmac_netdev(netdev);
	e_dev_info("%s\n", ixgbe_default_device_descr);
#ifdef CONFIG_IXGBE_HWMON
		e_err(probe, "failed to allocate sysfs resources\n");
#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_adapter_init(adapter);
	ixgbe_release_hw_control(adapter);
#ifdef CONFIG_DEBUG_FS
	ixgbe_dbg_adapter_exit(adapter);
#ifdef CONFIG_IXGBE_PTP
#ifdef CONFIG_IXGBE_DCA
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_del_sanmac_netdev(netdev);
	ixgbe_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
	u32 dw0, dw1, dw2, dw3;
		goto skip_bad_vf_detection;
	bdev = pdev->bus->self;
		bdev = bdev->bus->self;
		goto skip_bad_vf_detection;
		goto skip_bad_vf_detection;
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;
	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: "
			  "%8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
			if (vfdev->devfn == (req_id & 0xFF))
			e_dev_err("Issuing VFLR to VF %d\n", vf);
			pci_write_config_dword(vfdev, 0xA8, 0x00008000);
skip_bad_vf_detection:
	if (netif_running(netdev))
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
static void ixgbe_io_resume(struct pci_dev *pdev)
#ifdef CONFIG_PCI_IOV
		e_info(drv, "Resuming after VF err\n");
	if (netif_running(netdev))
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
static int __init ixgbe_init_module(void)
	pr_info("%s\n", ixgbe_copyright);
#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_IXGBE_DCA
	ret = pci_register_driver(&ixgbe_driver);
static void __exit ixgbe_exit_module(void)
#ifdef CONFIG_IXGBE_DCA
#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_IXGBE_DCA
					 __ixgbe_notify_dca);
	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;