#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/if_ether.h>
#include <linux/prefetch.h>
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"

static const char igb_driver_string[] =
	"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
	"Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_msix_other(int irq, void *);
#ifdef CONFIG_IGB_DCA
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static void igb_tx_timeout(struct net_device *);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
static void igb_check_vf_rate_limit(struct igb_adapter *);
#ifdef CONFIG_PCI_IOV
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
		 "per physical function");
static void igb_io_resume(struct pci_dev *);

	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,

	.id_table = igb_pci_tbl,
	.driver.pm = &igb_pm_ops,
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
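/* debug == -1 means "use the driver default": netif_msg_init() falls back to
 * DEFAULT_MSG_ENABLE above when the module parameter is negative or out of
 * range. */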
	switch (reginfo->ofs) {
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
		for (n = 0; n < 4; n++)
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
	pr_info("Device Name state trans_start "
	pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
	pr_info(" Register Name Value\n");
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	if (!netdev || !netif_running(netdev))
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
	goto rx_ring_summary;
	pr_info("------------------------------------\n");
	pr_info("------------------------------------\n");
	pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
		"[bi->dma ] leng ntw timestamp "
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		u0 = (struct my_u0 *)tx_desc;
		    i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_clean)
		pr_info("T [0x%03X] %016llX %016llX %016llX"
			" %04X %p %016llX %p%s\n", i,
			buffer_info->skb, next_desc);
				16, 1, buffer_info->skb->data,
	pr_info("Queue [NTU] [NTC]\n");
	pr_info("------------------------------------\n");
	pr_info("------------------------------------\n");
	pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
		"[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
	pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
		"----------- [bi->skb] <-- Adv Rx Write-Back format\n");
	for (i = 0; i < rx_ring->count; i++) {
		u0 = (struct my_u0 *)rx_desc;
		else if (i == rx_ring->next_to_clean)
			pr_info("%s[0x%03X] %016llX %016llX -------"
				"--------- %p%s\n", "RWB", i,
				buffer_info->skb, next_desc);
			pr_info("%s[0x%03X] %016llX %016llX %016llX"
				buffer_info->skb, next_desc);
		    buffer_info->dma && buffer_info->skb) {
				16, 1, buffer_info->skb->data,
static int __init igb_init_module(void)
	pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
	ret = pci_register_driver(&igb_driver);

static void __exit igb_exit_module(void)
#ifdef CONFIG_IGB_DCA

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
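/* Q_IDX_82576(i) interleaves queue indices for the 82576: the low bit of i
 * selects an offset of 8 ((i & 0x1) << 3) and the remaining bits give the
 * position within that half (i >> 1), so 0, 1, 2, 3, ... maps to
 * 0, 8, 1, 9, ... - the even and odd queues appear to live in two register
 * banks spaced 8 apart. */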
static void igb_cache_ring_register(struct igb_adapter *adapter)
	switch (adapter->hw.mac.type) {
			adapter->rx_ring[i]->reg_idx = rbase_offset +
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;

static void igb_free_queues(struct igb_adapter *adapter)

static int igb_alloc_queues(struct igb_adapter *adapter)
		ring->dev = &adapter->pdev->dev;
		ring->dev = &adapter->pdev->dev;
	igb_cache_ring_register(adapter);
	igb_free_queues(adapter);
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
	ivar &= ~((u32)0xFF << offset);

#define IGB_N0_QUEUE -1
	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;
	switch (hw->mac.type) {
			igb_write_ivar(hw, msix_vector,
				       (rx_queue & 0x8) << 1);
			igb_write_ivar(hw, msix_vector,
				       ((tx_queue & 0x8) << 1) + 8);
			igb_write_ivar(hw, msix_vector,
				       (rx_queue & 0x1) << 4);
			igb_write_ivar(hw, msix_vector,
				       ((tx_queue & 0x1) << 4) + 8);
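/* Reading of the shifts above (not taken from the datasheet): on the 82576
 * each IVAR entry is picked with (queue & 0x8) << 1, i.e. bit 3 selects the
 * high or low half of the register, while on later MACs (queue & 0x1) << 4
 * uses the low bit; the "+ 8" forms address the Tx entry that sits eight
 * bits above the corresponding Rx entry. */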
static void igb_configure_msix(struct igb_adapter *adapter)
	switch (hw->mac.type) {
		igb_assign_vector(adapter->q_vector[i], vector++);

static int igb_request_msix(struct igb_adapter *adapter)
	int i, err = 0, vector = 0;
			  igb_msix_other, 0, netdev->name, adapter);
		if (q_vector->rx.ring && q_vector->tx.ring)
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
				q_vector->rx.ring->queue_index);
				  igb_msix_ring, 0, q_vector->name,
	igb_configure_msix(adapter);

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)

static void igb_free_q_vectors(struct igb_adapter *adapter)

static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	for (i = 0; i < numvecs; i++)
	igb_reset_interrupt_capability(adapter);
#ifdef CONFIG_PCI_IOV
	if (!pci_enable_msi(adapter->pdev))
	err = netif_set_real_num_rx_queues(adapter->netdev,

static int igb_alloc_q_vectors(struct igb_adapter *adapter)
		adapter->q_vector[v_idx] = q_vector;
	igb_free_q_vectors(adapter);
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;

static int igb_map_ring_to_vector(struct igb_adapter *adapter)
		igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		igb_map_tx_ring_to_vector(adapter, i, v_idx++);
		if (i < adapter->num_tx_queues)
			igb_map_tx_ring_to_vector(adapter, i, v_idx);
		igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		igb_map_tx_ring_to_vector(adapter, i, v_idx++);
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
	err = igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;

	err = igb_alloc_queues(adapter);
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;

	err = igb_map_ring_to_vector(adapter);
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;

	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
static int igb_request_irq(struct igb_adapter *adapter)
	err = igb_request_msix(adapter);
	igb_clear_interrupt_scheme(adapter);
	if (!pci_enable_msi(pdev))
	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);
	err = igb_alloc_q_vectors(adapter);
		"Unable to allocate memory for vectors\n");
	err = igb_alloc_queues(adapter);
		"Unable to allocate memory for queues\n");
	igb_free_q_vectors(adapter);
	igb_setup_all_tx_resources(adapter);
	igb_setup_all_rx_resources(adapter);
	igb_assign_vector(adapter->q_vector[0], 0);
			  netdev->name, adapter);
	igb_reset_interrupt_capability(adapter);
			  netdev->name, adapter);
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
static void igb_free_irq(struct igb_adapter *adapter)

static void igb_irq_disable(struct igb_adapter *adapter)

static void igb_irq_enable(struct igb_adapter *adapter)

static void igb_update_mng_vlan(struct igb_adapter *adapter)
	u16 vid = adapter->hw.mng_cookie.vlan_id;

static void igb_release_hw_control(struct igb_adapter *adapter)

static void igb_get_hw_control(struct igb_adapter *adapter)

static void igb_configure(struct igb_adapter *adapter)
	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);
	igb_restore_vlan(adapter);
	igb_setup_mrqc(adapter);
	igb_configure_tx(adapter);
	igb_configure_rx(adapter);
	igb_reset_phy(&adapter->hw);
static void igb_power_down_link(struct igb_adapter *adapter)

	igb_configure(adapter);
		napi_enable(&(adapter->q_vector[i]->napi));
		igb_configure_msix(adapter);
		igb_assign_vector(adapter->q_vector[0], 0);
	igb_irq_enable(adapter);
	netif_tx_start_all_queues(adapter->netdev);
	hw->mac.get_link_status = 1;

	netif_tx_stop_all_queues(netdev);
		napi_disable(&(adapter->q_vector[i]->napi));
	igb_irq_disable(adapter);
	if (!pci_channel_offline(adapter->pdev))
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	igb_setup_dca(adapter);
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;

	switch (mac->type) {
		tx_space = pba >> 16;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);
			if (pba < min_rx_space)
	hwm = min(((pba << 10) * 9 / 10),
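/* The logic above rebalances the on-chip packet buffer: the Tx share is the
 * upper half of PBA (pba >> 16), the minimum Tx/Rx sizes are rounded up to
 * 1 KB units (ALIGN(..., 1024) then >>= 10), space is moved from Rx to Tx if
 * Tx cannot hold a full frame, and the flow-control high water mark is set
 * to roughly 90% of the Rx buffer that remains ((pba << 10) * 9 / 10, in
 * bytes). */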
		igb_ping_all_vfs(adapter);
	hw->mac.ops.reset_hw(hw);
	if (hw->mac.ops.init_hw(hw))
	if (!hw->mac.autoneg)
	igb_init_dmac(adapter, pba);
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);
	igb_update_mng_vlan(adapter);
#ifdef CONFIG_IGB_PTP
	igb_get_phy_info(hw);
static int igb_set_features(struct net_device *netdev,
	struct igb_adapter *adapter = netdev_priv(netdev);
		igb_vlan_mode(netdev, features);
	if (netif_running(netdev))

	.ndo_open = igb_open,
	.ndo_stop = igb_close,
	.ndo_start_xmit = igb_xmit_frame,
	.ndo_get_stats64 = igb_get_stats64,
	.ndo_set_rx_mode = igb_set_rx_mode,
	.ndo_set_mac_address = igb_set_mac,
	.ndo_change_mtu = igb_change_mtu,
	.ndo_do_ioctl = igb_ioctl,
	.ndo_tx_timeout = igb_tx_timeout,
	.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac = igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
	.ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
	.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = igb_netpoll,
	.ndo_fix_features = igb_fix_features,
	.ndo_set_features = igb_set_features,
	u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;

	hw->nvm.ops.read(hw, 5, 1, &fw_version);
	if ((comb_offset != 0x0) &&
			   + 1), 1, &comb_verh);
		if ((comb_verh && comb_verl) &&
				   (comb_verh >> IGB_COMB_VER_SHFT);
			 "%d.%d%d, 0x%08x, %d.%d.%d",
			 etrack_id, major, build, patch);

static int global_quad_port_a;
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
			"configuration, aborting\n");
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
		goto err_alloc_etherdev;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	netdev->mem_end = mmio_start + mmio_len;
	err = igb_sw_init(adapter);
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.disable_polarity_correction = false;
			 "PHY reset is blocked due to SOL/IDER session.\n");
			    NETIF_F_HW_VLAN_RX |
	if (pci_using_dac) {
	hw->mac.ops.reset_hw(hw);
	if (hw->nvm.ops.validate(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
	if (hw->mac.ops.read_mac_addr(hw))
	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		    (unsigned long) adapter);
		    (unsigned long) adapter);
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;
	if (hw->bus.func == 0)
	else if (hw->bus.func == 1)
	if (eeprom_data & eeprom_apme_mask)
		if (global_quad_port_a != 0)
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
	igb_get_hw_control(adapter);
#ifdef CONFIG_IGB_DCA
		igb_setup_dca(adapter);
#ifdef CONFIG_IGB_PTP
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
		strcpy(part_str, "Unknown");
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
	switch (hw->mac.type) {
	pm_runtime_put_noidle(&pdev->dev);
	igb_release_hw_control(adapter);
	igb_clear_interrupt_scheme(adapter);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_PTP
#ifdef CONFIG_IGB_DCA
	igb_release_hw_control(adapter);
	igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
	if (igb_vfs_are_assigned(adapter)) {
		dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
#ifdef CONFIG_PCI_IOV
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
			 "max_vfs setting of %d\n", old_vfs, max_vfs);
		dev_err(&pdev->dev, "Unable to allocate memory for VF "
		igb_vf_configure(adapter, i);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
			 "Maximum of 7 VFs per PF, using max\n");
	switch (hw->mac.type) {
	switch (hw->mac.type) {
	if (adapter->rss_queues > (max_rss_queues / 2))
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
	igb_probe_vfs(adapter);
	igb_irq_disable(adapter);
	struct igb_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_sync(&pdev->dev);
	err = igb_setup_all_tx_resources(adapter);
	err = igb_setup_all_rx_resources(adapter);
	igb_configure(adapter);
	err = igb_request_irq(adapter);
		napi_enable(&(adapter->q_vector[i]->napi));
	igb_irq_enable(adapter);
	netif_tx_start_all_queues(netdev);
		pm_runtime_put(&pdev->dev);
	hw->mac.get_link_status = 1;
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
	igb_free_all_tx_resources(adapter);
		pm_runtime_put(&pdev->dev);

static int igb_open(struct net_device *netdev)
	return __igb_open(netdev, false);

static int __igb_close(struct net_device *netdev, bool suspending)
	struct igb_adapter *adapter = netdev_priv(netdev);
		pm_runtime_get_sync(&pdev->dev);
	igb_free_irq(adapter);
	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);
		pm_runtime_put_sync(&pdev->dev);

static int igb_close(struct net_device *netdev)
	return __igb_close(netdev, false);
	tx_ring->next_to_clean = 0;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");

static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)

			tdba & 0x00000000ffffffffULL);

static void igb_configure_tx(struct igb_adapter *adapter)

	rx_ring->next_to_clean = 0;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");

static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)

static void igb_setup_mrqc(struct igb_adapter *adapter)
	u32 j, num_rx_queues, shift = 0;
	static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
					0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
					0xA32DCB77, 0x0CF23080, 0x3BB7426A,
	for (j = 0; j < 10; j++)
	switch (hw->mac.type) {
	for (j = 0; j < 32; j++) {
		u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
		u32 reta = (base & 0x07800780) >> (7 - shift);
		base += 0x00010001 * num_rx_queues;
		reta |= (base & 0x07800780) << (1 + shift);
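/* Redirection-table math, as read from the constants above: each iteration
 * writes four 8-bit RETA entries. Multiplying the packed halfword constants
 * 0x00040004 / 0x00010001 by num_rx_queues produces queue indices for table
 * entries 4j, 4j+1, 4j+2 and 4j+3, the mask 0x07800780 and the shifts place
 * them in consecutive bytes, and "shift" repositions the index within each
 * byte for MAC types that expect it in the upper bits. The net effect is a
 * round-robin spread of the 128 entries across num_rx_queues. */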
	igb_vmm_control(adapter);
	if (adapter->netdev->features & NETIF_F_RXALL) {

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
	if (vfn < adapter->vfs_allocated_count &&
	    adapter->vf_data[vfn].vlans_enabled)

static void igb_rlpml_set(struct igb_adapter *adapter)
	igb_set_vf_rlpml(adapter, max_frame_size, pf_id);

static inline void igb_set_vmolr(struct igb_adapter *adapter,
	if (vfn <= adapter->vfs_allocated_count)

	u32 srrctl = 0, rxdctl = 0;
	       rdba & 0x00000000ffffffffULL);
#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
	srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
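/* SRRCTL's buffer-size field is programmed in 1 KB units; shifting the
 * half-page size right by E1000_SRRCTL_BSIZEPKT_SHIFT (10 in this driver)
 * converts bytes to that granularity before the value is or'ed in. */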
#ifdef CONFIG_IGB_PTP
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

static void igb_configure_rx(struct igb_adapter *adapter)
	igb_set_uta(adapter);
	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,

	igb_clean_tx_ring(tx_ring);

static void igb_free_all_tx_resources(struct igb_adapter *adapter)

	if (tx_buffer->skb) {

static void igb_clean_tx_ring(struct igb_ring *tx_ring)
	for (i = 0; i < tx_ring->count; i++) {
	netdev_tx_reset_queue(txring_txq(tx_ring));
	tx_ring->next_to_clean = 0;

static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
		igb_clean_tx_ring(adapter->tx_ring[i]);

	igb_clean_rx_ring(rx_ring);

static void igb_free_all_rx_resources(struct igb_adapter *adapter)

static void igb_clean_rx_ring(struct igb_ring *rx_ring)
	for (i = 0; i < rx_ring->count; i++) {
		if (buffer_info->dma) {
			buffer_info->dma = 0;
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
		if (buffer_info->page) {
	rx_ring->next_to_clean = 0;

static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
		igb_clean_rx_ring(adapter->rx_ring[i]);
static int igb_set_mac(struct net_device *netdev, void *p)
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (!is_valid_ether_addr(addr->sa_data))
	igb_rar_set_qsel(adapter, hw->mac.addr, 0,

static int igb_write_mc_addr_list(struct net_device *netdev)
	struct igb_adapter *adapter = netdev_priv(netdev);
		igb_restore_vf_multicasts(adapter);

	struct igb_adapter *adapter = netdev_priv(netdev);
	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
			igb_rar_set_qsel(adapter, ha->addr,
	for (; rar_entries > 0 ; rar_entries--) {

static void igb_set_rx_mode(struct net_device *netdev)
	struct igb_adapter *adapter = netdev_priv(netdev);
	u32 rctl, vmolr = 0;
		count = igb_write_mc_addr_list(netdev);
		count = igb_write_uc_addr_list(netdev);
	igb_restore_vf_multicasts(adapter);

static void igb_check_wvbr(struct igb_adapter *adapter)
	switch (hw->mac.type) {
	adapter->wvbr |= wvbr;

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
		if (adapter->wvbr & (1 << j) ||
				 "Spoof event(s) detected on VF %d\n", j);

static void igb_update_phy_info(unsigned long data)
	igb_get_phy_info(&adapter->hw);

	bool link_active = false;
	switch (hw->phy.media_type) {
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = hw->mac.serdes_has_link;

	u32 ctrl_ext, thstat;
	ret = !!(thstat & event);

static void igb_watchdog(unsigned long data)
		pm_runtime_resume(netdev->dev.parent);
	if (!netif_carrier_ok(netdev)) {
		hw->mac.ops.get_speed_and_duplex(hw,
			 "Duplex, Flow Control: %s\n",
			 (ctrl & E1000_CTRL_RFCE) ? "RX" :
			 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
		if (igb_thermal_sensor_event(hw,
			netdev_info(netdev, "The network adapter link "
				    "speed was downshifted because it "
		igb_ping_all_vfs(adapter);
		igb_check_vf_rate_limit(adapter);
	if (netif_carrier_ok(netdev)) {
		if (igb_thermal_sensor_event(hw,
			netdev_err(netdev, "The network adapter was "
				   "stopped because it overheated\n");
		igb_ping_all_vfs(adapter);
	if (!netif_carrier_ok(netdev)) {
		if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
		eics |= adapter->q_vector[i]->eims_value;
	igb_spoof_check(adapter);
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;

	packets = q_vector->rx.total_packets;
		avg_wire_size = q_vector->rx.total_bytes / packets;
	packets = q_vector->tx.total_packets;
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	avg_wire_size += 24;
	avg_wire_size = min(avg_wire_size, 3000);
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
		new_val = avg_wire_size / 2;
	if (new_val != q_vector->itr_val) {
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
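/* Interrupt moderation heuristic: the average wire size observed since the
 * last update gets 24 bytes added (preamble/CRC/inter-frame gap), is capped
 * at 3000 so jumbo traffic does not drive the interrupt rate to the floor,
 * and is divided down (by 3 for mid-size frames, otherwise by 2) to form the
 * new throttle value before the byte and packet counters are cleared. */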
static void igb_update_itr(struct igb_q_vector *q_vector,
	u8 itrval = ring_container->itr;

		if (bytes/packets > 8000)
		else if ((packets < 5) && (bytes > 512))
		if (bytes > 10000) {
			if (bytes/packets > 8000) {
			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
			} else if ((packets > 35)) {
		} else if (bytes/packets > 2000) {
		} else if (packets <= 2 && bytes < 512) {
		if (bytes > 25000) {
		} else if (bytes < 1500) {
	ring_container->itr = itrval;
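/* igb_update_itr() sorts each ring into a latency class from the byte and
 * packet counts above: large average packets or heavy byte totals push
 * toward bulk throttling, small bursts toward the lowest-latency setting,
 * and the chosen class is stored in ring_container->itr for igb_set_itr()
 * to combine across the Tx and Rx rings. */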
	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
	switch (current_itr) {
	if (new_itr != q_vector->itr_val) {
		new_itr = new_itr > q_vector->itr_val ?
			  (new_itr + (q_vector->itr_val >> 2)),

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

static int igb_tso(struct igb_ring *tx_ring,
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;

	if (!skb_is_gso(skb))
	if (skb_header_cloned(skb)) {
		struct iphdr *iph = ip_hdr(skb);
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
						       &ipv6_hdr(skb)->daddr,
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	vlan_macip_lens = skb_network_header_len(skb);
	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
		vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ip_hdr(skb)->protocol;
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
				 "partial checksum but proto=%x!\n",
			mss_l4len_idx = tcp_hdrlen(skb) <<
			mss_l4len_idx = sizeof(struct sctphdr) <<
			mss_l4len_idx = sizeof(struct udphdr) <<
					E1000_ADVTXD_L4LEN_SHIFT;
				 "partial checksum but l4 proto=%x!\n",
	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

static __le32 igb_tx_cmd_type(u32 tx_flags)
#ifdef CONFIG_IGB_PTP

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 u32 tx_flags, unsigned int paylen)
	olinfo_status |= tx_ring->reg_idx << 4;

#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
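/* One advanced Tx data descriptor can carry at most 2^15 = 32768 bytes
 * (IGB_MAX_DATA_PER_TXD), so igb_tx_map() below has to split any larger
 * fragment across several descriptors. */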
static void igb_tx_map(struct igb_ring *tx_ring,
	unsigned int size = skb_headlen(skb);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
	cmd_type = igb_tx_cmd_type(tx_flags);
		tx_desc->read.cmd_type_len =
		if (i == tx_ring->count) {
		tx_desc->read.olinfo_status = 0;
		if (i == tx_ring->count) {
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
		tx_desc->read.olinfo_status = 0;
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
	tx_desc->read.cmd_type_len = cmd_type;
	if (i == tx_ring->count)
	dev_err(tx_ring->dev, "TX DMA map failed\n");
		if (tx_buffer == first)

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
	netif_stop_subqueue(netdev, tx_ring->queue_index);
	if (igb_desc_unused(tx_ring) < size)
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
	if (igb_desc_unused(tx_ring) >= size)
	return __igb_maybe_stop_tx(tx_ring, size);
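/* igb_maybe_stop_tx() is the fast path: when enough descriptors are free it
 * returns immediately.  __igb_maybe_stop_tx() is the slow path: it stops the
 * subqueue, re-checks the ring (a completion may have freed descriptors in
 * the meantime), and if so wakes the queue again while counting the restart
 * in tx_stats.restart_queue2. */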
#ifdef CONFIG_IGB_PTP
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
#ifdef CONFIG_IGB_PTP
		     !(adapter->ptp_tx_skb))) {
		adapter->ptp_tx_skb = skb_get(skb);
	tso = igb_tso(tx_ring, first, &hdr_len);
		igb_tx_csum(tx_ring, first);
	igb_tx_map(tx_ring, first, hdr_len);

	return adapter->tx_ring[r_idx];

	struct igb_adapter *adapter = netdev_priv(netdev);
	if (skb->len <= 0) {
	if (skb->len < 17) {
		if (skb_padto(skb, 17))

static void igb_tx_timeout(struct net_device *netdev)
	struct igb_adapter *adapter = netdev_priv(netdev);

static void igb_reset_task(struct work_struct *work)
	netdev_err(adapter->netdev, "Reset adapter\n");

	struct igb_adapter *adapter = netdev_priv(netdev);

static int igb_change_mtu(struct net_device *netdev, int new_mtu)
	struct igb_adapter *adapter = netdev_priv(netdev);
		dev_err(&pdev->dev, "Invalid MTU setting\n");

#define MAX_STD_JUMBO_FRAME_SIZE 9238
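/* 9238 is presumably the 9216-byte jumbo MTU plus Ethernet/VLAN header and
 * FCS overhead; max_frame below is checked against this limit while the
 * error message reports it in MTU terms. */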
	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");

	if (netif_running(netdev))
	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))

	u64 _bytes, _packets;
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
	if (pci_channel_offline(pdev))
			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
		packets += _packets;
			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
		packets += _packets;

	adapter->stats.mpc += mpc;
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.cexterr;
		adapter->stats.latecol;
		adapter->phy_stats.idle_errors += phy_tmp;

		adapter->stats.doosync++;
		igb_check_wvbr(adapter);
		igb_msg_task(adapter);
		hw->mac.get_link_status = 1;
#ifdef CONFIG_IGB_PTP

static void igb_write_itr(struct igb_q_vector *q_vector)
		itr_val |= itr_val << 16;

static irqreturn_t igb_msix_ring(int irq, void *data)
	igb_write_itr(q_vector);
	napi_schedule(&q_vector->napi);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
	if (q_vector->cpu == cpu)
	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;

static void igb_setup_dca(struct igb_adapter *adapter)
		igb_update_dca(adapter->q_vector[i]);

static int __igb_notify_dca(struct device *dev, void *data)
	struct igb_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;
			igb_setup_dca(adapter);

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
	eth_random_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
	switch (adapter->hw.mac.type) {
		if (vfdev->is_virtfn && vfdev->physfn == pdev) {

static void igb_ping_all_vfs(struct igb_adapter *adapter)

	*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
	u16 *hash_list = (u16 *)&msgbuf[1];
	for (i = 0; i < n; i++)
	igb_set_rx_mode(adapter->netdev);

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
	if (i == E1000_VLVF_ARRAY_SIZE) {
		if (!(reg & E1000_VLVF_VLANID_ENABLE))
	if (i < E1000_VLVF_ARRAY_SIZE) {
		if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
			if (!adapter->vf_data[vf].vlans_enabled) {
				reg &= ~E1000_VMOLR_RLPML_MASK;
	if (i < E1000_VLVF_ARRAY_SIZE) {
		if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
			if (!adapter->vf_data[vf].vlans_enabled) {
				reg &= ~E1000_VMOLR_RLPML_MASK;
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
	struct igb_adapter *adapter = netdev_priv(netdev);
		err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
		igb_set_vmolr(adapter, vf, !vlan);
			 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
				 "The VF VLAN has been set,"
				 " but the PF device is not up.\n");
				 "Bring the PF device up before"
				 " attempting to use the VF device.\n");
		igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
		igb_set_vmvir(adapter, vlan, vf);
		igb_set_vmolr(adapter, vf, true);

	return igb_vlvf_set(adapter, vid, add, vf);

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
	igb_set_vmolr(adapter, vf, true);
	igb_clear_vf_vfta(adapter, vf);
	if (adapter->vf_data[vf].pf_vlan)
		igb_ndo_set_vf_vlan(adapter->netdev, vf,
		igb_clear_vf_vfta(adapter, vf);
	adapter->vf_data[vf].num_vf_mc_hashes = 0;
	igb_set_rx_mode(adapter->netdev);

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	eth_random_addr(vf_mac);
	igb_vf_reset(adapter, vf);

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u8 *addr = (u8 *)(&msgbuf[1]);

	igb_vf_reset(adapter, vf);
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	unsigned char *addr = (char *)&msg[1];
	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		igb_vf_reset_msg(adapter, vf);

	switch ((msgbuf[0] & 0xFFFF)) {
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
				 "VF %d attempted to override administratively "
				 "set MAC address\nReload the VF driver to "
				 "resume operations\n", vf);
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
				 "VF %d attempted to override administratively "
				 "set VLAN tag\nReload the VF driver to "
				 "resume operations\n", vf);
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);

static void igb_msg_task(struct igb_adapter *adapter)
			igb_vf_reset_event(adapter, vf);
			igb_rcv_msg_from_vf(adapter, vf);
			igb_rcv_ack_from_vf(adapter, vf);
static void igb_set_uta(struct igb_adapter *adapter)
	for (i = 0; i < hw->mac.uta_reg_count; i++)

static irqreturn_t igb_intr_msi(int irq, void *data)
	igb_write_itr(q_vector);
	if (icr & E1000_ICR_DRSTA)
	if (icr & E1000_ICR_DOUTSYNC) {
		adapter->stats.doosync++;
		hw->mac.get_link_status = 1;
#ifdef CONFIG_IGB_PTP
	if (icr & E1000_ICR_TS) {
	napi_schedule(&q_vector->napi);

	igb_write_itr(q_vector);
	if (icr & E1000_ICR_DRSTA)
	if (icr & E1000_ICR_DOUTSYNC) {
		adapter->stats.doosync++;
		hw->mac.get_link_status = 1;
#ifdef CONFIG_IGB_PTP
	if (icr & E1000_ICR_TS) {
	napi_schedule(&q_vector->napi);

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
			igb_set_itr(q_vector);
			igb_update_ring_itr(q_vector);
			igb_irq_enable(adapter);

	bool clean_complete = true;
#ifdef CONFIG_IGB_DCA
		igb_update_dca(q_vector);
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);
	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);
	if (!clean_complete)
	igb_ring_irq_enable(q_vector);

static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
	struct igb_ring *tx_ring = q_vector->tx.ring;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	i -= tx_ring->count;
		total_packets += tx_buffer->gso_segs;
		while (tx_desc != eop_desc) {
				i -= tx_ring->count;
			i -= tx_ring->count;
	} while (likely(budget));
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_packets += total_packets;

			"Detected Tx Unit Hang\n"
			" next_to_use <%x>\n"
			" next_to_clean <%x>\n"
			"buffer_info[next_to_clean]\n"
			" time_stamp <%lx>\n"
			" next_to_watch <%p>\n"
			" desc.status <%x>\n",
			tx_ring->next_to_clean,
		netif_stop_subqueue(tx_ring->netdev,
	    netif_carrier_ok(tx_ring->netdev) &&
		if (__netif_subqueue_stopped(tx_ring->netdev,
			netif_wake_subqueue(tx_ring->netdev,
			u64_stats_update_begin(&tx_ring->tx_syncp);
			u64_stats_update_end(&tx_ring->tx_syncp);

static inline void igb_rx_checksum(struct igb_ring *ring,
	skb_checksum_none_assert(skb);
	if (igb_test_staterr(rx_desc,
		if (!((skb->len == 60) &&
			u64_stats_update_begin(&ring->rx_syncp);
			u64_stats_update_end(&ring->rx_syncp);
	dev_dbg(ring->dev, "cksum success: bits %08X\n",

static inline void igb_rx_hash(struct igb_ring *ring,

static void igb_rx_vlan(struct igb_ring *ring,
		__vlan_hwaccel_put_tag(skb, vid);
static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
	struct igb_ring *rx_ring = q_vector->rx.ring;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);
	u16 i = rx_ring->next_to_clean;

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		if (i == rx_ring->count)
		if (!skb_is_nonlinear(skb)) {
			__skb_put(skb, igb_get_hlen(rx_desc));
			buffer_info->dma = 0;
		if (rx_desc->wb.upper.length) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				get_page(buffer_info->page);
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->dma = 0;
		if (unlikely((igb_test_staterr(rx_desc,
			     && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
#ifdef CONFIG_IGB_PTP
		igb_rx_hash(rx_ring, rx_desc, skb);
		igb_rx_checksum(rx_ring, rx_desc, skb);
		igb_rx_vlan(rx_ring, rx_desc, skb);
		total_bytes += skb->len;

	rx_ring->next_to_clean = i;
	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;

static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
	skb = netdev_alloc_skb_ip_align(rx_ring->netdev,

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,

	i -= rx_ring->count;
	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
		if (!igb_alloc_mapped_page(rx_ring, bi))
			i -= rx_ring->count;
		rx_desc->read.hdr_addr = 0;
	i += rx_ring->count;
	struct igb_adapter *adapter = netdev_priv(netdev);
		data->phy_id = adapter->hw.phy.addr;
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,

static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
		return igb_mii_ioctl(netdev, ifr, cmd);
#ifdef CONFIG_IGB_PTP

	struct igb_adapter *adapter = netdev_priv(netdev);
	igb_rlpml_set(adapter);

static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
	struct igb_adapter *adapter = netdev_priv(netdev);
	igb_vlvf_set(adapter, vid, true, pf_id);

static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	struct igb_adapter *adapter = netdev_priv(netdev);
	err = igb_vlvf_set(adapter, vid, false, pf_id);

static void igb_restore_vlan(struct igb_adapter *adapter)
	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
		igb_vlan_rx_add_vid(adapter->netdev, vid);

	struct pci_dev *pdev = adapter->pdev;
	if ((spd & 1) || (dplx & ~1))
	switch (spd + dplx) {
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);
	igb_clear_interrupt_scheme(adapter);
		igb_set_rx_mode(netdev);
#define E1000_CTRL_ADVD3WUC 0x00100000
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
	*enable_wake = wufc || adapter->en_mng_pt;
		igb_power_down_link(adapter);
	igb_release_hw_control(adapter);
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *dev)
	retval = __igb_shutdown(pdev, &wake, 0);

static int igb_resume(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
			"igb: Cannot enable PCI device from suspend\n");
	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
	igb_get_hw_control(adapter);
		err = __igb_open(netdev, true);

#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_idle(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

static int igb_runtime_suspend(struct device *dev)
	retval = __igb_shutdown(pdev, &wake, 1);

static int igb_runtime_resume(struct device *dev)
	return igb_resume(dev);

static void igb_shutdown(struct pci_dev *pdev)
	__igb_shutdown(pdev, &wake, 0);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void igb_netpoll(struct net_device *netdev)
	struct igb_adapter *adapter = netdev_priv(netdev);
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);

	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev))

	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
			"Cannot re-enable PCI device after reset.\n");
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);

static void igb_io_resume(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	if (netif_running(netdev)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
	igb_get_hw_control(adapter);

	u32 rar_low, rar_high;

	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
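/* The 6-byte MAC address is packed little-endian into the receive address
 * registers: RAL takes bytes 0-3 and RAH bytes 4-5, with the address-valid
 * and queue-select bits added elsewhere in this function. */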
static int igb_set_vf_mac(struct igb_adapter *adapter,
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
	struct igb_adapter *adapter = netdev_priv(netdev);
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
		dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
			 " change effective.");
			dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
				 " but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
				 " attempting to use the VF device.\n");
	return igb_set_vf_mac(adapter, vf, mac);

static int igb_link_mbps(int internal_link_speed)
	switch (internal_link_speed) {

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
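/* The VF rate limiter is driven by the ratio link_speed / tx_rate: rf_int is
 * the integer part and rf_dec the remainder, which presumably gets scaled
 * into the fractional field of the rate-control register before being
 * written for this VF. */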
static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
	int actual_link_speed, i;
	bool reset_rate = false;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
			 "Link speed has been changed. VF Transmit "
			 "rate is disabled\n");
		igb_set_vf_rate_limit(&adapter->hw, i,

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
	struct igb_adapter *adapter = netdev_priv(netdev);
	int actual_link_speed;

	actual_link_speed = igb_link_mbps(adapter->link_speed);
	    (tx_rate < 0) || (tx_rate > actual_link_speed))
	igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);

static int igb_ndo_get_vf_config(struct net_device *netdev,
	struct igb_adapter *adapter = netdev_priv(netdev);

static void igb_vmm_control(struct igb_adapter *adapter)
	switch (hw->mac.type) {

	if (hwm < 64 * (pba - 6))
		hwm = 64 * (pba - 6);
	if (dmac_thr < pba - 10)
		dmac_thr = pba - 10;