#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_set_rx_mode(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static int e1000_resume(struct pci_dev *pdev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void e1000_netpoll(struct net_device *netdev);
#define COPYBREAK_DEFAULT 256
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
static void e1000_io_resume(struct pci_dev *pdev);

	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,

	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.suspend = e1000_suspend,
	.resume = e1000_resume,
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
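/* -1 leaves the message level at the driver default (DEFAULT_MSG_ENABLE
 * above), following the usual netif_msg_init() convention.
 */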
static int __init e1000_init_module(void)

pr_info("%s\n", e1000_copyright);
ret = pci_register_driver(&e1000_driver);
pr_info("copybreak disabled\n");
pr_info("copybreak enabled for "
static void __exit e1000_exit_module(void)

e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)

if (!e1000_vlan_used(adapter))
e1000_vlan_rx_add_vid(netdev, vid);
e1000_vlan_rx_kill_vid(netdev, old_vid);

static void e1000_init_manageability(struct e1000_adapter *adapter)
static void e1000_release_manageability(struct e1000_adapter *adapter)

e1000_set_rx_mode(netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability(adapter);
e1000_configure_tx(adapter);
e1000_setup_rctl(adapter);
e1000_configure_rx(adapter);
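/* e1000_configure(): filters and VLAN state are restored before the
 * Tx/Rx units are brought up; both e1000_up() and e1000_open() below
 * funnel through this path.
 */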
e1000_configure(adapter);
napi_enable(&adapter->napi);
e1000_irq_enable(adapter);
netif_wake_queue(adapter->netdev);

static void e1000_power_down_phy(struct e1000_adapter *adapter)
static void e1000_down_and_stop(struct e1000_adapter *adapter)

netif_tx_disable(netdev);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
e1000_down_and_stop(adapter);
e1000_clean_all_tx_rings(adapter);
e1000_clean_all_rx_rings(adapter);
u32 pba = 0, tx_space, min_tx_space, min_rx_space;
bool legacy_pba_adjust = false;

legacy_pba_adjust = true;
legacy_pba_adjust = true;

if (legacy_pba_adjust) {
tx_space = pba >> 16;
min_tx_space = ALIGN(min_tx_space, 1024);
min_rx_space = ALIGN(min_rx_space, 1024);
if (tx_space < min_tx_space &&
    ((min_tx_space - tx_space) < pba)) {
	pba = pba - (min_tx_space - tx_space);
if (pba < min_rx_space)
hwm = min(((pba << 10) * 9 / 10),
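/* Packet buffer accounting is in KB here: the upper 16 bits of PBA
 * report the Tx FIFO share (pba >> 16), pba << 10 converts the Rx
 * share to bytes, and the flow-control high-water mark is capped at
 * 90% of it.
 */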
e1000_update_mng_vlan(adapter);
e1000_release_manageability(adapter);
u16 csum_old, csum_new = 0;
csum_new += data[i] + (data[i + 1] << 8);
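/* EEPROM words are stored little-endian, so each byte pair is
 * recombined as data[i] | (data[i + 1] << 8) before being summed; a
 * valid image makes the 16-bit word sum equal the family's fixed
 * checksum constant (0xBABA for this hardware).
 */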
pr_err("/*********************/\n");
pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
pr_err("Calculated              : 0x%04x\n", csum_new);
pr_err("Offset    Values\n");
pr_err("========  ======\n");
pr_err("Include this output when contacting your support provider.\n");
pr_err("This is not a software error! Something bad happened to\n");
pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
pr_err("result in further problems, possibly loss of data,\n");
pr_err("corruption or system hangs!\n");
pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
pr_err("which is invalid and requires you to set the proper MAC\n");
pr_err("address manually before continuing to enable this network\n");
pr_err("device. Please inspect the EEPROM dump and report the\n");
pr_err("issue to your hardware vendor or Intel Customer Support.\n");
pr_err("/*********************/\n");
static int e1000_is_need_ioport(struct pci_dev *pdev)

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)

e1000_vlan_mode(netdev, features);
if (netif_running(netdev))
	.ndo_open = e1000_open,
	.ndo_stop = e1000_close,
	.ndo_start_xmit = e1000_xmit_frame,
	.ndo_get_stats = e1000_get_stats,
	.ndo_set_rx_mode = e1000_set_rx_mode,
	.ndo_set_mac_address = e1000_set_mac,
	.ndo_tx_timeout = e1000_tx_timeout,
	.ndo_change_mtu = e1000_change_mtu,
	.ndo_do_ioctl = e1000_ioctl,
	.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
	.ndo_fix_features = e1000_fix_features,
	.ndo_set_features = e1000_set_features,
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)

e_err(probe, "Unknown MAC Type\n");
static int cards_found = 0;
static int global_quad_port_a = 0;
int i, err, pci_using_dac;
int bars, need_ioport;

need_ioport = e1000_is_need_ioport(pdev);
goto err_alloc_etherdev;
goto err_alloc_etherdev;
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->pdev = pdev;
adapter->bars = bars;

err = e1000_init_hw_struct(adapter, hw);
pr_err("No usable DMA config, aborting\n");
err = e1000_sw_init(adapter);
goto err_mdio_ioremap;
if (pci_using_dac) {

e_err(probe, "EEPROM initialization failed\n");
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
e_err(probe, "EEPROM Read Error\n");
if (!is_valid_ether_addr(netdev->perm_addr))
	e_err(probe, "Invalid MAC Address\n");

e1000_82547_tx_fifo_stall_task);

if (eeprom_data & eeprom_apme_mask)
if (global_quad_port_a != 0)
if (++global_quad_port_a == 4)
	global_quad_port_a = 0;
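/* Quad-port quirk: only the first port (port A) of a quad-port
 * adapter keeps Wake-on-LAN enabled, so a module-global counter
 * tracks the port position and wraps after four probes.
 */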
for (i = 0; i < 32; i++) {
if (tmp == 0 || tmp == 0xFF) {

e1000_vlan_filter_on_off(adapter, false);
e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
struct net_device *netdev = pci_get_drvdata(pdev);

e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);

if (e1000_alloc_queues(adapter)) {
	e_err(probe, "Unable to allocate memory for queues\n");
e1000_irq_disable(adapter);
static int e1000_open(struct net_device *netdev)

e1000_update_mng_vlan(adapter);
e1000_configure(adapter);
err = e1000_request_irq(adapter);
napi_enable(&adapter->napi);
e1000_irq_enable(adapter);
netif_start_queue(netdev);
e1000_power_down_phy(adapter);

static int e1000_close(struct net_device *netdev)

e1000_power_down_phy(adapter);
e1000_free_irq(adapter);
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
unsigned long begin = (unsigned long)start;
unsigned long end = begin + len;

return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
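/* begin and (end - 1) can only differ above bit 15 if the buffer
 * straddles a 64 KiB boundary; early 82545/82546 parts must not DMA
 * across one (hardware erratum), hence this check.
 */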
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)

e_err(probe, "Unable to allocate memory for the Tx descriptor "
e_err(probe, "Unable to allocate memory for the Tx descriptor "

if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
	void *olddesc = txdr->desc;
e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
goto setup_tx_desc_die;
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
e_err(probe, "Unable to allocate aligned memory "
      "for the transmit descriptor ring\n");
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
for (i--; i >= 0; i--)
	e1000_free_tx_resources(adapter,
static void e1000_configure_tx(struct e1000_adapter *adapter)

u32 tdlen, tctl, tipg;
tdba = adapter->tx_ring[0].dma;
tdlen = adapter->tx_ring[0].count *
ew32(TDBAH, (tdba >> 32));
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
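/* The 64-bit descriptor ring base address is split across two 32-bit
 * registers: TDBAH takes the high dword, TDBAL the low dword.
 */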
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)

e_err(probe, "Unable to allocate memory for the Rx descriptor "
e_err(probe, "Unable to allocate memory for the Rx descriptor "

if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
	void *olddesc = rxdr->desc;
e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
e_err(probe, "Unable to allocate memory for the Rx "
      "descriptor ring\n");
goto setup_rx_desc_die;
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
e_err(probe, "Unable to allocate aligned memory for "
      "the Rx descriptor ring\n");
goto setup_rx_desc_die;

err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
for (i--; i >= 0; i--)
	e1000_free_rx_resources(adapter,
static void e1000_configure_rx(struct e1000_adapter *adapter)

u32 rdlen, rctl, rxcsum;
rdlen = adapter->rx_ring[0].count *
adapter->clean_rx = e1000_clean_jumbo_rx_irq;
rdlen = adapter->rx_ring[0].count *
adapter->clean_rx = e1000_clean_rx_irq;

ew32(ITR, 1000000000 / (adapter->itr * 256));
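/* ITR counts in 256 ns units, so an interrupts-per-second target is
 * converted as 10^9 / (rate * 256); e.g. 8000 ints/s -> ~488.
 */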
rdba = adapter->rx_ring[0].dma;
ew32(RDBAH, (rdba >> 32));
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));

rxcsum = er32(RXCSUM);
ew32(RXCSUM, rxcsum);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)

e1000_clean_tx_ring(adapter, tx_ring);
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);

static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)

if (buffer_info->dma) {
	buffer_info->dma = 0;
if (buffer_info->skb) {

static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)

for (i = 0; i < tx_ring->count; i++) {
	e1000_unmap_and_free_tx_resource(adapter, buffer_info);
netdev_reset_queue(adapter->netdev);

static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)

e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)

e1000_clean_rx_ring(adapter, rx_ring);
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);

static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)

for (i = 0; i < rx_ring->count; i++) {
	if (buffer_info->dma &&
	    adapter->clean_rx == e1000_clean_rx_irq) {
	} else if (buffer_info->dma &&
		   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
	buffer_info->dma = 0;
	if (buffer_info->page) {
	if (buffer_info->skb) {
		dev_kfree_skb(buffer_info->skb);

static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)

e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)

if (netif_running(netdev))
	e1000_clean_all_rx_rings(adapter);

static void e1000_leave_82542_rst(struct e1000_adapter *adapter)

if (netif_running(netdev)) {
	e1000_configure_rx(adapter);
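/* The 82542 rev2.0 cannot have its receive address registers updated
 * while the receiver is active, so the receiver is held in reset
 * (flushing the Rx rings) around RAR/multicast updates and the Rx
 * unit is reconfigured on the way out.
 */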
static int e1000_set_mac(struct net_device *netdev, void *p)

if (!is_valid_ether_addr(addr->sa_data))
e1000_enter_82542_rst(adapter);
e1000_leave_82542_rst(adapter);
static void e1000_set_rx_mode(struct net_device *netdev)

bool use_uc = false;
e_err(probe, "memory allocation failed\n");
if (e1000_vlan_used(adapter))
e1000_enter_82542_rst(adapter);

if (i == rar_entries)
if (i == rar_entries) {
	u32 hash_reg, hash_bit, mta;
	hash_reg = (hash_value >> 5) & 0x7F;
	hash_bit = hash_value & 0x1F;
	mta = (1 << hash_bit);
	mcarray[hash_reg] |= mta;
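/* Addresses that do not fit in the receive address registers fall
 * back to the 128-entry multicast table array: bits 11:5 of the hash
 * select one of the 128 32-bit MTA registers and bits 4:0 select the
 * bit within it.
 */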
for (; i < rar_entries; i++) {
for (i = mta_reg_count - 1; i >= 0; i--) {
e1000_leave_82542_rst(adapter);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)

netif_wake_queue(netdev);

bool link_active = false;

static void e1000_watchdog(struct work_struct *work)
if ((netif_carrier_ok(netdev)) && link)
if (!netif_carrier_ok(netdev)) {
pr_info("%s NIC Link is Up %d Mbps %s, Flow Control: %s\n",
	"Full Duplex" : "Half Duplex",
	E1000_CTRL_RFCE) ? "RX" : ((ctrl &
	E1000_CTRL_TFCE) ? "TX" : "None")));

if (netif_carrier_ok(netdev)) {
pr_info("%s NIC Link is Down\n",
e1000_smartspeed(adapter);

if (!netif_carrier_ok(netdev)) {
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
ew32(ITR, 1000000000 / (itr * 256));
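/* Watchdog-side moderation: derive an interrupt rate from the octet
 * count since the last run, falling back to a fixed 8000 ints/s when
 * the link has been idle (goc == 0), then convert to 256 ns ITR units.
 */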
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)

unsigned int retval = itr_setting;
goto update_itr_done;
goto update_itr_done;

switch (itr_setting) {
if (bytes/packets > 8000)
else if ((packets < 5) && (bytes > 512))
if (bytes > 10000) {
	if (bytes/packets > 8000)
	else if ((packets < 10) || ((bytes/packets) > 1200))
	else if ((packets > 35))
} else if (bytes/packets > 2000)
else if (packets <= 2 && bytes < 512)
if (bytes > 25000) {
} else if (bytes < 6000) {
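/* e1000_update_itr() buckets recent traffic by its bytes/packet
 * ratio: streams of small packets bias toward low latency, while
 * sustained large transfers bias toward fewer interrupts (bulk).
 */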
u32 new_itr = adapter->itr;

adapter->tx_itr = e1000_update_itr(adapter,
adapter->rx_itr = e1000_update_itr(adapter,
switch (current_itr) {

if (new_itr != adapter->itr) {
	new_itr = new_itr > adapter->itr ?
		  min(adapter->itr + (new_itr >> 2), new_itr) :
	adapter->itr = new_itr;
	ew32(ITR, 1000000000 / (new_itr * 256));
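/* Rate increases are damped (old value plus a quarter of the target,
 * via new_itr >> 2) while decreases take effect immediately, so short
 * bursts do not whipsaw the moderation setting.
 */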
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
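/* The low bits of tx_flags carry per-frame capabilities while the
 * VLAN tag rides in the top 16 bits (E1000_TX_FLAGS_VLAN_MASK /
 * _SHIFT), keeping everything in a single u32.
 */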
u16 ipcse = 0, tucse, mss;

if (skb_is_gso(skb)) {
	if (skb_header_cloned(skb)) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
struct iphdr *iph = ip_hdr(skb);
ipcse = skb_transport_offset(skb) - 1;
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check =
	&ipv6_hdr(skb)->daddr,
ipcss = skb_network_offset(skb);
ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
tucss = skb_transport_offset(skb);
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
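/* Offsets for the TSO context descriptor: ipcss/tucss are where the
 * IP and TCP headers start, ipcso/tucso locate each checksum field
 * within the buffer, and ipcse bounds the IP checksum; the hardware
 * uses these to fix up checksums in every segment it produces.
 */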
if (++i == tx_ring->count)
	i = 0;

e_warn(drv, "checksum_partial proto=%x!\n",
css = skb_checksum_start_offset(skb);
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1 << E1000_MAX_TXD_PWR)
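/* Each data descriptor can carry at most 2^12 = 4096 bytes, so larger
 * buffers are chopped into max_per_txd-sized pieces below.
 */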
unsigned int max_per_txd, unsigned int nr_frags,
unsigned int len = skb_headlen(skb);

size = min(len, max_per_txd);
if (unlikely(mss && !nr_frags && size == len && size > 8))
(size > 2015) && count == 0))
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
for (f = 0; f < nr_frags; f++) {
frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
unsigned long bufend;
size = min(len, max_per_txd);
if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
bufend = (unsigned long)
bufend += offset + size - 1;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
segs = skb_shinfo(skb)->gso_segs ?: 1;
bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
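/* TSO replicates the protocol headers in every segment on the wire,
 * so the accounted byte count adds one header copy per extra segment
 * on top of skb->len.
 */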
buffer_info->dma = 0;
i += tx_ring->count;
e1000_unmap_and_free_tx_resource(adapter, buffer_info);

if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)

goto no_fifo_stall_required;
no_fifo_stall_required:

static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)

netif_stop_queue(netdev);
netif_start_queue(netdev);

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)

return __e1000_maybe_stop_tx(netdev, size);

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
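/* Conservative upper bound on descriptors needed for S bytes with a
 * 2^X-byte per-descriptor limit; it rounds up and may overcount by
 * one when S is an exact multiple, which only errs on the safe side.
 */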
unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb);
unsigned int nr_frags;

skb_set_tail_pointer(skb, ETH_ZLEN);
mss = skb_shinfo(skb)->gso_size;
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (skb->data_len && hdr_len == len) {
	unsigned int pull_size;
if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
e_err(drv, "__pskb_pull_tail "
len = skb_headlen(skb);

nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
	count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),

if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
(e1000_82547_fifo_workaround(adapter, skb)))) {
	netif_stop_queue(netdev);
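/* Two spare descriptors are requested beyond the computed need so the
 * ring tail can never touch the head; on the 82547 the FIFO
 * workaround may additionally stop the queue until the Tx FIFO
 * drains, avoiding a hardware hang on FIFO wrap.
 */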
tso = e1000_tso(adapter, tx_ring, skb);
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,

netdev_sent_queue(netdev, skb->len);
skb_tx_timestamp(skb);
e1000_tx_queue(adapter, tx_ring, tx_flags, count);
static const char * const reg_name[] = {
	"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
	"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
	"TIDV", "TXDCTL", "TADV", "TARC0",
	"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
	"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
	"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
	"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
regs_buff[2] = er32(RCTL);
regs_buff[3] = er32(RDLEN);
regs_buff[4] = er32(RDH);
regs_buff[5] = er32(RDT);
regs_buff[6] = er32(RDTR);

regs_buff[8] = er32(TDBAL);
regs_buff[9] = er32(TDBAH);
regs_buff[10] = er32(TDLEN);
regs_buff[11] = er32(TDH);
regs_buff[12] = er32(TDT);
regs_buff[13] = er32(TIDV);
regs_buff[14] = er32(TXDCTL);
regs_buff[15] = er32(TADV);
regs_buff[16] = er32(TARC0);

regs_buff[17] = er32(TDBAL1);
regs_buff[18] = er32(TDBAH1);
regs_buff[19] = er32(TDLEN1);
regs_buff[20] = er32(TDH1);
regs_buff[21] = er32(TDT1);
regs_buff[22] = er32(TXDCTL1);
regs_buff[23] = er32(TARC1);
regs_buff[24] = er32(CTRL_EXT);
regs_buff[25] = er32(ERT);
regs_buff[26] = er32(RDBAL0);
regs_buff[27] = er32(RDBAH0);
regs_buff[28] = er32(TDFH);
regs_buff[29] = er32(TDFT);
regs_buff[30] = er32(TDFHS);
regs_buff[31] = er32(TDFTS);
regs_buff[32] = er32(TDFPC);
regs_buff[33] = er32(RDFH);
regs_buff[34] = er32(RDFT);
regs_buff[35] = er32(RDFHS);
regs_buff[36] = er32(RDFTS);
regs_buff[37] = er32(RDFPC);
pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);

e1000_regdump(adapter);
pr_info("TX Desc ring0 dump\n");
pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
goto rx_ring_summary;

for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
struct my_u *u = (struct my_u *)tx_desc;
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",

pr_info("\nRX Desc ring dump\n");
pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");

for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
struct my_u *u = (struct my_u *)rx_desc;
pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
	(u64)buffer_info->dma, buffer_info->skb, type);
pr_info("Rx descriptor cache in 64bit format\n");
for (i = 0x6000; i <= 0x63FF; i += 0x10) {
	pr_info("R%04X: %08X|%08X %08X|%08X\n",
		readl(adapter->hw.hw_addr + i+4),
		readl(adapter->hw.hw_addr + i),
		readl(adapter->hw.hw_addr + i+12),
		readl(adapter->hw.hw_addr + i+8));

pr_info("Tx descriptor cache in 64bit format\n");
for (i = 0x7000; i <= 0x73FF; i += 0x10) {
	pr_info("T%04X: %08X|%08X %08X|%08X\n",
		readl(adapter->hw.hw_addr + i+4),
		readl(adapter->hw.hw_addr + i),
		readl(adapter->hw.hw_addr + i+12),
		readl(adapter->hw.hw_addr + i+8));
static void e1000_tx_timeout(struct net_device *netdev)

static void e1000_reset_task(struct work_struct *work)

e_err(drv, "Reset adapter\n");
e1000_reinit_safe(adapter);

return &netdev->stats;
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)

e_err(probe, "Invalid MTU setting\n");
e_err(probe, "Jumbo Frames not supported.\n");
if (netif_running(netdev))

#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)

pr_info("%s changing MTU from %d to %d\n",
	netdev->name, netdev->mtu, new_mtu);
netdev->mtu = new_mtu;

if (netif_running(netdev))
unsigned long flags;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

if (pci_channel_offline(pdev))
adapter->stats.crcerrs += er32(CRCERRS);
adapter->stats.prc1023 += er32(PRC1023);
adapter->stats.prc1522 += er32(PRC1522);
adapter->stats.symerrs += er32(SYMERRS);
adapter->stats.latecol += er32(LATECOL);
adapter->stats.xoffrxc += er32(XOFFRXC);
adapter->stats.xofftxc += er32(XOFFTXC);
adapter->stats.ptc1023 += er32(PTC1023);
adapter->stats.ptc1522 += er32(PTC1522);
adapter->stats.algnerrc += er32(ALGNERRC);
adapter->stats.cexterr += er32(CEXTERR);

netdev->stats.multicast = adapter->stats.mprc;
netdev->stats.collisions = adapter->stats.colc;

netdev->stats.rx_errors = adapter->stats.rxerrc +
	adapter->stats.crcerrs + adapter->stats.algnerrc +
	adapter->stats.cexterr;
netdev->stats.rx_length_errors = adapter->stats.rlerrc;
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
netdev->stats.rx_missed_errors = adapter->stats.mpc;

netdev->stats.tx_errors = adapter->stats.txerrc;
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
netdev->stats.tx_window_errors = adapter->stats.latecol;
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
netdev->stats.tx_carrier_errors = 0;
adapter->stats.tncrs = 0;

adapter->phy_stats.idle_errors += phy_tmp;
adapter->phy_stats.receive_errors += phy_tmp;

spin_unlock_irqrestore(&adapter->stats_lock, flags);
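/* The hardware statistics registers are clear-on-read, so each er32()
 * above accumulates with += instead of assigning.
 */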
if (likely(napi_schedule_prep(&adapter->napi))) {
e1000_irq_enable(adapter);

int tx_clean_complete = 0, work_done = 0;

tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
if (!tx_clean_complete)

if (work_done < budget) {
	e1000_set_itr(adapter);
	e1000_irq_enable(adapter);
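/* Standard NAPI contract: interrupts are re-enabled only when a poll
 * finishes under budget; otherwise the core keeps polling.
 */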
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)

unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
unsigned int bytes_compl = 0, pkts_compl = 0;

(count < tx_ring->count)) {
	bool cleaned = false;
	for ( ; !cleaned; count++) {
		cleaned = (i == eop);
		total_tx_packets += buffer_info->segs;
		total_tx_bytes += buffer_info->bytecount;
		if (buffer_info->skb) {
			bytes_compl += buffer_info->skb->len;
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);

netdev_completed_queue(netdev, pkts_compl, bytes_compl);
#define TX_WAKE_THRESHOLD 32
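/* Restart hysteresis: the queue is woken only once at least
 * TX_WAKE_THRESHOLD descriptors are free, so a single completion
 * cannot bounce the queue between stopped and running.
 */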
if (unlikely(count && netif_carrier_ok(netdev) &&
if (netif_queue_stopped(netdev) &&
	netif_wake_queue(netdev);
e_err(drv, "Detected Tx Unit Hang\n"
      "  next_to_use          <%x>\n"
      "  next_to_clean        <%x>\n"
      "buffer_info[next_to_clean]\n"
      "  time_stamp           <%lx>\n"
      "  next_to_watch        <%x>\n"
      "  next_to_watch.status <%x>\n",
      (unsigned long)((tx_ring - adapter->tx_ring) /
e1000_dump(adapter);
netif_stop_queue(netdev);

netdev->stats.tx_bytes += total_tx_bytes;
netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,

skb_checksum_none_assert(skb);
if (likely(status & E1000_RXD_STAT_TCPCS)) {

static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,

__vlan_hwaccel_put_tag(skb, vid);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)

unsigned long irq_flags;
int cleaned_count = 0;
bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;

if (*work_done >= work_to_do)
status = rx_desc->status;
skb = buffer_info->skb;
if (++i == rx_ring->count)
	i = 0;
buffer_info->dma = 0;
last_byte = *(mapped + length - 1);

#define rxtop rx_ring->rx_skb_top
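/* Jumbo receive: rxtop accumulates the frame while each non-EOP
 * descriptor contributes its page as another fragment; the assembled
 * skb is passed up only when EOP arrives.
 */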
if (!(status & E1000_RXD_STAT_EOP)) {
skb_fill_page_desc(rxtop, 0, buffer_info->page,
skb_fill_page_desc(rxtop,
		   skb_shinfo(rxtop)->nr_frags,
		   buffer_info->page, 0, length);
e1000_consume_page(buffer_info, rxtop, length);
skb_fill_page_desc(rxtop,
		   skb_shinfo(rxtop)->nr_frags,
		   buffer_info->page, 0, length);
e1000_consume_page(buffer_info, skb, length);
skb_tailroom(skb) >= length) {
memcpy(skb_tail_pointer(skb), vaddr, length);
skb_fill_page_desc(skb, 0,
		   buffer_info->page, 0,
e1000_consume_page(buffer_info, skb,

e1000_rx_checksum(adapter,
total_rx_bytes += (skb->len - 4);
pskb_trim(skb, skb->len - 4);
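/* The hardware leaves the 4-byte Ethernet CRC on the frame, so it is
 * excluded from the byte counters and trimmed off before the skb goes
 * up the stack.
 */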
if (!pskb_may_pull(skb, ETH_HLEN)) {
	e_err(drv, "pskb_may_pull failed.\n");
e1000_receive_skb(adapter, status, rx_desc->special, skb);
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
buffer_info = next_buffer;
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

netdev->stats.rx_bytes += total_rx_bytes;
netdev->stats.rx_packets += total_rx_packets;
static void e1000_check_copybreak(struct net_device *netdev,

new_skb = netdev_alloc_skb_ip_align(netdev, length);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)

unsigned long flags;
int cleaned_count = 0;
bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;

if (*work_done >= work_to_do)
status = rx_desc->status;
skb = buffer_info->skb;
if (++i == rx_ring->count)
	i = 0;
buffer_info->dma = 0;

if (unlikely(!(status & E1000_RXD_STAT_EOP)))
e_dbg("Receive packet consumed multiple buffers\n");
if (status & E1000_RXD_STAT_EOP)
u8 last_byte = *(skb->data + length - 1);
total_rx_bytes += (length - 4);
e1000_check_copybreak(netdev, buffer_info, length, &skb);
e1000_rx_checksum(adapter,
e1000_receive_skb(adapter, status, rx_desc->special, skb);
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
buffer_info = next_buffer;
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

netdev->stats.rx_bytes += total_rx_bytes;
netdev->stats.rx_packets += total_rx_packets;
unsigned int bufsz = 256 - 16; /* for skb_reserve */

while (cleaned_count--) {
skb = buffer_info->skb;
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
if (!buffer_info->page) {
if (!buffer_info->dma) {
	buffer_info->page, 0,
buffer_info->dma = 0;
i = (rx_ring->count - 1);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,

while (cleaned_count--) {
skb = buffer_info->skb;
skb = netdev_alloc_skb_ip_align(netdev, bufsz);

if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
	e_err(rx_err, "skb align check failed: %u bytes at %p\n",
	      bufsz, skb->data);
	skb = netdev_alloc_skb_ip_align(netdev, bufsz);
	dev_kfree_skb(oldskb);
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
	dev_kfree_skb(oldskb);
dev_kfree_skb(oldskb);
buffer_info->dma = 0;

if (!e1000_check_64k_bound(adapter,
			   (void *)(unsigned long)buffer_info->dma,
	e_err(rx_err, "dma align check failed: %u bytes at %p\n",
	      (void *)(unsigned long)buffer_info->dma);
buffer_info->dma = 0;

i = (rx_ring->count - 1);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
	return;
phy_ctrl &= ~CR_1000T_MS_ENABLE;

return e1000_mii_ioctl(netdev, ifr, cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)

unsigned long flags;
spin_unlock_irqrestore(&adapter->stats_lock, flags);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
spin_unlock_irqrestore(&adapter->stats_lock, flags);
else if (mii_reg & 0x2000)

if (netif_running(adapter->netdev))
if (mii_reg & MII_CR_POWER_DOWN)
if (netif_running(adapter->netdev))

e_err(probe, "Error in setting MWI\n");
struct e1000_hw *hw = &adapter->hw;

if (features & NETIF_F_HW_VLAN_RX) {

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,

e1000_irq_disable(adapter);
__e1000_vlan_mode(adapter, adapter->netdev->features);
e1000_update_mng_vlan(adapter);
e1000_irq_enable(adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)

e1000_irq_disable(adapter);
__e1000_vlan_mode(adapter, features);
e1000_irq_enable(adapter);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)

if (!e1000_vlan_used(adapter))
	e1000_vlan_filter_on_off(adapter, true);

index = (vid >> 5) & 0x7F;
vfta |= (1 << (vid & 0x1F));
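/* The VLAN filter table array (VFTA) is 128 32-bit registers wide:
 * bits 11:5 of the VID pick the register, bits 4:0 pick the bit,
 * covering all 4096 possible VLAN IDs.
 */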
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)

e1000_irq_disable(adapter);
e1000_irq_enable(adapter);

index = (vid >> 5) & 0x7F;
vfta &= ~(1 << (vid & 0x1F));

if (!e1000_vlan_used(adapter))
	e1000_vlan_filter_on_off(adapter, false);
static void e1000_restore_vlan(struct e1000_adapter *adapter)

if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, true);
e1000_vlan_rx_add_vid(adapter->netdev, vid);
struct e1000_hw *hw = &adapter->hw;

if ((spd & 1) || (dplx & ~1))
switch (spd + dplx) {
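/* Duplex is 0 or 1 and the supported speeds are even multiples of
 * ten, so spd + dplx encodes each speed/duplex pair uniquely (e.g.
 * SPEED_100 + DUPLEX_FULL == 101); the sanity check above rejects
 * values that would alias.
 */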
e_err(probe, "Unsupported Speed/Duplex configuration\n");
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)

struct net_device *netdev = pci_get_drvdata(pdev);

if (netif_running(netdev)) {
e1000_setup_rctl(adapter);
e1000_set_rx_mode(netdev);

#define E1000_CTRL_ADVD3WUC		0x00100000
#define E1000_CTRL_EN_PHY_PWR_MGMT	0x00200000

ctrl_ext = er32(CTRL_EXT);
ew32(CTRL_EXT, ctrl_ext);

e1000_release_manageability(adapter);

*enable_wake = !!wufc;
*enable_wake = true;
if (netif_running(netdev))
	e1000_free_irq(adapter);

retval = __e1000_shutdown(pdev, &wake);
static int e1000_resume(struct pci_dev *pdev)

struct net_device *netdev = pci_get_drvdata(pdev);
pr_err("Cannot enable PCI device from suspend\n");
if (netif_running(netdev)) {
	err = e1000_request_irq(adapter);
e1000_init_manageability(adapter);
if (netif_running(netdev))

static void e1000_shutdown(struct pci_dev *pdev)

__e1000_shutdown(pdev, &wake);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void e1000_netpoll(struct net_device *netdev)

e1000_intr(adapter->pdev->irq, netdev);
struct net_device *netdev = pci_get_drvdata(pdev);
if (netif_running(netdev))

struct net_device *netdev = pci_get_drvdata(pdev);
pr_err("Cannot re-enable PCI device after reset.\n");

static void e1000_io_resume(struct pci_dev *pdev)

struct net_device *netdev = pci_get_drvdata(pdev);
e1000_init_manageability(adapter);
if (netif_running(netdev)) {
	pr_info("can't bring device back up after reset\n");