#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#define DRV_EXTRAVERSION "-k"
#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
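/* The "-k" extraversion marks the in-kernel build of e1000e, as opposed to
 * Intel's out-of-tree releases of the same driver.
 */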
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;

static const struct e1000_info *e1000_info_tbl[] = {

#define E1000_RDFH	0x02410
#define E1000_RDFT	0x02418
#define E1000_RDFHS	0x02420
#define E1000_RDFTS	0x02428
#define E1000_RDFPC	0x02430

#define E1000_TDFH	0x03410
#define E1000_TDFT	0x03418
#define E1000_TDFHS	0x03420
#define E1000_TDFTS	0x03428
#define E1000_TDFPC	0x03430
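/* Rx/Tx data FIFO head, tail, saved head/tail and packet-count registers.
 * These are not touched on the hot path; they are only read when dumping
 * adapter state for debugging in the e1000e_dump() path below.
 */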
	switch (reginfo->ofs) {
		for (n = 0; n < 2; n++)
		for (n = 0; n < 2; n++)
		for (n = 0; n < 2; n++)
			reginfo->name, __er32(hw, reginfo->ofs));

	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);

	pr_info("packet dump for ps_page %d:\n", i);

	pr_info("Device Name     state            trans_start      last_rx\n");
	pr_info("%-15s %016lX %016lX %016lX\n",

	pr_info(" Register Name   Value\n");
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);

	if (!netdev || !netif_running(netdev))

	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		(unsigned long long)buffer_info->dma,

		goto rx_ring_summary;

	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		u0 = (struct my_u0 *)tx_desc;
			next_desc = " NTC/U";
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(unsigned long long)buffer_info->dma,
			buffer_info->skb, next_desc);

			16, 1, buffer_info->skb->data,
			buffer_info->skb->len, true);

	pr_info("Queue [NTU] [NTC]\n");

	pr_info("R  [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
	pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
	for (i = 0; i < rx_ring->count; i++) {
		u1 = (struct my_u1 *)rx_desc_ps;
			pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
				buffer_info->skb, next_desc);
			pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
				(unsigned long long)buffer_info->dma,
				buffer_info->skb, next_desc);
				e1000e_dump_ps_pages(adapter,

	pr_info("R  [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
	pr_info("RWB[desc] [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

	for (i = 0; i < rx_ring->count; i++) {
		u1 = (struct my_u1 *)rx_desc;
		if (staterr & E1000_RXD_STAT_DD) {
			pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
				buffer_info->skb, next_desc);
			pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
				(unsigned long long)buffer_info->dma,
				buffer_info->skb, next_desc);
				buffer_info->skb->data,

	__vlan_hwaccel_put_tag(skb, tag);
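	/* Note: the two-argument __vlan_hwaccel_put_tag() is the pre-3.10
	 * kernel API; later kernels add a VLAN protocol argument.
	 */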
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
	u16 status = (u16)status_err;

	skb_checksum_none_assert(skb);

static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
	s32 ret_val = __ew32_prepare(hw);

		e_err("ME firmware caused invalid RDT - resetting\n");

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
	s32 ret_val = __ew32_prepare(hw);

		e_err("ME firmware caused invalid TDT - resetting\n");
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)

	while (cleaned_count--) {
		skb = buffer_info->skb;

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);

			e1000e_update_rdt_wa(rx_ring, i);

		if (i == rx_ring->count)

static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)

	while (cleaned_count--) {
				rx_desc->read.buffer_addr[j + 1] =
			if (!ps_page->page) {
				if (!ps_page->page) {
						"Rx DMA page map failed\n");
			rx_desc->read.buffer_addr[j + 1] =

		skb = __netdev_alloc_skb_ip_align(netdev,

			e1000e_update_rdt_wa(rx_ring, i << 1);

		if (i == rx_ring->count)

static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
	unsigned int bufsz = 256 - 16;
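	/* bufsz: the jumbo path allocates a small 256-byte skb per
	 * descriptor, less 16 bytes kept free as headroom for skb_reserve().
	 */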
	while (cleaned_count--) {
		skb = buffer_info->skb;

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);

		if (!buffer_info->page) {
		if (!buffer_info->dma)
						  buffer_info->page, 0,

		i = (rx_ring->count - 1);

			e1000e_update_rdt_wa(rx_ring, i);

static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)

		skb = buffer_info->skb;

		if (i == rx_ring->count)

		buffer_info->dma = 0;

			e_dbg("Receive packet consumed multiple buffers\n");

		if (staterr & E1000_RXD_STAT_EOP)

				netdev_alloc_skb_ip_align(netdev, length);
				skb_copy_to_linear_data_offset(new_skb,

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

		buffer_info = next_buffer;

		cleaned_count = e1000_desc_unused(rx_ring);

static void e1000_put_txbuf(struct e1000_ring *tx_ring,
	if (buffer_info->dma) {
		buffer_info->dma = 0;
	if (buffer_info->skb) {

	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	u16 phy_status, phy_1000t_status, phy_ext_status;

	netif_stop_queue(netdev);

	e_err("Detected Hardware Unit Hang:\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  next_to_watch.status <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",

	e_err("Try turning off Tx pause (flow control) via ethtool\n");

static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	       (count < tx_ring->count)) {
		bool cleaned = false;
		for (; !cleaned; count++) {
			cleaned = (i == eop);
			total_tx_packets += buffer_info->segs;
			total_tx_bytes += buffer_info->bytecount;
			if (buffer_info->skb) {
				bytes_compl += buffer_info->skb->len;

			e1000_put_txbuf(tx_ring, buffer_info);

			if (i == tx_ring->count)

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
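	/* Wake the queue only once at least 32 descriptors are free again,
	 * so a nearly-full ring does not bounce between stopped and started
	 * on every completed packet.
	 */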
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		if (netif_queue_stopped(netdev) &&
			netif_wake_queue(netdev);

	return count < tx_ring->count;

static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)

		skb = buffer_info->skb;

		if (i == rx_ring->count)

		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP))
			e_dbg("Packet Split buffers didn't pick up the full packet\n");

		if (staterr & E1000_RXD_STAT_EOP)

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&

			e_dbg("Last part of the packet spanning multiple descriptors\n");

		ps_page = &buffer_info->ps_pages[0];

			memcpy(skb_tail_pointer(skb), vaddr, l1);

			skb_fill_page_desc(skb, j, ps_page->page, 0, length);

			pskb_trim(skb, skb->len - 4);

		total_rx_bytes += skb->len;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

		buffer_info = next_buffer;

		cleaned_count = e1000_desc_unused(rx_ring);

static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)

		skb = buffer_info->skb;

		if (i == rx_ring->count)

		buffer_info->dma = 0;

		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&

#define rxtop (rx_ring->rx_skb_top)
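		/* rxtop holds the partially assembled skb of a frame that
		 * spans several Rx descriptors, carried in the ring between
		 * successive cleanup passes until its EOP descriptor arrives.
		 */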
		if (!(staterr & E1000_RXD_STAT_EOP)) {
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
			e1000_consume_page(buffer_info, rxtop, length);
				skb_fill_page_desc(rxtop,
						   skb_shinfo(rxtop)->nr_frags,
						   buffer_info->page, 0, length);
					e1000_consume_page(buffer_info, skb, length);
				    skb_tailroom(skb) >= length) {
					memcpy(skb_tail_pointer(skb), vaddr,
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
					e1000_consume_page(buffer_info, skb,

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		total_rx_bytes += skb->len;

			if (!pskb_may_pull(skb, ETH_HLEN)) {
				e_err("pskb_may_pull failed.\n");

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

		buffer_info = next_buffer;

		cleaned_count = e1000_desc_unused(rx_ring);

static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
	for (i = 0; i < rx_ring->count; i++) {
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
			buffer_info->dma = 0;
		if (buffer_info->page) {
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);

		e1000e_update_rdt_wa(rx_ring, 0);

static void e1000e_downshift_workaround(struct work_struct *work)

	hw->mac.get_link_status = true;

	if (netif_carrier_ok(netdev) &&

	if (napi_schedule_prep(&adapter->napi)) {

static irqreturn_t e1000_intr(int irq, void *data)
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;

	if (netif_carrier_ok(netdev) &&

	if (napi_schedule_prep(&adapter->napi)) {

static irqreturn_t e1000_msix_other(int irq, void *data)
	if (!(icr & E1000_ICR_INT_ASSERTED)) {

	if (!(icr & E1000_ICR_LSC))
		goto no_link_interrupt;
	hw->mac.get_link_status = true;

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
	if (!e1000_clean_tx_irq(tx_ring))

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
	if (napi_schedule_prep(&adapter->napi)) {

static void e1000_configure_msix(struct e1000_adapter *adapter)
	u32 ctrl_ext, ivar = 0;

#define E1000_IVAR_INT_ALLOC_VALID 0x8

	ctrl_ext = er32(CTRL_EXT);

#define E1000_EIAC_MASK_82574 0x01F00000
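	/* On the 82574, the EIAC mask selects which interrupt causes are
	 * auto-cleared when the corresponding MSI-X vector fires, so the
	 * per-vector handlers need not write ICR themselves.
	 */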
	ew32(CTRL_EXT, ctrl_ext);

					sizeof(struct msix_entry),

			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
		if (!pci_enable_msi(adapter->pdev)) {
			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");

static int e1000_request_msix(struct e1000_adapter *adapter)
	int err = 0, vector = 0;

		 sizeof(adapter->rx_ring->name) - 1,
		 "%s-rx-0", netdev->name);
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
	adapter->rx_ring->itr_register = adapter->hw.hw_addr +

		 sizeof(adapter->tx_ring->name) - 1,
		 "%s-tx-0", netdev->name);
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
	adapter->tx_ring->itr_register = adapter->hw.hw_addr +

			  e1000_msix_other, 0, netdev->name, netdev);

	e1000_configure_msix(adapter);

		err = e1000_request_msix(adapter);
				  netdev->name, netdev);
				  netdev->name, netdev);
		e_err("Unable to allocate interrupt, Error: %d\n", err);

static void e1000_irq_disable(struct e1000_adapter *adapter)
		ew32(EIAC_82574, 0);

		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);

	ctrl_ext = er32(CTRL_EXT);

	ctrl_ext = er32(CTRL_EXT);

static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");

	for (i = 0; i < rx_ring->count; i++) {
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,

	rx_ring->size = rx_ring->count * desc_len;

	err = e1000_alloc_ring_dma(adapter, rx_ring);

	for (i = 0; i < rx_ring->count; i++) {

	e_err("Unable to allocate memory for the receive descriptor ring\n");

static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
	for (i = 0; i < tx_ring->count; i++) {
		e1000_put_txbuf(tx_ring, buffer_info);

	netdev_reset_queue(adapter->netdev);

		e1000e_update_tdt_wa(tx_ring, 0);

	e1000_clean_tx_ring(tx_ring);

	e1000_clean_rx_ring(rx_ring);

	for (i = 0; i < rx_ring->count; i++)

static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
	unsigned int retval = itr_setting;

	switch (itr_setting) {
		if (bytes / packets > 8000)
		else if ((packets < 5) && (bytes > 512))
		if (bytes > 10000) {
			if (bytes / packets > 8000)
			else if ((packets < 10) || ((bytes / packets) > 1200))
			else if ((packets > 35))
		} else if (bytes / packets > 2000) {
		} else if (packets <= 2 && bytes < 512) {
		if (bytes > 25000) {
		} else if (bytes < 6000) {

	u32 new_itr = adapter->itr;

	adapter->tx_itr = e1000_update_itr(adapter,

	adapter->rx_itr = e1000_update_itr(adapter,

	switch (current_itr) {

	if (new_itr != adapter->itr) {
		new_itr = new_itr > adapter->itr ?
		    min(adapter->itr + (new_itr >> 2), new_itr) :
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
			adapter->rx_ring->set_itr = 1;
			ew32(ITR, 1000000000 / (new_itr * 256));

	u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
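	/* The ITR register counts in 256 ns units, so a target rate of itr
	 * interrupts per second is programmed as 10^9 / (itr * 256); zero
	 * disables interrupt throttling entirely.
	 */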
	for (vector = 0; vector < adapter->num_vectors; vector++)

		e_err("Unable to allocate memory for queues\n");

	int tx_cleaned = 1, work_done = 0;

	adapter = netdev_priv(poll_dev);

		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);

	if (work_done < weight) {
			e1000_set_itr(adapter);
				e1000_irq_enable(adapter);

	if ((adapter->hw.mng_cookie.status &

	index = (vid >> 5) & 0x7F;
	vfta |= (1 << (vid & 0x1F));
	hw->mac.ops.write_vfta(hw, index, vfta);

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
	if ((adapter->hw.mng_cookie.status &

	index = (vid >> 5) & 0x7F;
	vfta &= ~(1 << (vid & 0x1F));
	hw->mac.ops.write_vfta(hw, index, vfta);

static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)

static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)

static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
	u16 vid = adapter->hw.mng_cookie.vlan_id;

	if (adapter->hw.mng_cookie.status &
		e1000_vlan_rx_add_vid(netdev, vid);
		e1000_vlan_rx_kill_vid(netdev, old_vid);

static void e1000_restore_vlan(struct e1000_adapter *adapter)
	e1000_vlan_rx_add_vid(adapter->netdev, 0);
		e1000_vlan_rx_add_vid(adapter->netdev, vid);

	struct e1000_hw *hw = &adapter->hw;
	u32 manc, manc2h, mdef, i, j;

	manc2h = er32(MANC2H);

	switch (hw->mac.type) {

		for (i = 0, j = 0; i < 8; i++) {
			mdef = er32(MDEF(i));

		for (i = 0, j = 0; i < 8; i++)
			if (er32(MDEF(i)) == 0) {

			e_warn("Unable to create IPMI pass-through filter\n");

	ew32(MANC2H, manc2h);

static void e1000_configure_tx(struct e1000_adapter *adapter)
	tdba = tx_ring->dma;
	ew32(TDBAH(0), (tdba >> 32));
	ew32(TDLEN(0), tdlen);

	ew32(TXDCTL(0), txdctl);

		tarc = er32(TARC(0));
#define SPEED_MODE_BIT (1 << 21)
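		/* Bit 21 of TARC(0) is the "speed mode" bit: it is set here
		 * and cleared again later by the watchdog if the link does
		 * not come up at gigabit speed.
		 */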
		ew32(TARC(0), tarc);

		tarc = er32(TARC(0));
		ew32(TARC(0), tarc);
		tarc = er32(TARC(1));
		ew32(TARC(1), tarc);

	hw->mac.ops.config_collision_dist(hw);

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

			e_dbg("failed to enable jumbo frame workaround mode\n");

		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
		phy_data |= (1 << 2);
		e1e_wphy(hw, PHY_REG(770, 26), phy_data);

		e1e_rphy(hw, 22, &phy_data);
		phy_data |= (1 << 14);
		e1e_wphy(hw, 0x10, 0x2823);
		e1e_wphy(hw, 0x11, 0x0003);
		e1e_wphy(hw, 22, phy_data);

	rfctl = er32(RFCTL);

	ew32(PSRCTL, psrctl);

static void e1000_configure_rx(struct e1000_adapter *adapter)
	u32 rdlen, rctl, rxcsum, ctrl_ext;

		rdlen = rx_ring->count *
		adapter->clean_rx = e1000_clean_rx_irq_ps;
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->clean_rx = e1000_clean_rx_irq;

	ctrl_ext = er32(CTRL_EXT);
	ew32(IAM, 0xffffffff);
	ew32(CTRL_EXT, ctrl_ext);

	rdba = rx_ring->dma;
	ew32(RDBAH(0), (rdba >> 32));
	ew32(RDLEN(0), rdlen);

	rxcsum = er32(RXCSUM);
	ew32(RXCSUM, rxcsum);

		ew32(RXDCTL(0), rxdctl | 0x3);

static int e1000e_write_mc_addr_list(struct net_device *netdev)
		hw->mac.ops.update_mc_addr_list(hw, NULL, 0);

	hw->mac.ops.update_mc_addr_list(hw, mta_list, i);

	unsigned int rar_entries = hw->mac.rar_entry_count;

			hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);

	for (; rar_entries > 0; rar_entries--) {
		ew32(RAH(rar_entries), 0);

static void e1000e_set_rx_mode(struct net_device *netdev)
		e1000e_vlan_filter_disable(adapter);
			count = e1000e_write_mc_addr_list(netdev);
		e1000e_vlan_filter_enable(adapter);
		count = e1000e_write_uc_addr_list(netdev);
		e1000e_vlan_strip_enable(adapter);
		e1000e_vlan_strip_disable(adapter);

static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
	static const u32 rsskey[10] = {
		0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
		0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
	};
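	/* A fixed RSS key (rather than a per-boot random one) keeps the
	 * hash-to-queue mapping reproducible across resets and reloads.
	 */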
	for (i = 0; i < 10; i++)
		ew32(RSSRK(i), rsskey[i]);

	for (i = 0; i < 32; i++)

	rxcsum = er32(RXCSUM);
	ew32(RXCSUM, rxcsum);

	e1000e_set_rx_mode(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability_pt(adapter);

	e1000_configure_tx(adapter);

		e1000e_setup_rss_hash(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);

	if (adapter->hw.phy.ops.power_up)
		adapter->hw.phy.ops.power_up(&adapter->hw);

	adapter->hw.mac.ops.setup_link(&adapter->hw);

static void e1000_power_down_phy(struct e1000_adapter *adapter)
	if (adapter->hw.phy.ops.power_down)
		adapter->hw.phy.ops.power_down(&adapter->hw);

	u32 tx_space, min_tx_space, min_rx_space;

		tx_space = pba >> 16;

		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;

		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		if ((tx_space < min_tx_space) &&
		    ((min_tx_space - tx_space) < pba)) {
			pba -= min_tx_space - tx_space;

			if (pba < min_rx_space)

	switch (hw->mac.type) {

		hwm = min(((pba << 10) * 9 / 10),

			"Interrupt Throttle Rate turned off\n");
			"Interrupt Throttle Rate turned on\n");
			adapter->itr = 20000;

	mac->ops.reset_hw(hw);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	if (!netif_running(adapter->netdev) &&
		e1000_power_down_phy(adapter);

	e1000_get_phy_info(hw);

	e1000_configure(adapter);

		e1000_configure_msix(adapter);
	e1000_irq_enable(adapter);

	netif_start_queue(adapter->netdev);

static void e1000e_flush_descriptors(struct e1000_adapter *adapter)

static void e1000e_update_stats(struct e1000_adapter *adapter);

	netif_stop_queue(netdev);

	e1000_irq_disable(adapter);

	e1000e_update_stats(adapter);

	e1000e_flush_descriptors(adapter);
	e1000_clean_tx_ring(adapter->tx_ring);
	e1000_clean_rx_ring(adapter->rx_ring);

	if (!pci_channel_offline(adapter->pdev))

	if (e1000_alloc_queues(adapter))

	e1000_irq_disable(adapter);

static irqreturn_t e1000_intr_msi_test(int irq, void *data)
	e_dbg("icr is %08X\n", icr);

static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
	e1000_free_irq(adapter);

	err = pci_enable_msi(adapter->pdev);
		goto msi_test_failed;

			  netdev->name, netdev);
		goto msi_test_failed;

	e1000_irq_enable(adapter);

	e1000_irq_disable(adapter);

		e_info("MSI interrupt test failed, using legacy interrupt.\n");
		e_dbg("MSI interrupt test succeeded!\n");

	return e1000_request_irq(adapter);

			      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	if (pci_cmd & PCI_COMMAND_SERR) {

static int e1000_open(struct net_device *netdev)
	pm_runtime_get_sync(&pdev->dev);

	if ((adapter->hw.mng_cookie.status &
		e1000_update_mng_vlan(adapter);

	e1000_configure(adapter);

	err = e1000_request_irq(adapter);

		err = e1000_test_msi(adapter);
			e_err("Interrupt allocation failed\n");

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	pm_runtime_put(&pdev->dev);

	e1000_power_down_phy(adapter);

	pm_runtime_put_sync(&pdev->dev);

static int e1000_close(struct net_device *netdev)
	pm_runtime_get_sync(&pdev->dev);

	napi_disable(&adapter->napi);

	e1000_free_irq(adapter);
	e1000_power_down_phy(adapter);

	if (adapter->hw.mng_cookie.status &
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	pm_runtime_put_sync(&pdev->dev);

static int e1000_set_mac(struct net_device *netdev, void *p)
	if (!is_valid_ether_addr(addr->sa_data))

	hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

		hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
				    adapter->hw.mac.rar_entry_count - 1);

static void e1000e_update_phy_task(struct work_struct *work)
	e1000_get_phy_info(&adapter->hw);

static void e1000_update_phy_info(unsigned long data)

static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
	ret_val = hw->phy.ops.acquire(hw);

	ret_val = hw->phy.ops.set_page(hw,

		adapter->stats.scc += phy_data;

		adapter->stats.ecol += phy_data;

		adapter->stats.mcc += phy_data;

		adapter->stats.latecol += phy_data;

		hw->mac.collision_delta = phy_data;

		adapter->stats.dc += phy_data;

		adapter->stats.tncrs += phy_data;

	hw->phy.ops.release(hw);

static void e1000e_update_stats(struct e1000_adapter *adapter)
	if (pci_channel_offline(pdev))

	adapter->stats.crcerrs += er32(CRCERRS);

		e1000e_update_phy_stats(adapter);

		adapter->stats.latecol += er32(LATECOL);

		hw->mac.collision_delta = er32(COLC);

	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);

	hw->mac.tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;

	adapter->stats.algnerrc += er32(ALGNERRC);
	adapter->stats.cexterr += er32(CEXTERR);

	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	netdev->stats.rx_errors = adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.cexterr;
	netdev->stats.rx_length_errors = adapter->stats.ruc +
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	netdev->stats.tx_errors = adapter->stats.ecol +
	    adapter->stats.latecol;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
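	/* adapter->stats accumulates the raw hardware counters; the block
	 * above folds them into the generic netdev->stats fields reported
	 * to userspace.
	 */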
static void e1000_phy_read_status(struct e1000_adapter *adapter)
		e_warn("Error reading PHY register\n");

static void e1000_print_link_info(struct e1000_adapter *adapter)
	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
	       (ctrl & E1000_CTRL_RFCE) ? "Rx" :
	       (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");

	bool link_active = false;

	switch (hw->phy.media_type) {
		if (hw->mac.get_link_status) {
			ret_val = hw->mac.ops.check_for_link(hw);
			link_active = !hw->mac.get_link_status;
		ret_val = hw->mac.ops.check_for_link(hw);
		ret_val = hw->mac.ops.check_for_link(hw);
		link_active = adapter->hw.mac.serdes_has_link;

		e_info("Gigabit has been disabled, downgrading speed\n");

static void e1000e_enable_receives(struct e1000_adapter *adapter)

static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)

static void e1000_watchdog(unsigned long data)

static void e1000_watchdog_task(struct work_struct *work)
	link = e1000e_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link) {
		pm_runtime_resume(netdev->dev.parent);

		e1000e_enable_receives(adapter);
			e1000_update_mng_vlan(adapter);

	if (!netif_carrier_ok(netdev)) {
			pm_runtime_resume(netdev->dev.parent);

			e1000_phy_read_status(adapter);
			mac->ops.get_link_up_info(&adapter->hw,
			e1000_print_link_info(adapter);

			    (hw->mac.autoneg == true) &&
				e_info("Autonegotiated half duplex but link partner cannot autoneg.  Try forcing full duplex if link gets many collisions.\n");

				tarc0 = er32(TARC(0));
				ew32(TARC(0), tarc0);

					e_info("10/100 speed: disabling TSO\n");

			if (phy->ops.cfg_on_link_up)
				phy->ops.cfg_on_link_up(hw);

		if (netif_carrier_ok(netdev)) {

	e1000e_update_stats(adapter);

	if (!netif_carrier_ok(netdev) &&
	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {

		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
			   adapter->gorc - adapter->gotc) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
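		/* Link-up ITR tuning: symmetric Tx/Rx traffic (dif == 0)
		 * lands at 2000 interrupts/sec, a fully one-sided load
		 * (dif == goc) at 8000, and mixed loads scale linearly in
		 * between, based on the Tx/Rx goodput delta versus the
		 * combined goodput.
		 */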
		e1000e_flush_descriptors(adapter);

		hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);

		e1000e_check_82574_phy_workaround(adapter);

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
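/* tx_flags packs the per-packet offload bits into the low word and, when
 * E1000_TX_FLAGS_VLAN is set, the 802.1Q tag into the upper 16 bits
 * (hence the 0xffff0000 mask and shift of 16).
 */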
	if (!skb_is_gso(skb))

	if (skb_header_cloned(skb)) {

	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	mss = skb_shinfo(skb)->gso_size;
		struct iphdr *iph = ip_hdr(skb);
		ipcse = skb_transport_offset(skb) - 1;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
						 &ipv6_hdr(skb)->daddr,

	ipcss = skb_network_offset(skb);
	ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
	tucss = skb_transport_offset(skb);
	tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;

	if (i == tx_ring->count)

		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;

			e_warn("checksum_partial proto=%x!\n",

	css = skb_checksum_start_offset(skb);

	if (i == tx_ring->count)

			unsigned int first, unsigned int max_per_txd,
			unsigned int nr_frags)
	unsigned int len = skb_headlen(skb);

		size = min(len, max_per_txd);

		if (i == tx_ring->count)

	for (f = 0; f < nr_frags; f++) {
		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);

			if (i == tx_ring->count)

			size = min(len, max_per_txd);

			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,

	segs = skb_shinfo(skb)->gso_segs ? : 1;

	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

		buffer_info->dma = 0;

			i += tx_ring->count;

		e1000_put_txbuf(tx_ring, buffer_info);

static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
		if (i == tx_ring->count)
	} while (--count > 0);

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))

		e1000e_update_tdt_wa(tx_ring, i);

#define MINIMUM_DHCP_PACKET_SIZE 282
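/* 282 bytes is the smallest complete DHCP frame: 14 (Ethernet) + 20 (IP) +
 * 8 (UDP) + 240 (minimal BOOTP/DHCP payload).
 */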
static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
	    (adapter->hw.mng_cookie.status &

		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));

		offset = (u8 *)udp + 8 - skb->data;
		length = skb->len - offset;

static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
	netif_stop_queue(adapter->netdev);

	if (e1000_desc_unused(tx_ring) < size)

	netif_start_queue(adapter->netdev);

static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
	if (e1000_desc_unused(tx_ring) >= size)
	return __e1000_maybe_stop_tx(tx_ring, size);

	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;

	if (skb->len <= 0) {

	mss = skb_shinfo(skb)->gso_size;
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && (hdr_len == len)) {
			unsigned int pull_size;
				e_err("__pskb_pull_tail failed.\n");
			len = skb_headlen(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),

	if (adapter->hw.mac.tx_pkt_filtering)
		e1000_transfer_dhcp_info(adapter, skb);

	if (e1000_maybe_stop_tx(tx_ring, count + 2))

	tso = e1000_tso(tx_ring, skb);
	else if (e1000_tx_csum(tx_ring, skb))

	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,

		skb_tx_timestamp(skb);

		netdev_sent_queue(netdev, skb->len);
		e1000_tx_queue(tx_ring, tx_flags, count);
		e1000_maybe_stop_tx(tx_ring,

static void e1000_tx_timeout(struct net_device *netdev)

static void e1000_reset_task(struct work_struct *work)
		e1000e_dump(adapter);
	e_err("Reset adapter\n");

	e1000e_update_stats(adapter);

	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.cexterr;

	    adapter->stats.latecol;

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
		e_err("Jumbo Frames not supported.\n");
		e_err("Unsupported MTU setting\n");
		e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");

	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))

	if (max_frame <= 2048)

	if (netif_running(netdev))

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
		data->phy_id = adapter->hw.phy.addr;
		e1000_phy_read_status(adapter);
		switch (data->reg_num & 0x1F) {
			data->val_out = (adapter->hw.phy.id >> 16);
			data->val_out = (adapter->hw.phy.id & 0xFFFF);

static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
		return e1000_mii_ioctl(netdev, ifr, cmd);

	retval = hw->phy.ops.acquire(hw);
		e_err("Could not acquire PHY\n");

	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));

	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);

	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);

		e_err("Could not set PHY Host Wakeup bit\n");

	hw->phy.ops.release(hw);

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		e1000_free_irq(adapter);

		if (status & E1000_STATUS_LU)

		e1000_setup_rctl(adapter);
		e1000e_set_rx_mode(netdev);

#define E1000_CTRL_ADVD3WUC 0x00100000
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
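/* CTRL bit 20 (ADVD3WUC) advertises D3Cold wake-up capability; bit 21
 * (EN_PHY_PWR_MGMT) enables PHY power management during shutdown/wake.
 */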
	    adapter->hw.phy.media_type ==

		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext);

		retval = e1000_init_phy_wakeup(adapter, wufc);

	*enable_wake = !!wufc;

	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
	if (sleep && wake) {

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
	struct net_device *netdev = pci_get_drvdata(pdev);

		e1000_power_off(pdev, sleep, wake);
		e1000_power_off(pdev, sleep, wake);

#ifdef CONFIG_PCIEASPM
	if (pdev->bus->self)

static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
	__e1000e_disable_aspm(pdev, state);

	return !!adapter->tx_ring->buffer_info;

static int __e1000_resume(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	u16 aspm_disable_flag = 0;

	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
			e_info("PHY Wakeup cause - %s\n",
			       "Link Status Change" : "other");
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
			e_info("MAC Wakeup cause - %s\n",

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev))

#ifdef CONFIG_PM_SLEEP
static int e1000_suspend(struct device *dev)
	retval = __e1000_shutdown(pdev, &wake, false);
		e1000_complete_shutdown(pdev, true, wake);

static int e1000_resume(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (e1000e_pm_ready(adapter))

	return __e1000_resume(pdev);

#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (e1000e_pm_ready(adapter)) {
		__e1000_shutdown(pdev, &wake, true);

static int e1000_idle(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!e1000e_pm_ready(adapter))

	if (!e1000e_has_link(adapter))

static int e1000_runtime_resume(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (!e1000e_pm_ready(adapter))

	return __e1000_resume(pdev);

static void e1000_shutdown(struct pci_dev *pdev)
	__e1000_shutdown(pdev, &wake, false);

	e1000_complete_shutdown(pdev, false, wake);

#ifdef CONFIG_NET_POLL_CONTROLLER

static irqreturn_t e1000_intr_msix(int irq, void *data)
		e1000_intr_msix_rx(msix_irq, netdev);
		e1000_intr_msix_tx(msix_irq, netdev);
		e1000_msix_other(msix_irq, netdev);

static void e1000_netpoll(struct net_device *netdev)
		e1000_intr_msix(adapter->pdev->irq, netdev);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		e1000_intr(adapter->pdev->irq, netdev);

	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev))

	struct net_device *netdev = pci_get_drvdata(pdev);
	u16 aspm_disable_flag = 0;

	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

			"Cannot re-enable PCI device after reset.\n");

static void e1000_io_resume(struct pci_dev *pdev)
	struct net_device *netdev = pci_get_drvdata(pdev);

	e1000_init_manageability_pt(adapter);

	if (netif_running(netdev)) {
			"can't bring device back up after reset\n");

static void e1000_print_device_info(struct e1000_adapter *adapter)
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	e_info("Intel(R) PRO/%s Network Connection\n",
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);

static void e1000_eeprom_checks(struct e1000_adapter *adapter)
	if (!ret_val && (!(buf & (1 << 0)))) {
			 "Warning: detected DSPD enabled in EEPROM\n");

static int e1000_set_features(struct net_device *netdev,
	if (features & NETIF_F_RXFCS) {

	if (netif_running(netdev))

	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
	.ndo_set_features	= e1000_set_features,

	static int cards_found;
	u16 aspm_disable_flag = 0;
	int i, err, pci_using_dac;

	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

		dev_err(&pdev->dev,
			"No usable DMA configuration, aborting\n");

		goto err_alloc_etherdev;
		goto err_alloc_etherdev;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	adapter->hw.mac.type = ei->mac;

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)

		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)

	netdev->mem_end = mmio_start + mmio_len;

	err = e1000_sw_init(adapter);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

		adapter->hw.phy.disable_polarity_correction = 0;

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
			 "PHY reset is blocked due to SOL/IDER session.\n");

	if (pci_using_dac) {

	adapter->hw.mac.ops.reset_hw(&adapter->hw);

		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			dev_err(&pdev->dev,
				"The NVM Checksum Is Not Valid\n");

	e1000_eeprom_checks(adapter);

	if (e1000e_read_mac_addr(&adapter->hw))
			"NVM Read Error while reading MAC address\n");

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",

	adapter->hw.mac.autoneg = 1;
	adapter->hw.phy.autoneg_advertised = 0x2f;

		eeprom_data = er32(WUC);

	    (adapter->hw.bus.func == 1))

	if (eeprom_data & eeprom_apme_mask)

	e1000_print_device_info(adapter);

	pm_runtime_put_noidle(&pdev->dev);

	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))

	if (adapter->hw.flash_address)

	struct net_device *netdev = pci_get_drvdata(pdev);

	e1000_power_down_phy(adapter);

	pm_runtime_get_noresume(&pdev->dev);

	if (adapter->hw.flash_address)

	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */

static const struct dev_pm_ops e1000_pm_ops = {
			   e1000_runtime_resume, e1000_idle)

	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.pm       = &e1000_pm_ops,
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler

static int __init e1000_init_module(void)
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

static void __exit e1000_exit_module(void)