Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
e1000_mac.c
Go to the documentation of this file.
1 /*******************************************************************************
2 
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <[email protected]>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #include <linux/if_ether.h>
29 #include <linux/delay.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 
34 #include "e1000_mac.h"
35 
36 #include "igb.h"
37 
38 static s32 igb_set_default_fc(struct e1000_hw *hw);
39 static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
40 
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the PCIe link speed and width in hw->bus by
 *  reading the PCIe Link Status capability register.
 *
 *  NOTE(review): the function signature line, the capability-offset
 *  argument to igb_read_pcie_cap_reg(), the speed case labels and the
 *  width mask/shift were lost in this extraction — confirm against the
 *  upstream driver source.
 */
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	/* Read the Link Status word from PCIe config space */
	ret_val = igb_read_pcie_cap_reg(hw,
	&pcie_link_status);
	if (ret_val) {
		/* NOTE(review): the fallback speed/width assignments for a
		 * failed config read were elided in this extraction.
		 */
	} else {
		/* Decode current link speed (CLS field) */
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
			break;
			break;
		default:
			break;
		}

		/* Link width comes from the NLW field of the same word;
		 * the mask/shift expression tail was elided here.
		 */
		bus->width = (enum e1000_bus_width)((pcie_link_status &
	}

	reg = rd32(E1000_STATUS);
	/* NOTE(review): the statement consuming 'reg' (presumably the
	 * function-number extraction from E1000_STATUS) was elided —
	 * verify against upstream.
	 */

	return 0;
}
87 
95 void igb_clear_vfta(struct e1000_hw *hw)
96 {
97  u32 offset;
98 
99  for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
100  array_wr32(E1000_VFTA, offset, 0);
101  wrfl();
102  }
103 }
104 
/**
 *  igb_write_vfta - Write value to a VLAN filter table entry
 *  @hw: pointer to the HW structure
 *  @offset: register offset within the VLAN filter table array
 *  @value: filter bitmap to store at @offset
 *
 *  Writes @value to the VLAN filter table array register at @offset and
 *  flushes so the write reaches the hardware immediately.
 **/
static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	array_wr32(E1000_VFTA, offset, value);
	wrfl();
}
119 
120 /* Due to a hw errata, if the host tries to configure the VFTA register
121  * while performing queries from the BMC or DMA, then the VFTA in some
122  * cases won't be written.
123  */
124 
/**
 *  igb_clear_vfta_i350 - Clear VLAN filter table (i350)
 *  @hw: pointer to the HW structure
 *
 *  Clears the VLAN filter table on i350 parts. Per the hardware erratum
 *  described above, each entry is written ten times because a single
 *  write can be lost while the BMC or DMA is querying the part.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream driver source.
 */
{
	u32 offset;
	int i;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		/* Repeat the write per the VFTA erratum */
		for (i = 0; i < 10; i++)
			array_wr32(E1000_VFTA, offset, 0);

		wrfl();
	}
}
144 
154 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
155 {
156  int i;
157 
158  for (i = 0; i < 10; i++)
159  array_wr32(E1000_VFTA, offset, value);
160 
161  wrfl();
162 }
163 
173 void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
174 {
175  u32 i;
176  u8 mac_addr[ETH_ALEN] = {0};
177 
178  /* Setup the receive address */
179  hw_dbg("Programming MAC Address into RAR[0]\n");
180 
181  hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
182 
183  /* Zero out the other (rar_entry_count - 1) receive addresses */
184  hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
185  for (i = 1; i < rar_count; i++)
186  hw->mac.ops.rar_set(hw, mac_addr, i);
187 }
188 
/**
 *  igb_vfta_set - set or clear one VLAN id in the VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vid: VLAN id whose filter bit is updated
 *  @add: true to set the bit, false to clear it
 *
 *  Updates the driver's shadow copy of the VFTA and writes the affected
 *  register back through the device-appropriate helper (the i350
 *  variant repeats the write to dodge a hardware erratum).
 *
 *  Returns 0 on success, -E1000_ERR_CONFIG if the bit already had the
 *  requested state (the register is still rewritten in that case).
 */
s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
{
	/* NOTE(review): the declarations deriving 'index' and 'mask'
	 * from @vid were lost in this extraction — confirm against the
	 * upstream driver source.
	 */
	u32 vfta;
	struct igb_adapter *adapter = hw->back;
	s32 ret_val = 0;

	vfta = adapter->shadow_vfta[index];

	/* bit was set/cleared before we started */
	if ((!!(vfta & mask)) == add) {
		ret_val = -E1000_ERR_CONFIG;
	} else {
		if (add)
			vfta |= mask;
		else
			vfta &= ~mask;
	}
	/* i350 needs the erratum-safe repeated write */
	if (hw->mac.type == e1000_i350)
		igb_write_vfta_i350(hw, index, vfta);
	else
		igb_write_vfta(hw, index, vfta);
	adapter->shadow_vfta[index] = vfta;

	return ret_val;
}
225 
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address and, if a valid unicast
 *  one exists for this function, programs it into RAR[0] so it is used
 *  exactly like the permanent address.
 *
 *  Returns 0 on success or when no alternate address applies, otherwise
 *  the NVM read error code.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream driver source.
 */
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/*
	 * Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* All-ones / all-zeros pointer means the field is unprogrammed */
	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	/* Each LAN function has its own alternate-address slot */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;

	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	/* NVM words are 16 bits: read the address two bytes at a time */
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/*
	 * We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
298 
308 void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
309 {
310  u32 rar_low, rar_high;
311 
312  /*
313  * HW expects these in little endian so we reverse the byte order
314  * from network order (big endian) to little endian
315  */
316  rar_low = ((u32) addr[0] |
317  ((u32) addr[1] << 8) |
318  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
319 
320  rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
321 
322  /* If MAC address zero, no need to set the AV bit */
323  if (rar_low || rar_high)
324  rar_high |= E1000_RAH_AV;
325 
326  /*
327  * Some bridges will combine consecutive 32-bit writes into
328  * a single burst write, which will malfunction on some parts.
329  * The flushes avoid this.
330  */
331  wr32(E1000_RAL(index), rar_low);
332  wrfl();
333  wr32(E1000_RAH(index), rar_high);
334  wrfl();
335 }
336 
347 void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
348 {
349  u32 hash_bit, hash_reg, mta;
350 
351  /*
352  * The MTA is a register array of 32-bit registers. It is
353  * treated like an array of (32*mta_reg_count) bits. We want to
354  * set bit BitArray[hash_value]. So we figure out what register
355  * the bit is in, read it, OR in the new bit, then write
356  * back the new value. The (hw->mac.mta_reg_count - 1) serves as a
357  * mask to bits 31:5 of the hash value which gives us the
358  * register we're modifying. The hash bit within that register
359  * is determined by the lower 5 bits of the hash value.
360  */
361  hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
362  hash_bit = hash_value & 0x1F;
363 
364  mta = array_rd32(E1000_MTA, hash_reg);
365 
366  mta |= (1 << hash_bit);
367 
368  array_wr32(E1000_MTA, hash_reg, mta);
369  wrfl();
370 }
371 
381 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
382 {
383  u32 hash_value, hash_mask;
384  u8 bit_shift = 0;
385 
386  /* Register count multiplied by bits per register */
387  hash_mask = (hw->mac.mta_reg_count * 32) - 1;
388 
389  /*
390  * For a mc_filter_type of 0, bit_shift is the number of left-shifts
391  * where 0xFF would still fall within the hash mask.
392  */
393  while (hash_mask >> bit_shift != 0xFF)
394  bit_shift++;
395 
396  /*
397  * The portion of the address that is used for the hash table
398  * is determined by the mc_filter_type setting.
399  * The algorithm is such that there is a total of 8 bits of shifting.
400  * The bit_shift for a mc_filter_type of 0 represents the number of
401  * left-shifts where the MSB of mc_addr[5] would still fall within
402  * the hash_mask. Case 0 does this exactly. Since there are a total
403  * of 8 bits of shifting, then mc_addr[4] will shift right the
404  * remaining number of bits. Thus 8 - bit_shift. The rest of the
405  * cases are a variation of this algorithm...essentially raising the
406  * number of bits to shift mc_addr[5] left, while still keeping the
407  * 8-bit shifting total.
408  *
409  * For example, given the following Destination MAC Address and an
410  * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
411  * we can see that the bit_shift for case 0 is 4. These are the hash
412  * values resulting from each mc_filter_type...
413  * [0] [1] [2] [3] [4] [5]
414  * 01 AA 00 12 34 56
415  * LSB MSB
416  *
417  * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
418  * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
419  * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
420  * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
421  */
422  switch (hw->mac.mc_filter_type) {
423  default:
424  case 0:
425  break;
426  case 1:
427  bit_shift += 1;
428  break;
429  case 2:
430  bit_shift += 2;
431  break;
432  case 3:
433  bit_shift += 4;
434  break;
435  }
436 
437  hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
438  (((u16) mc_addr[5]) << bit_shift)));
439 
440  return hash_value;
441 }
442 
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses, ETH_ALEN bytes each
 *  @mc_addr_count: number of addresses in the list
 *
 *  Rebuilds the software MTA shadow from the supplied list, then writes
 *  the entire multicast table array to the hardware in one pass.
 *
 *  NOTE(review): the first line of the signature was lost in this
 *  extraction — presumably "void igb_update_mc_addr_list(struct
 *  e1000_hw *hw," — confirm against the upstream source.
 */
				u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		/* bits 31:5 select the register, bits 4:0 the bit */
		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}
477 
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  The statistics registers are clear-on-read; reading each one here
 *  zeroes the hardware counters common to all e1000/igb parts.
 *
 *  NOTE(review): the function signature line and a few counter reads
 *  (between MCC/COLC and RLEC/FCRUC) were lost in this extraction —
 *  confirm against the upstream source.
 */
{
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
524 
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Polls the PHY for link when the get_link_status flag indicates a
 *  possible state change; on a fresh link-up with auto-negotiation
 *  enabled it reconfigures flow control to match the new link partner.
 *
 *  Returns 0 on success or when nothing needed doing,
 *  -E1000_ERR_CONFIG when speed/duplex is forced, or a PHY access
 *  error code.
 *
 *  NOTE(review): the function signature line, a downshift check and a
 *  collision-distance configuration call were lost in this extraction —
 *  confirm against the upstream source.
 */
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/*
	 * We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed. The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/*
	 * First we want to see if the MII Status Register reports
	 * link. If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/*
	 * Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	/* NOTE(review): the downshift-check call was elided here */

	/*
	 * If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/*
	 * Auto-Neg is enabled. Auto Speed Detection takes care
	 * of MAC speed/duplex configuration. So we only need to
	 * configure Collision Distance in the MAC.
	 */
	/* NOTE(review): the collision-distance call was elided here */

	/*
	 * Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
599 
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use (EEPROM default if the
 *  requested mode is e1000_fc_default), saves them as the current mode,
 *  calls the media-specific link setup routine, then initializes the
 *  flow control timer and watermark registers.
 *
 *  Returns 0 on success, or the error from NVM access, the physical
 *  interface setup, or the watermark programming.
 *
 *  NOTE(review): the function signature line and the FCAL/FCAH/FCT
 *  default-value writes were lost in this extraction — confirm against
 *  the upstream source.
 */
{
	s32 ret_val = 0;

	/*
	 * In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/*
	 * If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/*
	 * We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/*
	 * Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values. This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	/* NOTE(review): the FCT/FCAH/FCAL writes were elided here */

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:

	return ret_val;
}
664 
/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Updates the collision distance field in the Transmit Control
 *  register and flushes the write.
 *
 *  NOTE(review): the function signature line and the statement that ORs
 *  the new collision-distance value into tctl were lost in this
 *  extraction — confirm against the upstream source.
 */
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	/* Clear the current collision distance field */
	tctl &= ~E1000_TCTL_COLD;
	/* NOTE(review): the OR-in of the new distance was elided here */

	wr32(E1000_TCTL, tctl);
	wrfl();
}
685 
694 static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
695 {
696  s32 ret_val = 0;
697  u32 fcrtl = 0, fcrth = 0;
698 
699  /*
700  * Set the flow control receive threshold registers. Normally,
701  * these registers will be set to a default threshold that may be
702  * adjusted later by the driver's runtime code. However, if the
703  * ability to transmit pause frames is not enabled, then these
704  * registers will be set to 0.
705  */
706  if (hw->fc.current_mode & e1000_fc_tx_pause) {
707  /*
708  * We need to set up the Receive Threshold high and low water
709  * marks as well as (optionally) enabling the transmission of
710  * XON frames.
711  */
712  fcrtl = hw->fc.low_water;
713  if (hw->fc.send_xon)
714  fcrtl |= E1000_FCRTL_XONE;
715 
716  fcrth = hw->fc.high_water;
717  }
718  wr32(E1000_FCRTL, fcrtl);
719  wr32(E1000_FCRTH, fcrth);
720 
721  return ret_val;
722 }
723 
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Reads the EEPROM word that holds the default PAUSE configuration and
 *  sets hw->fc.requested_mode accordingly (none, tx_pause or full).
 *
 *  Returns 0 on success or the NVM read error code.
 *
 *  NOTE(review): the second half of the middle condition (the compare
 *  against the asymmetric-direction pause value) was lost in this
 *  extraction — confirm against the upstream source.
 */
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 nvm_data;

	/*
	 * Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins. If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);

	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
763 
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Manually enables or disables the MAC's transmit/receive flow control
 *  bits in CTRL according to hw->fc.current_mode. Used when link was
 *  established without the MAC's internal auto-negotiation (forced
 *  link, or link negotiated by the PHY).
 *
 *  Returns 0 on success, -E1000_ERR_CONFIG for an invalid mode.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream source.
 */
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/*
	 * Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit an
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disable flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause
	 * frames but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames
	 * frames but we do not receive pause frames).
	 * 3: Both Rx and TX flow control (symmetric) is enabled.
	 * other: No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
827 
/**
 *  igb_config_fc_after_link_up - Configure flow control after link up
 *  @hw: pointer to the HW structure
 *
 *  After link is established, either forces the MAC flow control
 *  settings (forced link / serdes auto-neg failure / copper), or, for
 *  copper with auto-negotiation, reads the local and link-partner
 *  PAUSE/ASM_DIR advertisement bits and resolves the flow control mode
 *  per the IEEE 802.3 pause resolution table, then programs the MAC.
 *  Flow control is disabled when the negotiated duplex is half.
 *
 *  Returns 0 on success or a PHY/MAC access error code.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream source.
 */
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/*
	 * Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link. In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/*
	 * Check for the case where we have copper media and auto-neg is
	 * enabled. In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/*
		 * Read the MII Status Register and check to see if AutoNeg
		 * has completed. We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg "
				 "has not completed.\n");
			goto out;
		}

		/*
		 * The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/*
		 * Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner. The following
		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
		 * 1999, describes these PAUSE resolution bits and how flow
		 * control is determined based upon these settings.
		 * NOTE: DC = Don't Care
		 *
		 * LOCAL DEVICE | LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 * 0 | 0 | DC | DC | e1000_fc_none
		 * 0 | 1 | 0 | DC | e1000_fc_none
		 * 0 | 1 | 1 | 0 | e1000_fc_none
		 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
		 * 1 | 0 | 0 | DC | e1000_fc_none
		 * 1 | DC | 1 | DC | e1000_fc_full
		 * 1 | 1 | 0 | 0 | e1000_fc_none
		 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1? If so, this implies
		 * Symmetric Flow Control is enabled at both ends. The
		 * ASM_DIR bits are irrelevant per the spec.
		 *
		 * For Symmetric Flow Control:
		 *
		 * LOCAL DEVICE | LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 * 1 | DC | 1 | DC | E1000_fc_full
		 *
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/*
			 * Now we need to check if the user selected RX ONLY
			 * of pause frames. In this case, we had to advertise
			 * FULL flow control because we could not advertise RX
			 * ONLY. Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\r\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = "
				       "RX PAUSE frames only.\r\n");
			}
		}
		/*
		 * For receiving PAUSE frames ONLY.
		 *
		 * LOCAL DEVICE | LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
		}
		/*
		 * For transmitting PAUSE frames ONLY.
		 *
		 * LOCAL DEVICE | LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}
		/*
		 * Per the IEEE spec, at this point flow control should be
		 * disabled. However, we want to consider that we could
		 * be connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner. So if we advertised no flow control, that is
		 * what we will resolve to. If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only. We can do
		 * this safely for two reasons: If the link partner really
		 * didn't want flow control enabled, and we enable Rx, no
		 * harm done since we won't be receiving any PAUSE frames
		 * anyway. If the intent on the link partner was to have
		 * flow control enabled, then by us enabling RX only, we
		 * can at least receive pause frames and process them.
		 * This is a good idea because in most cases, since we are
		 * predominantly a server NIC, more times than not we will
		 * be asked to delay transmission of packets than asking
		 * our link partner to pause transmission of frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none ||
			  hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 hw->fc.strict_ieee) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\r\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
		}

		/*
		 * Now we need to do one last check... If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/*
		 * Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}

out:
	return ret_val;
}
1047 
/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed (10/100/1000 Mbps)
 *  @duplex: stores the current duplex (half/full)
 *
 *  Reads the device Status register and decodes the speed and duplex
 *  fields into the caller's variables.
 *
 *  Returns 0 always.
 *
 *  NOTE(review): the first line of the signature was lost in this
 *  extraction — presumably "s32 igb_get_speed_and_duplex_copper(struct
 *  e1000_hw *hw, u16 *speed," — confirm against the upstream source.
 */
			  u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbs, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbs, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbs, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}
1084 
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquires the SW semaphore (SMBI clear) and then the SW/FW semaphore
 *  (SWESMBI), each with a timeout proportional to the NVM word size, to
 *  gain exclusive access to the NVM.
 *
 *  Returns 0 on success, -E1000_ERR_NVM on timeout.
 *
 *  NOTE(review): the function signature line, the SWESMBI set/latch
 *  check inside the second loop, and the semaphore-release call on
 *  timeout were lost in this extraction — confirm against the upstream
 *  source.
 */
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		/* NOTE(review): the SWESMBI write was elided here */

		/* Semaphore acquired if bit latched */
		/* NOTE(review): the latch-check condition was elided here */
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		/* NOTE(review): the release call was elided here */
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
1137 
/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Clears both the SW (SMBI) and SW/FW (SWESMBI) semaphore bits in the
 *  SWSM register, releasing access to the NVM.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream source.
 */
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}
1154 
/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Polls (1 ms per iteration, up to AUTO_READ_DONE_TIMEOUT) for the
 *  hardware to finish auto-reading the EEPROM after reset.
 *
 *  Returns 0 on success, -E1000_ERR_RESET on timeout.
 *
 *  NOTE(review): the function signature line and the poll condition
 *  inside the loop were lost in this extraction — confirm against the
 *  upstream source.
 */
{
	s32 i = 0;
	s32 ret_val = 0;


	while (i < AUTO_READ_DONE_TIMEOUT) {
		/* NOTE(review): the auto-read-done check was elided here */
		break;
		msleep(1);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}
1183 
/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: stores the NVM ID LED settings word
 *
 *  Reads the ID LED settings word from the NVM and, if it holds a
 *  reserved value (0x0000 or 0xFFFF), substitutes a media-appropriate
 *  default.
 *
 *  Returns 0 on success or the NVM read error code.
 *
 *  NOTE(review): the media-type case labels (and the serdes-specific
 *  default assignment) inside the switch were lost in this extraction —
 *  confirm against the upstream source.
 */
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch(hw->phy.media_type) {
			break;
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}
1216 
/**
 *  igb_id_led_init - Initialize identification LED modes
 *  @hw: pointer to the HW structure
 *
 *  Builds mac->ledctl_mode1 and mac->ledctl_mode2 from the NVM ID LED
 *  settings: each of the four LED nibbles selects whether the LED is
 *  forced on or off (or left at default) for the two identification
 *  modes.
 *
 *  Returns 0 on success or the NVM read error code.
 *
 *  NOTE(review): the function signature line and the read of LEDCTL
 *  into mac->ledctl_default were lost in this extraction — confirm
 *  against the upstream source.
 */
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = igb_valid_led_default(hw, &data);
	if (ret_val)
		goto out;

	/* NOTE(review): the assignment of mac->ledctl_default from the
	 * LEDCTL register was elided here.
	 */
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	/* One 4-bit ID LED setting per LED; one 8-bit LEDCTL field per LED */
	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
1281 
/**
 *  igb_cleanup_led - Restore the default LED configuration
 *  @hw: pointer to the HW structure
 *
 *  Writes the saved default LEDCTL value back to the hardware, undoing
 *  any identification blinking/forcing.
 *
 *  Returns 0 always.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream source.
 */
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1294 
/**
 *  igb_blink_led - Blink the LED(s)
 *  @hw: pointer to the HW structure
 *
 *  Starts with ledctl_mode2 and sets the blink bit for each LED whose
 *  mode2 field forces it on, then writes the result to LEDCTL.
 *
 *  Returns 0 always.
 *
 *  NOTE(review): the function signature line and the right-hand side of
 *  the "LED on" comparison were lost in this extraction — confirm
 *  against the upstream source.
 */
{
	u32 ledctl_blink = 0;
	u32 i;

	/*
	 * set the blink bit for each LED that's "on" (0x0E)
	 * in ledctl_mode2
	 */
	ledctl_blink = hw->mac.ledctl_mode2;
	for (i = 0; i < 4; i++)
		if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
					 (i * 8));

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
1321 
/**
 *  igb_led_off - Turn off the identification LED
 *  @hw: pointer to the HW structure
 *
 *  Writes ledctl_mode1 to LEDCTL for the applicable media type,
 *  turning the LED off.
 *
 *  Returns 0 always.
 *
 *  NOTE(review): the function signature line and the media-type case
 *  label were lost in this extraction — confirm against the upstream
 *  source.
 */
{
	switch (hw->phy.media_type) {
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
1340 
/**
 *  igb_disable_pcie_master - Disable PCIe bus mastering
 *  @hw: pointer to the HW structure
 *
 *  Requests the device stop issuing PCIe master transactions and polls
 *  (100 us per iteration) for the status bit confirming that pending
 *  master requests have drained.
 *
 *  Returns 0 on success (or on a non-PCIe bus), an error code when
 *  master requests remain pending at timeout.
 *
 *  NOTE(review): the function signature line, the timeout declaration,
 *  the GIO-master-disable OR into ctrl, the status-bit constant and the
 *  failure error assignment were lost in this extraction — confirm
 *  against the upstream source.
 */
{
	u32 ctrl;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	/* NOTE(review): the master-disable bit OR was elided here */
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		/* NOTE(review): the error assignment was elided here */
		goto out;
	}

out:
	return ret_val;
}
1382 
/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  When speed/duplex is forced (auto-negotiation disabled), an
 *  automatic MDI/MDIX setting (0 or 3) is invalid; in that case the
 *  setting is reset to 1 (straight-through).
 *
 *  Returns 0 when valid, -E1000_ERR_CONFIG when corrected.
 *
 *  NOTE(review): the function signature line was lost in this
 *  extraction — confirm against the upstream source.
 */
{
	s32 ret_val = 0;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}
1404 
/**
 *  igb_write_8bit_ctrl_reg - Write a control register through the
 *  address/data mechanism
 *  @hw: pointer to the HW structure
 *  @reg: control register to write (first signature line elided)
 *  @offset: sub-register address, shifted into the address field
 *  @data: byte value to write
 *
 *  Packs @offset and @data into one word, writes it to @reg, then polls
 *  (5 us per iteration, up to E1000_GEN_POLL_TIMEOUT) for the ready bit.
 *
 *  Returns 0 on success, -E1000_ERR_PHY when the ready bit never sets.
 *
 *  NOTE(review): the first line of the signature was lost in this
 *  extraction — presumably "s32 igb_write_8bit_ctrl_reg(struct e1000_hw
 *  *hw, u32 reg," — confirm against the upstream source.
 */
			u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the MDI read completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
1442 
/**
 *  igb_enable_mng_pass_thru - Check if management passthrough is needed
 *  @hw: pointer to the HW structure
 *
 *  Determines whether firmware manageability is enabled such that the
 *  driver must not release/disable filters on the interface: requires
 *  ASF firmware present and TCO receive enabled, then either the ARC
 *  subsystem reporting the appropriate firmware mode with MNGCG clear,
 *  or (no ARC) SMBus enabled with ASF disabled.
 *
 *  Returns true when passthrough must be preserved, false otherwise.
 *
 *  NOTE(review): the function signature line and the right-hand side of
 *  the FWSM mode comparison were lost in this extraction — confirm
 *  against the upstream source.
 */
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}