Linux Kernel 3.7.1
e1000_82575.c
1 /*******************************************************************************
2 
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 /* e1000_82575
29  * e1000_82576
30  */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/types.h>
35 #include <linux/if_ether.h>
36 
37 #include "e1000_mac.h"
38 #include "e1000_82575.h"
39 #include "e1000_i210.h"
40 
41 static s32 igb_get_invariants_82575(struct e1000_hw *);
42 static s32 igb_acquire_phy_82575(struct e1000_hw *);
43 static void igb_release_phy_82575(struct e1000_hw *);
44 static s32 igb_acquire_nvm_82575(struct e1000_hw *);
45 static void igb_release_nvm_82575(struct e1000_hw *);
46 static s32 igb_check_for_link_82575(struct e1000_hw *);
47 static s32 igb_get_cfg_done_82575(struct e1000_hw *);
48 static s32 igb_init_hw_82575(struct e1000_hw *);
49 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
50 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
51 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
52 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
53 static s32 igb_reset_hw_82575(struct e1000_hw *);
54 static s32 igb_reset_hw_82580(struct e1000_hw *);
55 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
56 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
57 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
58 static s32 igb_setup_copper_link_82575(struct e1000_hw *);
59 static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
60 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
61 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
62 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
63 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
64  u16 *);
65 static s32 igb_get_phy_id_82575(struct e1000_hw *);
66 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
67 static bool igb_sgmii_active_82575(struct e1000_hw *);
68 static s32 igb_reset_init_script_82575(struct e1000_hw *);
69 static s32 igb_read_mac_addr_82575(struct e1000_hw *);
70 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
71 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
72 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
73 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
74 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
75 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
76 static const u16 e1000_82580_rxpbs_table[] =
77  { 36, 72, 144, 1, 2, 4, 8, 16,
78  35, 70, 140 };
79 #define E1000_82580_RXPBS_TABLE_SIZE \
80  (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
81 
89 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
90 {
91  u32 reg = 0;
92  bool ext_mdio = false;
93 
94  switch (hw->mac.type) {
95  case e1000_82575:
96  case e1000_82576:
97  reg = rd32(E1000_MDIC);
98  ext_mdio = !!(reg & E1000_MDIC_DEST);
99  break;
100  case e1000_82580:
101  case e1000_i350:
102  case e1000_i210:
103  case e1000_i211:
104  reg = rd32(E1000_MDICNFG);
105  ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
106  break;
107  default:
108  break;
109  }
110  return ext_mdio;
111 }
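Reduced to its essentials, the helper above is a single bit test against one register: MDIC on 82575/82576, MDICNFG on later parts. A standalone sketch, with the flag position assumed for illustration (the real masks are E1000_MDIC_DEST and E1000_MDICNFG_EXT_MDIO in the driver headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEST_BIT (1u << 31) /* assumed position of the "external MDIO" flag */

static bool uses_external_mdio(uint32_t reg)
{
	return !!(reg & DEST_BIT);
}

int main(void)
{
	printf("%d\n", uses_external_mdio(0x80000000u)); /* 1: PHY behind MDIO */
	printf("%d\n", uses_external_mdio(0x00000004u)); /* 0: PHY behind I2C  */
	return 0;
}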
112 
113 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
114 {
115  struct e1000_phy_info *phy = &hw->phy;
116  struct e1000_nvm_info *nvm = &hw->nvm;
117  struct e1000_mac_info *mac = &hw->mac;
118  struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
119  u32 eecd;
120  s32 ret_val;
121  u16 size;
122  u32 ctrl_ext = 0;
123 
124  switch (hw->device_id) {
125  case E1000_DEV_ID_82575EB_COPPER:
126  case E1000_DEV_ID_82575EB_FIBER_SERDES:
127  case E1000_DEV_ID_82575GB_QUAD_COPPER:
128  mac->type = e1000_82575;
129  break;
130  case E1000_DEV_ID_82576:
131  case E1000_DEV_ID_82576_NS:
132  case E1000_DEV_ID_82576_NS_SERDES:
133  case E1000_DEV_ID_82576_FIBER:
134  case E1000_DEV_ID_82576_SERDES:
135  case E1000_DEV_ID_82576_SERDES_QUAD:
136  case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
137  case E1000_DEV_ID_82576_QUAD_COPPER:
138  mac->type = e1000_82576;
139  break;
140  case E1000_DEV_ID_82580_COPPER:
141  case E1000_DEV_ID_82580_FIBER:
142  case E1000_DEV_ID_82580_QUAD_FIBER:
143  case E1000_DEV_ID_82580_SERDES:
144  case E1000_DEV_ID_82580_SGMII:
145  case E1000_DEV_ID_82580_COPPER_DUAL:
146  case E1000_DEV_ID_DH89XXCC_SGMII:
147  case E1000_DEV_ID_DH89XXCC_SERDES:
148  case E1000_DEV_ID_DH89XXCC_BACKPLANE:
149  case E1000_DEV_ID_DH89XXCC_SFP:
150  mac->type = e1000_82580;
151  break;
152  case E1000_DEV_ID_I350_COPPER:
153  case E1000_DEV_ID_I350_FIBER:
154  case E1000_DEV_ID_I350_SERDES:
155  case E1000_DEV_ID_I350_SGMII:
156  mac->type = e1000_i350;
157  break;
158  case E1000_DEV_ID_I210_COPPER:
159  case E1000_DEV_ID_I210_COPPER_OEM1:
160  case E1000_DEV_ID_I210_COPPER_IT:
161  case E1000_DEV_ID_I210_FIBER:
162  case E1000_DEV_ID_I210_SERDES:
163  case E1000_DEV_ID_I210_SGMII:
164  mac->type = e1000_i210;
165  break;
166  case E1000_DEV_ID_I211_COPPER:
167  mac->type = e1000_i211;
168  break;
169  default:
170  return -E1000_ERR_MAC_INIT;
171  break;
172  }
173 
174  /* Set media type */
175  /*
176  * The 82575 uses bits 22:23 for link mode. The mode can be changed
177  * based on the EEPROM. We cannot rely upon device ID. There
178  * is no distinguishable difference between fiber and internal
179  * SerDes mode on the 82575. There can be an external PHY attached
180  * on the SGMII interface. For this, we'll set sgmii_active to true.
181  */
182  hw->phy.media_type = e1000_media_type_copper;
183  dev_spec->sgmii_active = false;
184 
185  ctrl_ext = rd32(E1000_CTRL_EXT);
186  switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
187  case E1000_CTRL_EXT_LINK_MODE_SGMII:
188  dev_spec->sgmii_active = true;
189  break;
190  case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
191  case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
192  hw->phy.media_type = e1000_media_type_internal_serdes;
193  break;
194  default:
195  break;
196  }
197 
198  /* Set mta register count */
199  mac->mta_reg_count = 128;
200  /* Set rar entry count */
201  switch (mac->type) {
202  case e1000_82576:
203  mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
204  break;
205  case e1000_82580:
206  mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
207  break;
208  case e1000_i350:
209  mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
210  break;
211  default:
212  mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
213  break;
214  }
215  /* reset */
216  if (mac->type >= e1000_82580)
217  mac->ops.reset_hw = igb_reset_hw_82580;
218  else
219  mac->ops.reset_hw = igb_reset_hw_82575;
220 
221  if (mac->type >= e1000_i210) {
222  mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
223  mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
224  } else {
225  mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
226  mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
227  }
228 
229  /* Set if part includes ASF firmware */
230  mac->asf_firmware_present = true;
231  /* Set if manageability features are enabled. */
232  mac->arc_subsystem_valid =
233  (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
234  ? true : false;
235  /* enable EEE on i350 parts and later parts */
236  if (mac->type >= e1000_i350)
237  dev_spec->eee_disable = false;
238  else
239  dev_spec->eee_disable = true;
240  /* physical interface link setup */
241  mac->ops.setup_physical_interface =
242  (hw->phy.media_type == e1000_media_type_copper)
243  ? igb_setup_copper_link_82575
244  : igb_setup_serdes_link_82575;
245 
246  /* NVM initialization */
247  eecd = rd32(E1000_EECD);
248  size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
249  E1000_EECD_SIZE_EX_SHIFT);
250 
251  /*
252  * Added to a constant, "size" becomes the left-shift value
253  * for setting word_size.
254  */
255  size += NVM_WORD_SIZE_BASE_SHIFT;
256 
257  /*
258  * Check for invalid size
259  */
260  if ((hw->mac.type == e1000_82576) && (size > 15)) {
261  pr_notice("The NVM size is not valid, defaulting to 32K\n");
262  size = 15;
263  }
264 
265  nvm->word_size = 1 << size;
266  if (hw->mac.type < e1000_i210) {
267  nvm->opcode_bits = 8;
268  nvm->delay_usec = 1;
269  switch (nvm->override) {
270  case e1000_nvm_override_spi_large:
271  nvm->page_size = 32;
272  nvm->address_bits = 16;
273  break;
274  case e1000_nvm_override_spi_small:
275  nvm->page_size = 8;
276  nvm->address_bits = 8;
277  break;
278  default:
279  nvm->page_size = eecd
280  & E1000_EECD_ADDR_BITS ? 32 : 8;
281  nvm->address_bits = eecd
282  & E1000_EECD_ADDR_BITS ? 16 : 8;
283  break;
284  }
285  if (nvm->word_size == (1 << 15))
286  nvm->page_size = 128;
287 
288  nvm->type = e1000_nvm_eeprom_spi;
289  } else
290  nvm->type = e1000_nvm_flash_hw;
291 
292  /* NVM Function Pointers */
293  switch (hw->mac.type) {
294  case e1000_82580:
295  nvm->ops.validate = igb_validate_nvm_checksum_82580;
296  nvm->ops.update = igb_update_nvm_checksum_82580;
297  nvm->ops.acquire = igb_acquire_nvm_82575;
298  nvm->ops.release = igb_release_nvm_82575;
299  if (nvm->word_size < (1 << 15))
300  nvm->ops.read = igb_read_nvm_eerd;
301  else
302  nvm->ops.read = igb_read_nvm_spi;
303  nvm->ops.write = igb_write_nvm_spi;
304  break;
305  case e1000_i350:
306  nvm->ops.validate = igb_validate_nvm_checksum_i350;
307  nvm->ops.update = igb_update_nvm_checksum_i350;
308  nvm->ops.acquire = igb_acquire_nvm_82575;
309  nvm->ops.release = igb_release_nvm_82575;
310  if (nvm->word_size < (1 << 15))
311  nvm->ops.read = igb_read_nvm_eerd;
312  else
313  nvm->ops.read = igb_read_nvm_spi;
314  nvm->ops.write = igb_write_nvm_spi;
315  break;
316  case e1000_i210:
317  nvm->ops.validate = igb_validate_nvm_checksum_i210;
318  nvm->ops.update = igb_update_nvm_checksum_i210;
319  nvm->ops.acquire = igb_acquire_nvm_i210;
320  nvm->ops.release = igb_release_nvm_i210;
321  nvm->ops.read = igb_read_nvm_srrd_i210;
322  nvm->ops.valid_led_default = igb_valid_led_default_i210;
323  break;
324  case e1000_i211:
325  nvm->ops.acquire = igb_acquire_nvm_i210;
326  nvm->ops.release = igb_release_nvm_i210;
327  nvm->ops.read = igb_read_nvm_i211;
328  nvm->ops.valid_led_default = igb_valid_led_default_i210;
329  nvm->ops.validate = NULL;
330  nvm->ops.update = NULL;
331  nvm->ops.write = NULL;
332  break;
333  default:
334  nvm->ops.validate = igb_validate_nvm_checksum;
335  nvm->ops.update = igb_update_nvm_checksum;
336  nvm->ops.acquire = igb_acquire_nvm_82575;
337  nvm->ops.release = igb_release_nvm_82575;
338  if (nvm->word_size < (1 << 15))
339  nvm->ops.read = igb_read_nvm_eerd;
340  else
341  nvm->ops.read = igb_read_nvm_spi;
342  nvm->ops.write = igb_write_nvm_spi;
343  break;
344  }
345 
346  /* if part supports SR-IOV then initialize mailbox parameters */
347  switch (mac->type) {
348  case e1000_82576:
349  case e1000_i350:
350  igb_init_mbx_params_pf(hw);
351  break;
352  default:
353  break;
354  }
355 
356  /* setup PHY parameters */
357  if (phy->media_type != e1000_media_type_copper) {
358  phy->type = e1000_phy_none;
359  return 0;
360  }
361 
362  phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
363  phy->reset_delay_us = 100;
364 
365  ctrl_ext = rd32(E1000_CTRL_EXT);
366 
367  /* PHY function pointers */
368  if (igb_sgmii_active_82575(hw)) {
369  phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
370  ctrl_ext |= E1000_CTRL_I2C_ENA;
371  } else {
372  phy->ops.reset = igb_phy_hw_reset;
373  ctrl_ext &= ~E1000_CTRL_I2C_ENA;
374  }
375 
376  wr32(E1000_CTRL_EXT, ctrl_ext);
377  igb_reset_mdicnfg_82580(hw);
378 
379  if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
380  phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
381  phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
382  } else if ((hw->mac.type == e1000_82580)
383  || (hw->mac.type == e1000_i350)) {
384  phy->ops.read_reg = igb_read_phy_reg_82580;
385  phy->ops.write_reg = igb_write_phy_reg_82580;
386  } else if (hw->phy.type >= e1000_phy_i210) {
387  phy->ops.read_reg = igb_read_phy_reg_gs40g;
388  phy->ops.write_reg = igb_write_phy_reg_gs40g;
389  } else {
390  phy->ops.read_reg = igb_read_phy_reg_igp;
391  phy->ops.write_reg = igb_write_phy_reg_igp;
392  }
393 
394  /* set lan id */
395  hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
396  E1000_STATUS_FUNC_SHIFT;
397 
398  /* Set phy->phy_addr and phy->id. */
399  ret_val = igb_get_phy_id_82575(hw);
400  if (ret_val)
401  return ret_val;
402 
403  /* Verify phy id and set remaining function pointers */
404  switch (phy->id) {
405  case I347AT4_E_PHY_ID:
406  case M88E1112_E_PHY_ID:
407  case M88E1111_I_PHY_ID:
408  phy->type = e1000_phy_m88;
409  phy->ops.get_phy_info = igb_get_phy_info_m88;
410 
411  if (phy->id == I347AT4_E_PHY_ID ||
412  phy->id == M88E1112_E_PHY_ID)
413  phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
414  else
415  phy->ops.get_cable_length = igb_get_cable_length_m88;
416 
417  if (phy->id == I210_I_PHY_ID) {
418  phy->ops.get_cable_length =
419  igb_get_cable_length_m88_gen2;
420  phy->ops.set_d0_lplu_state =
421  igb_set_d0_lplu_state_82580;
422  phy->ops.set_d3_lplu_state =
423  igb_set_d3_lplu_state_82580;
424  }
425  phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
426  break;
427  case IGP03E1000_E_PHY_ID:
428  phy->type = e1000_phy_igp_3;
429  phy->ops.get_phy_info = igb_get_phy_info_igp;
430  phy->ops.get_cable_length = igb_get_cable_length_igp_2;
431  phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
432  phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
433  phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
434  break;
435  case I82580_I_PHY_ID:
436  case I350_I_PHY_ID:
437  phy->type = e1000_phy_82580;
438  phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
439  phy->ops.get_cable_length = igb_get_cable_length_82580;
440  phy->ops.get_phy_info = igb_get_phy_info_82580;
441  phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
442  phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
443  break;
444  case I210_I_PHY_ID:
445  phy->type = e1000_phy_i210;
446  phy->ops.get_phy_info = igb_get_phy_info_m88;
447  phy->ops.check_polarity = igb_check_polarity_m88;
448  phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
449  phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
450  phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
451  phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
452  break;
453  default:
454  return -E1000_ERR_PHY;
455  }
456 
457  return 0;
458 }
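The word_size arithmetic above is easy to mis-read: the EECD size field is an exponent, not a word count. A user-space worked example, assuming NVM_WORD_SIZE_BASE_SHIFT is 6 as in the driver headers:

#include <stdint.h>
#include <stdio.h>

#define NVM_WORD_SIZE_BASE_SHIFT 6

int main(void)
{
	uint16_t size = 9;                /* pretend EECD.SIZE_EX read back 9 */

	size += NVM_WORD_SIZE_BASE_SHIFT; /* 9 + 6 = 15 */
	if (size > 15)                    /* 82576 clamp from the code above */
		size = 15;

	printf("word_size = %u words (%u bytes)\n",
	       1u << size, (1u << size) * 2u); /* 32768 words = 64 KB */
	return 0;
}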
459 
467 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
468 {
469  u16 mask = E1000_SWFW_PHY0_SM;
470 
471  if (hw->bus.func == E1000_FUNC_1)
472  mask = E1000_SWFW_PHY1_SM;
473  else if (hw->bus.func == E1000_FUNC_2)
474  mask = E1000_SWFW_PHY2_SM;
475  else if (hw->bus.func == E1000_FUNC_3)
476  mask = E1000_SWFW_PHY3_SM;
477 
478  return hw->mac.ops.acquire_swfw_sync(hw, mask);
479 }
480 
488 static void igb_release_phy_82575(struct e1000_hw *hw)
489 {
490  u16 mask = E1000_SWFW_PHY0_SM;
491 
492  if (hw->bus.func == E1000_FUNC_1)
493  mask = E1000_SWFW_PHY1_SM;
494  else if (hw->bus.func == E1000_FUNC_2)
495  mask = E1000_SWFW_PHY2_SM;
496  else if (hw->bus.func == E1000_FUNC_3)
497  mask = E1000_SWFW_PHY3_SM;
498 
499  hw->mac.ops.release_swfw_sync(hw, mask);
500 }
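The acquire/release pair above derives the same per-port semaphore mask from hw->bus.func. A minimal sketch of that mapping; the SKETCH_* values stand in for the E1000_SWFW_PHY*_SM definitions and are assumptions here:

#include <stdint.h>

#define SKETCH_PHY0_SM 0x02 /* assumed values, one flag bit per port */
#define SKETCH_PHY1_SM 0x04
#define SKETCH_PHY2_SM 0x20
#define SKETCH_PHY3_SM 0x40

static uint16_t phy_semaphore_mask(unsigned int func)
{
	switch (func) {
	case 1: return SKETCH_PHY1_SM;
	case 2: return SKETCH_PHY2_SM;
	case 3: return SKETCH_PHY3_SM;
	default: return SKETCH_PHY0_SM; /* function 0 and anything else */
	}
}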
501 
511 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
512  u16 *data)
513 {
514  s32 ret_val = -E1000_ERR_PARAM;
515 
516  if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
517  hw_dbg("PHY Address %u is out of range\n", offset);
518  goto out;
519  }
520 
521  ret_val = hw->phy.ops.acquire(hw);
522  if (ret_val)
523  goto out;
524 
525  ret_val = igb_read_phy_reg_i2c(hw, offset, data);
526 
527  hw->phy.ops.release(hw);
528 
529 out:
530  return ret_val;
531 }
532 
542 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
543  u16 data)
544 {
545  s32 ret_val = -E1000_ERR_PARAM;
546 
547 
548  if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
549  hw_dbg("PHY Address %d is out of range\n", offset);
550  goto out;
551  }
552 
553  ret_val = hw->phy.ops.acquire(hw);
554  if (ret_val)
555  goto out;
556 
557  ret_val = igb_write_phy_reg_i2c(hw, offset, data);
558 
559  hw->phy.ops.release(hw);
560 
561 out:
562  return ret_val;
563 }
564 
572 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
573 {
574  struct e1000_phy_info *phy = &hw->phy;
575  s32 ret_val = 0;
576  u16 phy_id;
577  u32 ctrl_ext;
578  u32 mdic;
579 
580  /*
581  * For SGMII PHYs, we try the list of possible addresses until
582  * we find one that works. For non-SGMII PHYs
583  * (e.g. integrated copper PHYs), an address of 1 should
584  * work. The result of this function should mean phy->phy_addr
585  * and phy->id are set correctly.
586  */
587  if (!(igb_sgmii_active_82575(hw))) {
588  phy->addr = 1;
589  ret_val = igb_get_phy_id(hw);
590  goto out;
591  }
592 
593  if (igb_sgmii_uses_mdio_82575(hw)) {
594  switch (hw->mac.type) {
595  case e1000_82575:
596  case e1000_82576:
597  mdic = rd32(E1000_MDIC);
598  mdic &= E1000_MDIC_PHY_MASK;
599  phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
600  break;
601  case e1000_82580:
602  case e1000_i350:
603  case e1000_i210:
604  case e1000_i211:
605  mdic = rd32(E1000_MDICNFG);
606  mdic &= E1000_MDICNFG_PHY_MASK;
607  phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
608  break;
609  default:
610  ret_val = -E1000_ERR_PHY;
611  goto out;
612  break;
613  }
614  ret_val = igb_get_phy_id(hw);
615  goto out;
616  }
617 
618  /* Power on sgmii phy if it is disabled */
619  ctrl_ext = rd32(E1000_CTRL_EXT);
620  wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
621  wrfl();
622  msleep(300);
623 
624  /*
625  * The address field in the I2CCMD register is 3 bits and 0 is invalid.
626  * Therefore, we need to test 1-7
627  */
628  for (phy->addr = 1; phy->addr < 8; phy->addr++) {
629  ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
630  if (ret_val == 0) {
631  hw_dbg("Vendor ID 0x%08X read at address %u\n",
632  phy_id, phy->addr);
633  /*
634  * At the time of this writing, The M88 part is
635  * the only supported SGMII PHY product.
636  */
637  if (phy_id == M88_VENDOR)
638  break;
639  } else {
640  hw_dbg("PHY address %u was unreadable\n", phy->addr);
641  }
642  }
643 
644  /* A valid PHY type couldn't be found. */
645  if (phy->addr == 8) {
646  phy->addr = 0;
647  ret_val = -E1000_ERR_PHY;
648  goto out;
649  } else {
650  ret_val = igb_get_phy_id(hw);
651  }
652 
653  /* restore previous sfp cage power state */
654  wr32(E1000_CTRL_EXT, ctrl_ext);
655 
656 out:
657  return ret_val;
658 }
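The probe loop above exists because the I2CCMD address field is only 3 bits wide, so at most seven addresses (1-7, 0 being invalid) can host the PHY. A user-space model of the same scan; the register behavior and vendor-ID value below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_M88_VENDOR 0x0141 /* assumed PHY_ID1 value for a Marvell PHY */

static int fake_phy_read(uint8_t addr, uint16_t *id)
{
	if (addr != 5)          /* pretend the PHY answers only at address 5 */
		return -1;      /* unreadable address */
	*id = SKETCH_M88_VENDOR;
	return 0;
}

int main(void)
{
	uint16_t id;
	uint8_t addr;

	for (addr = 1; addr < 8; addr++)       /* 0 is invalid in I2CCMD */
		if (!fake_phy_read(addr, &id) && id == SKETCH_M88_VENDOR)
			break;

	if (addr == 8)
		puts("no SGMII PHY found");
	else
		printf("PHY found at address %u\n", addr);
	return 0;
}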
659 
666 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
667 {
668  s32 ret_val;
669 
670  /*
671  * This isn't a true "hard" reset, but is the only reset
672  * available to us at this time.
673  */
674 
675  hw_dbg("Soft resetting SGMII attached PHY...\n");
676 
677  /*
678  * SFP documentation requires the following to configure the SFP module
679  * to work on SGMII. No further documentation is given.
680  */
681  ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
682  if (ret_val)
683  goto out;
684 
685  ret_val = igb_phy_sw_reset(hw);
686 
687 out:
688  return ret_val;
689 }
690 
704 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
705 {
706  struct e1000_phy_info *phy = &hw->phy;
707  s32 ret_val;
708  u16 data;
709 
710  ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
711  if (ret_val)
712  goto out;
713 
714  if (active) {
715  data |= IGP02E1000_PM_D0_LPLU;
716  ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
717  data);
718  if (ret_val)
719  goto out;
720 
721  /* When LPLU is enabled, we should disable SmartSpeed */
722  ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
723  &data);
724  data &= ~IGP01E1000_PSCFR_SMART_SPEED;
725  ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
726  data);
727  if (ret_val)
728  goto out;
729  } else {
730  data &= ~IGP02E1000_PM_D0_LPLU;
731  ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
732  data);
733  /*
734  * LPLU and SmartSpeed are mutually exclusive. LPLU is used
735  * during Dx states where the power conservation is most
736  * important. During driver activity we should enable
737  * SmartSpeed, so performance is maintained.
738  */
739  if (phy->smart_speed == e1000_smart_speed_on) {
740  ret_val = phy->ops.read_reg(hw,
741  IGP01E1000_PHY_PORT_CONFIG, &data);
742  if (ret_val)
743  goto out;
744 
745  data |= IGP01E1000_PSCFR_SMART_SPEED;
746  ret_val = phy->ops.write_reg(hw,
747  IGP01E1000_PHY_PORT_CONFIG, data);
748  if (ret_val)
749  goto out;
750  } else if (phy->smart_speed == e1000_smart_speed_off) {
751  ret_val = phy->ops.read_reg(hw,
752  IGP01E1000_PHY_PORT_CONFIG, &data);
753  if (ret_val)
754  goto out;
755 
756  data &= ~IGP01E1000_PSCFR_SMART_SPEED;
757  ret_val = phy->ops.write_reg(hw,
758  IGP01E1000_PHY_PORT_CONFIG, data);
759  if (ret_val)
760  goto out;
761  }
762  }
763 
764 out:
765  return ret_val;
766 }
767 
781 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
782 {
783  struct e1000_phy_info *phy = &hw->phy;
784  s32 ret_val = 0;
785  u16 data;
786 
787  data = rd32(E1000_82580_PHY_POWER_MGMT);
788 
789  if (active) {
790  data |= E1000_82580_PM_D0_LPLU;
791 
792  /* When LPLU is enabled, we should disable SmartSpeed */
793  data &= ~E1000_82580_PM_SPD;
794  } else {
795  data &= ~E1000_82580_PM_D0_LPLU;
796 
797  /*
798  * LPLU and SmartSpeed are mutually exclusive. LPLU is used
799  * during Dx states where the power conservation is most
800  * important. During driver activity we should enable
801  * SmartSpeed, so performance is maintained.
802  */
803  if (phy->smart_speed == e1000_smart_speed_on)
804  data |= E1000_82580_PM_SPD;
805  else if (phy->smart_speed == e1000_smart_speed_off)
806  data &= ~E1000_82580_PM_SPD;
807  }
808  wr32(E1000_82580_PHY_POWER_MGMT, data);
809  return ret_val;
810 }
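Both LPLU helpers above enforce the same invariant: LPLU and SmartSpeed are never enabled together, and the SmartSpeed bit is only touched when the user made an explicit on/off choice. A standalone sketch of that rule, with assumed bit positions standing in for E1000_82580_PM_D0_LPLU and E1000_82580_PM_SPD:

#include <stdbool.h>
#include <stdint.h>

#define PM_D0_LPLU (1u << 1) /* assumed stand-in for E1000_82580_PM_D0_LPLU */
#define PM_SPD     (1u << 7) /* assumed stand-in for E1000_82580_PM_SPD */

enum smart_speed { SS_DEFAULT, SS_ON, SS_OFF };

static uint16_t apply_d0_lplu(uint16_t data, bool active, enum smart_speed ss)
{
	if (active) {
		data |= PM_D0_LPLU;
		data &= ~PM_SPD;        /* LPLU on => SmartSpeed forced off */
	} else {
		data &= ~PM_D0_LPLU;
		if (ss == SS_ON)        /* explicit user choice only; */
			data |= PM_SPD; /* SS_DEFAULT leaves the bit alone */
		else if (ss == SS_OFF)
			data &= ~PM_SPD;
	}
	return data;
}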
811 
826 s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
827 {
828  struct e1000_phy_info *phy = &hw->phy;
829  s32 ret_val = 0;
830  u16 data;
831 
832  data = rd32(E1000_82580_PHY_POWER_MGMT);
833 
834  if (!active) {
835  data &= ~E1000_82580_PM_D3_LPLU;
836  /*
837  * LPLU and SmartSpeed are mutually exclusive. LPLU is used
838  * during Dx states where the power conservation is most
839  * important. During driver activity we should enable
840  * SmartSpeed, so performance is maintained.
841  */
842  if (phy->smart_speed == e1000_smart_speed_on)
843  data |= E1000_82580_PM_SPD;
844  else if (phy->smart_speed == e1000_smart_speed_off)
845  data &= ~E1000_82580_PM_SPD;
846  } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
847  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
848  (phy->autoneg_advertised == E1000_ALL_100_SPEED)) {
849  data |= E1000_82580_PM_D3_LPLU;
850  /* When LPLU is enabled, we should disable SmartSpeed */
851  data &= ~E1000_82580_PM_SPD;
852  }
853 
854  wr32(E1000_82580_PHY_POWER_MGMT, data);
855  return ret_val;
856 }
857 
867 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
868 {
869  s32 ret_val;
870 
871  ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
872  if (ret_val)
873  goto out;
874 
875  ret_val = igb_acquire_nvm(hw);
876 
877  if (ret_val)
878  hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
879 
880 out:
881  return ret_val;
882 }
883 
891 static void igb_release_nvm_82575(struct e1000_hw *hw)
892 {
893  igb_release_nvm(hw);
894  hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
895 }
896 
905 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
906 {
907  u32 swfw_sync;
908  u32 swmask = mask;
909  u32 fwmask = mask << 16;
910  s32 ret_val = 0;
911  s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
912 
913  while (i < timeout) {
914  if (igb_get_hw_semaphore(hw)) {
915  ret_val = -E1000_ERR_SWFW_SYNC;
916  goto out;
917  }
918 
919  swfw_sync = rd32(E1000_SW_FW_SYNC);
920  if (!(swfw_sync & (fwmask | swmask)))
921  break;
922 
923  /*
924  * Firmware currently using resource (fwmask)
925  * or other software thread using resource (swmask)
926  */
927  igb_put_hw_semaphore(hw);
928  mdelay(5);
929  i++;
930  }
931 
932  if (i == timeout) {
933  hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
934  ret_val = -E1000_ERR_SWFW_SYNC;
935  goto out;
936  }
937 
938  swfw_sync |= swmask;
939  wr32(E1000_SW_FW_SYNC, swfw_sync);
940 
941  igb_put_hw_semaphore(hw);
942 
943 out:
944  return ret_val;
945 }
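The fwmask/swmask arithmetic above relies on the SW_FW_SYNC layout: software ownership flags live in the low 16 bits and the firmware's mirror of the same flags sits shifted into the high 16 bits, so "mask << 16" checks the firmware side of the same resource. A sketch of the availability test in isolation:

#include <stdbool.h>
#include <stdint.h>

static bool resource_free(uint32_t sw_fw_sync, uint16_t mask)
{
	uint32_t swmask = mask;                 /* software bit(s), low half  */
	uint32_t fwmask = (uint32_t)mask << 16; /* firmware mirror, high half */

	return !(sw_fw_sync & (fwmask | swmask));
}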
946 
955 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
956 {
957  u32 swfw_sync;
958 
959  while (igb_get_hw_semaphore(hw) != 0);
960  /* Empty */
961 
962  swfw_sync = rd32(E1000_SW_FW_SYNC);
963  swfw_sync &= ~mask;
964  wr32(E1000_SW_FW_SYNC, swfw_sync);
965 
966  igb_put_hw_semaphore(hw);
967 }
968 
979 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
980 {
981  s32 timeout = PHY_CFG_TIMEOUT;
982  s32 ret_val = 0;
983  u32 mask = E1000_NVM_CFG_DONE_PORT_0;
984 
985  if (hw->bus.func == 1)
986  mask = E1000_NVM_CFG_DONE_PORT_1;
987  else if (hw->bus.func == E1000_FUNC_2)
988  mask = E1000_NVM_CFG_DONE_PORT_2;
989  else if (hw->bus.func == E1000_FUNC_3)
990  mask = E1000_NVM_CFG_DONE_PORT_3;
991 
992  while (timeout) {
993  if (rd32(E1000_EEMNGCTL) & mask)
994  break;
995  msleep(1);
996  timeout--;
997  }
998  if (!timeout)
999  hw_dbg("MNG configuration cycle has not completed.\n");
1000 
1001  /* If EEPROM is not marked present, init the PHY manually */
1002  if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1003  (hw->phy.type == e1000_phy_igp_3))
1004  igb_phy_init_script_igp3(hw);
1005 
1006  return ret_val;
1007 }
1008 
1016 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1017 {
1018  s32 ret_val;
1019  u16 speed, duplex;
1020 
1021  if (hw->phy.media_type != e1000_media_type_copper) {
1022  ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1023  &duplex);
1024  /*
1025  * Use this flag to determine if link needs to be checked or
1026  * not. If we have link clear the flag so that we do not
1027  * continue to check for link.
1028  */
1029  hw->mac.get_link_status = !hw->mac.serdes_has_link;
1030  } else {
1031  ret_val = igb_check_for_copper_link(hw);
1032  }
1033 
1034  return ret_val;
1035 }
1036 
1037 /**
1038  * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1039  * @hw: pointer to the HW structure
1040  */
1041 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1042 {
1043  u32 reg;
1044 
1045 
1046  if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1047  !igb_sgmii_active_82575(hw))
1048  return;
1049 
1050  /* Enable PCS to turn on link */
1051  reg = rd32(E1000_PCS_CFG0);
1052  reg |= E1000_PCS_CFG_PCS_EN;
1053  wr32(E1000_PCS_CFG0, reg);
1054 
1055  /* Power up the laser */
1056  reg = rd32(E1000_CTRL_EXT);
1057  reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1058  wr32(E1000_CTRL_EXT, reg);
1059 
1060  /* flush the write to verify completion */
1061  wrfl();
1062  msleep(1);
1063 }
1064 
1074 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1075  u16 *duplex)
1076 {
1077  struct e1000_mac_info *mac = &hw->mac;
1078  u32 pcs;
1079 
1080  /* Set up defaults for the return values of this function */
1081  mac->serdes_has_link = false;
1082  *speed = 0;
1083  *duplex = 0;
1084 
1085  /*
1086  * Read the PCS Status register for link state. For non-copper mode,
1087  * the status register is not accurate. The PCS status register is
1088  * used instead.
1089  */
1090  pcs = rd32(E1000_PCS_LSTAT);
1091 
1092  /*
1093  * The link up bit determines when link is up on autoneg. The sync ok
1094  * gets set once both sides sync up and agree upon link. Stable link
1095  * can be determined by checking for both link up and link sync ok
1096  */
1097  if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1098  mac->serdes_has_link = true;
1099 
1100  /* Detect and store PCS speed */
1101  if (pcs & E1000_PCS_LSTS_SPEED_1000) {
1102  *speed = SPEED_1000;
1103  } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
1104  *speed = SPEED_100;
1105  } else {
1106  *speed = SPEED_10;
1107  }
1108 
1109  /* Detect and store PCS duplex */
1110  if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1111  *duplex = FULL_DUPLEX;
1112  } else {
1113  *duplex = HALF_DUPLEX;
1114  }
1115  }
1116 
1117  return 0;
1118 }
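A standalone model of the PCS_LSTAT decode above: stable link requires both LINK_OK and SYNK_OK, and speed/duplex come from the same register snapshot. The bit assignments below are placeholders for the E1000_PCS_LSTS_* definitions, not the real layout:

#include <stdint.h>
#include <stdio.h>

#define LSTS_LINK_OK     (1u << 0) /* assumed bit layout for the sketch */
#define LSTS_SPEED_100   (1u << 1)
#define LSTS_SPEED_1000  (1u << 2)
#define LSTS_DUPLEX_FULL (1u << 3)
#define LSTS_SYNK_OK     (1u << 4)

int main(void)
{
	uint32_t pcs = LSTS_LINK_OK | LSTS_SYNK_OK | LSTS_SPEED_1000 |
		       LSTS_DUPLEX_FULL;

	if ((pcs & LSTS_LINK_OK) && (pcs & LSTS_SYNK_OK))
		printf("link up: %d Mbps, %s duplex\n",
		       (pcs & LSTS_SPEED_1000) ? 1000 :
		       (pcs & LSTS_SPEED_100) ? 100 : 10,
		       (pcs & LSTS_DUPLEX_FULL) ? "full" : "half");
	else
		puts("no stable link");
	return 0;
}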
1119 
1120 /**
1121  * igb_shutdown_serdes_link_82575 - Remove link during power down
1122  * @hw: pointer to the HW structure
1123  *
1124  * In the case of serdes, shut down the SFP cage and PCS on driver unload
1125  * when management pass through is not enabled.
1126  */
1127 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1128 {
1129  u32 reg;
1130 
1131  if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1132  !igb_sgmii_active_82575(hw))
1133  return;
1134 
1135  if (!igb_enable_mng_pass_thru(hw)) {
1136  /* Disable PCS to turn off link */
1137  reg = rd32(E1000_PCS_CFG0);
1138  reg &= ~E1000_PCS_CFG_PCS_EN;
1139  wr32(E1000_PCS_CFG0, reg);
1140 
1141  /* shutdown the laser */
1142  reg = rd32(E1000_CTRL_EXT);
1143  reg |= E1000_CTRL_EXT_SDP3_DATA;
1144  wr32(E1000_CTRL_EXT, reg);
1145 
1146  /* flush the write to verify completion */
1147  wrfl();
1148  msleep(1);
1149  }
1150 }
1151 
1159 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1160 {
1161  u32 ctrl, icr;
1162  s32 ret_val;
1163 
1164  /*
1165  * Prevent the PCI-E bus from sticking if there is no TLP connection
1166  * on the last TLP read/write transaction when MAC is reset.
1167  */
1168  ret_val = igb_disable_pcie_master(hw);
1169  if (ret_val)
1170  hw_dbg("PCI-E Master disable polling has failed.\n");
1171 
1172  /* set the completion timeout for interface */
1173  ret_val = igb_set_pcie_completion_timeout(hw);
1174  if (ret_val) {
1175  hw_dbg("PCI-E Set completion timeout has failed.\n");
1176  }
1177 
1178  hw_dbg("Masking off all interrupts\n");
1179  wr32(E1000_IMC, 0xffffffff);
1180 
1181  wr32(E1000_RCTL, 0);
1182  wr32(E1000_TCTL, E1000_TCTL_PSP);
1183  wrfl();
1184 
1185  msleep(10);
1186 
1187  ctrl = rd32(E1000_CTRL);
1188 
1189  hw_dbg("Issuing a global reset to MAC\n");
1190  wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1191 
1192  ret_val = igb_get_auto_rd_done(hw);
1193  if (ret_val) {
1194  /*
1195  * When auto config read does not complete, do not
1196  * return with an error. This can happen in situations
1197  * where there is no eeprom and prevents getting link.
1198  */
1199  hw_dbg("Auto Read Done did not complete\n");
1200  }
1201 
1202  /* If EEPROM is not present, run manual init scripts */
1203  if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1204  igb_reset_init_script_82575(hw);
1205 
1206  /* Clear any pending interrupt events. */
1207  wr32(E1000_IMC, 0xffffffff);
1208  icr = rd32(E1000_ICR);
1209 
1210  /* Install any alternate MAC address into RAR0 */
1211  ret_val = igb_check_alt_mac_addr(hw);
1212 
1213  return ret_val;
1214 }
1215 
1222 static s32 igb_init_hw_82575(struct e1000_hw *hw)
1223 {
1224  struct e1000_mac_info *mac = &hw->mac;
1225  s32 ret_val;
1226  u16 i, rar_count = mac->rar_entry_count;
1227 
1228  /* Initialize identification LED */
1229  ret_val = igb_id_led_init(hw);
1230  if (ret_val) {
1231  hw_dbg("Error initializing identification LED\n");
1232  /* This is not fatal and we should not stop init due to this */
1233  }
1234 
1235  /* Disabling VLAN filtering */
1236  hw_dbg("Initializing the IEEE VLAN\n");
1237  if (hw->mac.type == e1000_i350)
1238  igb_clear_vfta_i350(hw);
1239  else
1240  igb_clear_vfta(hw);
1241 
1242  /* Setup the receive address */
1243  igb_init_rx_addrs(hw, rar_count);
1244 
1245  /* Zero out the Multicast HASH table */
1246  hw_dbg("Zeroing the MTA\n");
1247  for (i = 0; i < mac->mta_reg_count; i++)
1248  array_wr32(E1000_MTA, i, 0);
1249 
1250  /* Zero out the Unicast HASH table */
1251  hw_dbg("Zeroing the UTA\n");
1252  for (i = 0; i < mac->uta_reg_count; i++)
1253  array_wr32(E1000_UTA, i, 0);
1254 
1255  /* Setup link and flow control */
1256  ret_val = igb_setup_link(hw);
1257 
1258  /*
1259  * Clear all of the statistics registers (clear on read). It is
1260  * important that we do this after we have tried to establish link
1261  * because the symbol error count will increment wildly if there
1262  * is no link.
1263  */
1264  igb_clear_hw_cntrs_82575(hw);
1265  return ret_val;
1266 }
1267 
1276 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1277 {
1278  u32 ctrl;
1279  s32 ret_val;
1280 
1281  ctrl = rd32(E1000_CTRL);
1282  ctrl |= E1000_CTRL_SLU;
1283  ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1284  wr32(E1000_CTRL, ctrl);
1285 
1286  ret_val = igb_setup_serdes_link_82575(hw);
1287  if (ret_val)
1288  goto out;
1289 
1290  if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1291  /* allow time for SFP cage to power up phy */
1292  msleep(300);
1293 
1294  ret_val = hw->phy.ops.reset(hw);
1295  if (ret_val) {
1296  hw_dbg("Error resetting the PHY.\n");
1297  goto out;
1298  }
1299  }
1300  switch (hw->phy.type) {
1301  case e1000_phy_i210:
1302  case e1000_phy_m88:
1303  if (hw->phy.id == I347AT4_E_PHY_ID ||
1304  hw->phy.id == M88E1112_E_PHY_ID)
1305  ret_val = igb_copper_link_setup_m88_gen2(hw);
1306  else
1307  ret_val = igb_copper_link_setup_m88(hw);
1308  break;
1309  case e1000_phy_igp_3:
1310  ret_val = igb_copper_link_setup_igp(hw);
1311  break;
1312  case e1000_phy_82580:
1313  ret_val = igb_copper_link_setup_82580(hw);
1314  break;
1315  default:
1316  ret_val = -E1000_ERR_PHY;
1317  break;
1318  }
1319 
1320  if (ret_val)
1321  goto out;
1322 
1323  ret_val = igb_setup_copper_link(hw);
1324 out:
1325  return ret_val;
1326 }
1327 
1337 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1338 {
1339  u32 ctrl_ext, ctrl_reg, reg;
1340  bool pcs_autoneg;
1341  s32 ret_val = E1000_SUCCESS;
1342  u16 data;
1343 
1344  if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1345  !igb_sgmii_active_82575(hw))
1346  return ret_val;
1347 
1348 
1349  /*
1350  * On the 82575, SerDes loopback mode persists until it is
1351  * explicitly turned off or a power cycle is performed. A read to
1352  * the register does not indicate its status. Therefore, we ensure
1353  * loopback mode is disabled during initialization.
1354  */
1355  wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1356 
1357  /* power on the sfp cage if present */
1358  ctrl_ext = rd32(E1000_CTRL_EXT);
1359  ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1360  wr32(E1000_CTRL_EXT, ctrl_ext);
1361 
1362  ctrl_reg = rd32(E1000_CTRL);
1363  ctrl_reg |= E1000_CTRL_SLU;
1364 
1365  if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1366  /* set both sw defined pins */
1367  ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1368 
1369  /* Set switch control to serdes energy detect */
1370  reg = rd32(E1000_CONNSW);
1371  reg |= E1000_CONNSW_ENRGSRC;
1372  wr32(E1000_CONNSW, reg);
1373  }
1374 
1375  reg = rd32(E1000_PCS_LCTL);
1376 
1377  /* default pcs_autoneg to the same setting as mac autoneg */
1378  pcs_autoneg = hw->mac.autoneg;
1379 
1380  switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1381  case E1000_CTRL_EXT_LINK_MODE_SGMII:
1382  /* sgmii mode lets the phy handle forcing speed/duplex */
1383  pcs_autoneg = true;
1384  /* autoneg time out should be disabled for SGMII mode */
1385  reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1386  break;
1387  case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1388  /* disable PCS autoneg and support parallel detect only */
1389  pcs_autoneg = false;
1390  default:
1391  if (hw->mac.type == e1000_82575 ||
1392  hw->mac.type == e1000_82576) {
1393  ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1394  if (ret_val) {
1395  printk(KERN_DEBUG "NVM Read Error\n\n");
1396  return ret_val;
1397  }
1398 
1399  if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1400  pcs_autoneg = false;
1401  }
1402 
1403  /*
1404  * non-SGMII modes only supports a speed of 1000/Full for the
1405  * link so it is best to just force the MAC and let the pcs
1406  * link either autoneg or be forced to 1000/Full
1407  */
1408  ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1409  E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1410 
1411  /* set speed of 1000/Full if speed/duplex is forced */
1412  reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1413  break;
1414  }
1415 
1416  wr32(E1000_CTRL, ctrl_reg);
1417 
1418  /*
1419  * New SerDes mode allows for forcing speed or autonegotiating speed
1420  * at 1gb. Autoneg should be default set by most drivers. This is the
1421  * mode that will be compatible with older link partners and switches.
1422  * However, both are supported by the hardware and some drivers/tools.
1423  */
1424  reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1425  E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1426 
1427  /*
1428  * We force flow control to prevent the CTRL register values from being
1429  * overwritten by the autonegotiated flow control values
1430  */
1431  reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1432 
1433  if (pcs_autoneg) {
1434  /* Set PCS register for autoneg */
1435  reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1436  E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1437  hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1438  } else {
1439  /* Set PCS register for forced link */
1440  reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1441 
1442  hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1443  }
1444 
1445  wr32(E1000_PCS_LCTL, reg);
1446 
1447  if (!igb_sgmii_active_82575(hw))
1448  igb_force_mac_fc(hw);
1449 
1450  return ret_val;
1451 }
1452 
1461 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1462 {
1463  struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1464  return dev_spec->sgmii_active;
1465 }
1466 
1474 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1475 {
1476  if (hw->mac.type == e1000_82575) {
1477  hw_dbg("Running reset init script for 82575\n");
1478  /* SerDes configuration via SERDESCTRL */
1479  igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1480  igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1481  igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1482  igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1483 
1484  /* CCM configuration via CCMCTL register */
1485  igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1486  igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1487 
1488  /* PCIe lanes configuration */
1489  igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1490  igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1491  igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1492  igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1493 
1494  /* PCIe PLL Configuration */
1495  igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1496  igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1497  igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1498  }
1499 
1500  return 0;
1501 }
1502 
1507 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1508 {
1509  s32 ret_val = 0;
1510 
1511  /*
1512  * If there's an alternate MAC address place it in RAR0
1513  * so that it will override the Si installed default perm
1514  * address.
1515  */
1516  ret_val = igb_check_alt_mac_addr(hw);
1517  if (ret_val)
1518  goto out;
1519 
1520  ret_val = igb_read_mac_addr(hw);
1521 
1522 out:
1523  return ret_val;
1524 }
1525 
1526 /**
1527  * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1528  * @hw: pointer to the HW structure
1529  *
1530  * In the case of a PHY power down to save power, or to turn off link during
1531  * a driver unload, or when wake on lan is not enabled, remove the link.
1532  */
1533 void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1534 {
1535  /* If the management interface is not enabled, then power down */
1536  if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1537  igb_power_down_phy_copper(hw);
1538 }
1539 
1546 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1547 {
1548  igb_clear_hw_cntrs_base(hw);
1549 
1550  rd32(E1000_PRC64);
1551  rd32(E1000_PRC127);
1552  rd32(E1000_PRC255);
1553  rd32(E1000_PRC511);
1554  rd32(E1000_PRC1023);
1555  rd32(E1000_PRC1522);
1556  rd32(E1000_PTC64);
1557  rd32(E1000_PTC127);
1558  rd32(E1000_PTC255);
1559  rd32(E1000_PTC511);
1560  rd32(E1000_PTC1023);
1561  rd32(E1000_PTC1522);
1562 
1563  rd32(E1000_ALGNERRC);
1564  rd32(E1000_RXERRC);
1565  rd32(E1000_TNCRS);
1566  rd32(E1000_CEXTERR);
1567  rd32(E1000_TSCTC);
1568  rd32(E1000_TSCTFC);
1569 
1570  rd32(E1000_MGTPRC);
1571  rd32(E1000_MGTPDC);
1572  rd32(E1000_MGTPTC);
1573 
1574  rd32(E1000_IAC);
1575  rd32(E1000_ICRXOC);
1576 
1577  rd32(E1000_ICRXPTC);
1578  rd32(E1000_ICRXATC);
1579  rd32(E1000_ICTXPTC);
1580  rd32(E1000_ICTXATC);
1581  rd32(E1000_ICTXQEC);
1582  rd32(E1000_ICTXQMTC);
1583  rd32(E1000_ICRXDMTC);
1584 
1585  rd32(E1000_CBTMPC);
1586  rd32(E1000_HTDPMC);
1587  rd32(E1000_CBRMPC);
1588  rd32(E1000_RPTHC);
1589  rd32(E1000_HGPTC);
1590  rd32(E1000_HTCBDPC);
1591  rd32(E1000_HGORCL);
1592  rd32(E1000_HGORCH);
1593  rd32(E1000_HGOTCL);
1594  rd32(E1000_HGOTCH);
1595  rd32(E1000_LENERRS);
1596 
1597  /* This register should not be read in copper configurations */
1598  if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1599  igb_sgmii_active_82575(hw))
1600  rd32(E1000_SCVPC);
1601 }
1602 
1603 /**
1604  * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1605  * @hw: pointer to the HW structure
1606  *
1607  * After rx enable, if manageability is enabled then there is likely some
1608  * bad data at the start of the fifo and possibly in the DMA fifo. This
1609  * function clears the fifos and flushes any packets that came in while
1610  * rx was being enabled.
1611  */
1612 void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1613 {
1614  u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1615  int i, ms_wait;
1616 
1617  if (hw->mac.type != e1000_82575 ||
1618  !(rd32(E1000_MANC) & E1000_MANC_RCV_TPE_EN))
1619  return;
1620 
1621  /* Disable all RX queues */
1622  for (i = 0; i < 4; i++) {
1623  rxdctl[i] = rd32(E1000_RXDCTL(i));
1624  wr32(E1000_RXDCTL(i),
1625  rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1626  }
1627  /* Poll all queues to verify they have shut down */
1628  for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1629  msleep(1);
1630  rx_enabled = 0;
1631  for (i = 0; i < 4; i++)
1632  rx_enabled |= rd32(E1000_RXDCTL(i));
1633  if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1634  break;
1635  }
1636 
1637  if (ms_wait == 10)
1638  hw_dbg("Queue disable timed out after 10ms\n");
1639 
1640  /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1641  * incoming packets are rejected. Set enable and wait 2ms so that
1642  * any packet that was coming in as RCTL.EN was set is flushed
1643  */
1644  rfctl = rd32(E1000_RFCTL);
1645  wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1646 
1647  rlpml = rd32(E1000_RLPML);
1648  wr32(E1000_RLPML, 0);
1649 
1650  rctl = rd32(E1000_RCTL);
1651  temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1652  temp_rctl |= E1000_RCTL_LPE;
1653 
1654  wr32(E1000_RCTL, temp_rctl);
1655  wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1656  wrfl();
1657  msleep(2);
1658 
1659  /* Enable RX queues that were previously enabled and restore our
1660  * previous state
1661  */
1662  for (i = 0; i < 4; i++)
1663  wr32(E1000_RXDCTL(i), rxdctl[i]);
1664  wr32(E1000_RCTL, rctl);
1665  wrfl();
1666 
1667  wr32(E1000_RLPML, rlpml);
1668  wr32(E1000_RFCTL, rfctl);
1669 
1670  /* Flush receive errors generated by workaround */
1671  rd32(E1000_ROC);
1672  rd32(E1000_RNBC);
1673  rd32(E1000_MPC);
1674 }
1675 
1686 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
1687 {
1688  u32 gcr = rd32(E1000_GCR);
1689  s32 ret_val = 0;
1690  u16 pcie_devctl2;
1691 
1692  /* only take action if timeout value is defaulted to 0 */
1693  if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1694  goto out;
1695 
1696  /*
1697  * if capabilities version is type 1 we can write the
1698  * timeout of 10ms to 200ms through the GCR register
1699  */
1700  if (!(gcr & E1000_GCR_CAP_VER2)) {
1701  gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1702  goto out;
1703  }
1704 
1705  /*
1706  * for version 2 capabilities we need to write the config space
1707  * directly in order to set the completion timeout value for
1708  * 16ms to 55ms
1709  */
1710  ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1711  &pcie_devctl2);
1712  if (ret_val)
1713  goto out;
1714 
1715  pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1716 
1717  ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1718  &pcie_devctl2);
1719 out:
1720  /* disable completion timeout resend */
1721  gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1722 
1723  wr32(E1000_GCR, gcr);
1724  return ret_val;
1725 }
1726 
1727 /**
1728  * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1729  * @hw: pointer to the hardware struct
1730  * @enable: state to enter, either enabled or disabled
1731  * @pf: Physical Function pool - do not set anti-spoofing for the PF
1732  *
1733  * enables/disables L2 switch anti-spoofing functionality.
1734  */
1735 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1736 {
1737  u32 dtxswc;
1738 
1739  switch (hw->mac.type) {
1740  case e1000_82576:
1741  case e1000_i350:
1742  dtxswc = rd32(E1000_DTXSWC);
1743  if (enable) {
1744  dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1745  E1000_DTXSWC_VLAN_SPOOF_MASK);
1746  /* The PF can spoof - it has to in order to
1747  * support emulation mode NICs */
1748  dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1749  } else {
1750  dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1751  E1000_DTXSWC_VLAN_SPOOF_MASK);
1752  }
1753  wr32(E1000_DTXSWC, dtxswc);
1754  break;
1755  default:
1756  break;
1757  }
1758 }
1759 
1760 /**
1761  * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1762  * @hw: pointer to the hardware struct
1763  * @enable: state to enter, either enabled or disabled
1764  *
1765  * enables/disables L2 switch loopback functionality.
1766  */
1767 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1768 {
1769  u32 dtxswc;
1770 
1771  switch (hw->mac.type) {
1772  case e1000_82576:
1773  dtxswc = rd32(E1000_DTXSWC);
1774  if (enable)
1775  dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1776  else
1777  dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1778  wr32(E1000_DTXSWC, dtxswc);
1779  break;
1780  case e1000_i350:
1781  dtxswc = rd32(E1000_TXSWC);
1782  if (enable)
1783  dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1784  else
1785  dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1786  wr32(E1000_TXSWC, dtxswc);
1787  break;
1788  default:
1789  /* Currently no other hardware supports loopback */
1790  break;
1791  }
1792 
1793 
1794 }
1795 
1796 /**
1797  * igb_vmdq_set_replication_pf - enable or disable vmdq replication
1798  * @hw: pointer to the hardware struct
1799  * @enable: state to enter, either enabled or disabled
1800  *
1801  * enables/disables replication of packets across multiple pools.
1802  */
1803 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1804 {
1805  u32 vt_ctl = rd32(E1000_VT_CTL);
1806 
1807  if (enable)
1808  vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1809  else
1810  vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1811 
1812  wr32(E1000_VT_CTL, vt_ctl);
1813 }
1814 
1824 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1825 {
1826  s32 ret_val;
1827 
1828 
1829  ret_val = hw->phy.ops.acquire(hw);
1830  if (ret_val)
1831  goto out;
1832 
1833  ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1834 
1835  hw->phy.ops.release(hw);
1836 
1837 out:
1838  return ret_val;
1839 }
1840 
1849 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1850 {
1851  s32 ret_val;
1852 
1853 
1854  ret_val = hw->phy.ops.acquire(hw);
1855  if (ret_val)
1856  goto out;
1857 
1858  ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1859 
1860  hw->phy.ops.release(hw);
1861 
1862 out:
1863  return ret_val;
1864 }
1865 
1874 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1875 {
1876  s32 ret_val = 0;
1877  u32 mdicnfg;
1878  u16 nvm_data = 0;
1879 
1880  if (hw->mac.type != e1000_82580)
1881  goto out;
1882  if (!igb_sgmii_active_82575(hw))
1883  goto out;
1884 
1885  ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1886  NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1887  &nvm_data);
1888  if (ret_val) {
1889  hw_dbg("NVM Read Error\n");
1890  goto out;
1891  }
1892 
1893  mdicnfg = rd32(E1000_MDICNFG);
1894  if (nvm_data & NVM_WORD24_EXT_MDIO)
1895  mdicnfg |= E1000_MDICNFG_EXT_MDIO;
1896  if (nvm_data & NVM_WORD24_COM_MDIO)
1897  mdicnfg |= E1000_MDICNFG_COM_MDIO;
1898  wr32(E1000_MDICNFG, mdicnfg);
1899 out:
1900  return ret_val;
1901 }
1902 
1910 static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1911 {
1912  s32 ret_val = 0;
1913  /* BH SW mailbox bit in SW_FW_SYNC */
1914  u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1915  u32 ctrl, icr;
1916  bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1917 
1918 
1919  hw->dev_spec._82575.global_device_reset = false;
1920 
1921  /* Get current control state. */
1922  ctrl = rd32(E1000_CTRL);
1923 
1924  /*
1925  * Prevent the PCI-E bus from sticking if there is no TLP connection
1926  * on the last TLP read/write transaction when MAC is reset.
1927  */
1928  ret_val = igb_disable_pcie_master(hw);
1929  if (ret_val)
1930  hw_dbg("PCI-E Master disable polling has failed.\n");
1931 
1932  hw_dbg("Masking off all interrupts\n");
1933  wr32(E1000_IMC, 0xffffffff);
1934  wr32(E1000_RCTL, 0);
1935  wr32(E1000_TCTL, E1000_TCTL_PSP);
1936  wrfl();
1937 
1938  msleep(10);
1939 
1940  /* Determine whether or not a global dev reset is requested */
1941  if (global_device_reset &&
1942  hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
1943  global_device_reset = false;
1944 
1945  if (global_device_reset &&
1946  !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1947  ctrl |= E1000_CTRL_DEV_RST;
1948  else
1949  ctrl |= E1000_CTRL_RST;
1950 
1951  wr32(E1000_CTRL, ctrl);
1952  wrfl();
1953 
1954  /* Add delay to insure DEV_RST has time to complete */
1955  if (global_device_reset)
1956  msleep(5);
1957 
1958  ret_val = igb_get_auto_rd_done(hw);
1959  if (ret_val) {
1960  /*
1961  * When auto config read does not complete, do not
1962  * return with an error. This can happen in situations
1963  * where there is no eeprom and prevents getting link.
1964  */
1965  hw_dbg("Auto Read Done did not complete\n");
1966  }
1967 
1968  /* If EEPROM is not present, run manual init scripts */
1969  if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1970  igb_reset_init_script_82575(hw);
1971 
1972  /* clear global device reset status bit */
1973  wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
1974 
1975  /* Clear any pending interrupt events. */
1976  wr32(E1000_IMC, 0xffffffff);
1977  icr = rd32(E1000_ICR);
1978 
1979  ret_val = igb_reset_mdicnfg_82580(hw);
1980  if (ret_val)
1981  hw_dbg("Could not reset MDICNFG based on EEPROM\n");
1982 
1983  /* Install any alternate MAC address into RAR0 */
1984  ret_val = igb_check_alt_mac_addr(hw);
1985 
1986  /* Release semaphore */
1987  if (global_device_reset)
1988  hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
1989 
1990  return ret_val;
1991 }
1992 
1993 /**
1994  * igb_rxpbs_adjust_82580 - adjust RXPBS value
1995  * @data: data value read from the RXPBS register
1996  *
1997  * The 82580 uses a table-based approach for packet buffer allocation
1998  * sizes. This function converts the encoding read from the RXPBS
1999  * register into the size it represents; unknown encodings return 0.
2000  */
2001 
2002 
2003 u16 igb_rxpbs_adjust_82580(u32 data)
2004 {
2005  u16 ret_val = 0;
2006 
2007  if (data < E1000_82580_RXPBS_TABLE_SIZE)
2008  ret_val = e1000_82580_rxpbs_table[data];
2009 
2010  return ret_val;
2011 }
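Worked example of the lookup above: the RXPBS encoding indexes the table (index 2 maps to 144, presumably KB of packet buffer), and anything past the table end falls back to 0. A self-contained version:

#include <stdint.h>
#include <stdio.h>

static const uint16_t rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16,
					35, 70, 140 };
#define RXPBS_TABLE_SIZE (sizeof(rxpbs_table) / sizeof(rxpbs_table[0]))

static uint16_t rxpbs_adjust(uint32_t data)
{
	return data < RXPBS_TABLE_SIZE ? rxpbs_table[data] : 0;
}

int main(void)
{
	printf("%u\n", rxpbs_adjust(2));  /* 144: table hit */
	printf("%u\n", rxpbs_adjust(42)); /* 0: unknown encoding */
	return 0;
}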
2012 
2022 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2023  u16 offset)
2024 {
2025  s32 ret_val = 0;
2026  u16 checksum = 0;
2027  u16 i, nvm_data;
2028 
2029  for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2030  ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2031  if (ret_val) {
2032  hw_dbg("NVM Read Error\n");
2033  goto out;
2034  }
2035  checksum += nvm_data;
2036  }
2037 
2038  if (checksum != (u16) NVM_SUM) {
2039  hw_dbg("NVM Checksum Invalid\n");
2040  ret_val = -E1000_ERR_NVM;
2041  goto out;
2042  }
2043 
2044 out:
2045  return ret_val;
2046 }
2047 
2058 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2059 {
2060  s32 ret_val;
2061  u16 checksum = 0;
2062  u16 i, nvm_data;
2063 
2064  for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2065  ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2066  if (ret_val) {
2067  hw_dbg("NVM Read Error while updating checksum.\n");
2068  goto out;
2069  }
2070  checksum += nvm_data;
2071  }
2072  checksum = (u16) NVM_SUM - checksum;
2073  ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2074  &checksum);
2075  if (ret_val)
2076  hw_dbg("NVM Write Error while updating checksum.\n");
2077 
2078 out:
2079  return ret_val;
2080 }
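The two helpers above maintain one invariant: the 16-bit sum of NVM words offset+0x00 through offset+0x3F (NVM_CHECKSUM_REG) must equal NVM_SUM, 0xBABA. A minimal user-space model of update-then-validate on a toy image:

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM          0xBABAu
#define NVM_CHECKSUM_REG 0x3F

int main(void)
{
	uint16_t nvm[NVM_CHECKSUM_REG + 1] = { 0x1234, 0xABCD }; /* toy image */
	uint16_t checksum = 0;
	int i;

	/* update: sum words 0x00..0x3E, store the complement in word 0x3F */
	for (i = 0; i < NVM_CHECKSUM_REG; i++)
		checksum += nvm[i];
	nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - checksum);

	/* validate: the sum of words 0x00..0x3F must now equal NVM_SUM */
	checksum = 0;
	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
		checksum += nvm[i];
	printf("checksum %s\n", checksum == NVM_SUM ? "valid" : "invalid");
	return 0;
}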
2081 
2090 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2091 {
2092  s32 ret_val = 0;
2093  u16 eeprom_regions_count = 1;
2094  u16 j, nvm_data;
2095  u16 nvm_offset;
2096 
2097  ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2098  if (ret_val) {
2099  hw_dbg("NVM Read Error\n");
2100  goto out;
2101  }
2102 
2103  if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2104  /* if checksums compatibility bit is set validate checksums
2105  * for all 4 ports. */
2106  eeprom_regions_count = 4;
2107  }
2108 
2109  for (j = 0; j < eeprom_regions_count; j++) {
2110  nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2111  ret_val = igb_validate_nvm_checksum_with_offset(hw,
2112  nvm_offset);
2113  if (ret_val != 0)
2114  goto out;
2115  }
2116 
2117 out:
2118  return ret_val;
2119 }
2120 
2129 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2130 {
2131  s32 ret_val;
2132  u16 j, nvm_data;
2133  u16 nvm_offset;
2134 
2135  ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2136  if (ret_val) {
2137  hw_dbg("NVM Read Error while updating checksum"
2138  " compatibility bit.\n");
2139  goto out;
2140  }
2141 
2142  if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2143  /* set compatibility bit to validate checksums appropriately */
2144  nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2145  ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2146  &nvm_data);
2147  if (ret_val) {
2148  hw_dbg("NVM Write Error while updating checksum"
2149  " compatibility bit.\n");
2150  goto out;
2151  }
2152  }
2153 
2154  for (j = 0; j < 4; j++) {
2155  nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2156  ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2157  if (ret_val)
2158  goto out;
2159  }
2160 
2161 out:
2162  return ret_val;
2163 }
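The i350 and 82580 variants both walk four per-port checksum regions addressed via NVM_82580_LAN_FUNC_OFFSET. A sketch of that addressing; the macro body below is my reading of the driver headers (port 0 at word 0, ports 1-3 at 0x80, 0xC0, 0x100) and should be treated as an assumption:

#include <stdio.h>

#define LAN_FUNC_OFFSET(a) ((a) ? (0x40 + 0x40 * (a)) : 0) /* assumed body */

int main(void)
{
	int j;

	for (j = 0; j < 4; j++)
		printf("LAN%d checksum region starts at word 0x%03x\n",
		       j, LAN_FUNC_OFFSET(j));
	return 0;
}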
2164 
2173 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2174 {
2175  s32 ret_val = 0;
2176  u16 j;
2177  u16 nvm_offset;
2178 
2179  for (j = 0; j < 4; j++) {
2180  nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2181  ret_val = igb_validate_nvm_checksum_with_offset(hw,
2182  nvm_offset);
2183  if (ret_val != 0)
2184  goto out;
2185  }
2186 
2187 out:
2188  return ret_val;
2189 }
2190 
2199 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2200 {
2201  s32 ret_val = 0;
2202  u16 j;
2203  u16 nvm_offset;
2204 
2205  for (j = 0; j < 4; j++) {
2206  nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2207  ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2208  if (ret_val != 0)
2209  goto out;
2210  }
2211 
2212 out:
2213  return ret_val;
2214 }
2215 
2216 /**
2217  * igb_set_eee_i350 - Enable/disable EEE support
2218  * @hw: pointer to the HW structure
2219  *
2220  * Enable/disable EEE based on the setting in the dev_spec structure.
2221  */
2222 
2223 s32 igb_set_eee_i350(struct e1000_hw *hw)
2224 {
2225  s32 ret_val = 0;
2226  u32 ipcnfg, eeer;
2227 
2228  if ((hw->mac.type < e1000_i350) ||
2229  (hw->phy.media_type != e1000_media_type_copper))
2230  goto out;
2231  ipcnfg = rd32(E1000_IPCNFG);
2232  eeer = rd32(E1000_EEER);
2233 
2234  /* enable or disable per user setting */
2235  if (!(hw->dev_spec._82575.eee_disable)) {
2236  ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
2237  E1000_IPCNFG_EEE_100M_AN);
2238  eeer |= (E1000_EEER_TX_LPI_EN |
2239  E1000_EEER_RX_LPI_EN |
2240  E1000_EEER_LPI_FC);
2241 
2242  /* keep the LPI clock running before EEE is enabled */
2243  if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
2244  u32 eee_su;
2245  eee_su = rd32(E1000_EEE_SU);
2246  eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
2247  wr32(E1000_EEE_SU, eee_su);
2248  }
2249 
2250  } else {
2251  ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2252  E1000_IPCNFG_EEE_100M_AN);
2253  eeer &= ~(E1000_EEER_TX_LPI_EN |
2254  E1000_EEER_RX_LPI_EN |
2255  E1000_EEER_LPI_FC);
2256  }
2257  wr32(E1000_IPCNFG, ipcnfg);
2258  wr32(E1000_EEER, eeer);
2259  rd32(E1000_IPCNFG);
2260  rd32(E1000_EEER);
2261 out:
2262 
2263  return ret_val;
2264 }
2265 
2266 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2267  .init_hw = igb_init_hw_82575,
2268  .check_for_link = igb_check_for_link_82575,
2269  .rar_set = igb_rar_set,
2270  .read_mac_addr = igb_read_mac_addr_82575,
2271  .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
2272 };
2273 
2274 static struct e1000_phy_operations e1000_phy_ops_82575 = {
2275  .acquire = igb_acquire_phy_82575,
2276  .get_cfg_done = igb_get_cfg_done_82575,
2277  .release = igb_release_phy_82575,
2278 };
2279 
2280 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2281  .acquire = igb_acquire_nvm_82575,
2282  .read = igb_read_nvm_eerd,
2283  .release = igb_release_nvm_82575,
2284  .write = igb_write_nvm_spi,
2285 };
2286 
2287 const struct e1000_info e1000_82575_info = {
2288  .get_invariants = igb_get_invariants_82575,
2289  .mac_ops = &e1000_mac_ops_82575,
2290  .phy_ops = &e1000_phy_ops_82575,
2291  .nvm_ops = &e1000_nvm_ops_82575,
2292 };
2293