Linux Kernel  3.7.1
ixgbe_phy.c
1 /*******************************************************************************
2 
3  Intel 10 Gigabit PCI Express Linux driver
4  Copyright(c) 1999 - 2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <[email protected]>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 #include <linux/pci.h>
29 #include <linux/delay.h>
30 #include <linux/sched.h>
31 
32 #include "ixgbe_common.h"
33 #include "ixgbe_phy.h"
34 
35 static void ixgbe_i2c_start(struct ixgbe_hw *hw);
36 static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
37 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
38 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
39 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
40 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
41 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
42 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
43 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
44 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
45 static bool ixgbe_get_i2c_data(u32 *i2cctl);
46 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
47 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
48 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
49 
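50 /**
51  * ixgbe_identify_phy_generic - Get physical layer module
52  * @hw: pointer to hardware structure
53  *
54  * Determines the physical layer module found on the current adapter.
55  **/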
56 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
57 {
58  s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
59  u32 phy_addr;
60  u16 ext_ability = 0;
61 
62  if (hw->phy.type == ixgbe_phy_unknown) {
63  for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
64  hw->phy.mdio.prtad = phy_addr;
65  if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
66  ixgbe_get_phy_id(hw);
67  hw->phy.type =
68  ixgbe_get_phy_type_from_id(hw->phy.id);
69 
70  if (hw->phy.type == ixgbe_phy_unknown) {
71  hw->phy.ops.read_reg(hw,
72  MDIO_PMA_EXTABLE,
73  MDIO_MMD_PMAPMD,
74  &ext_ability);
75  if (ext_ability &
76  (MDIO_PMA_EXTABLE_10GBT |
77  MDIO_PMA_EXTABLE_1000BT))
78  hw->phy.type =
79  ixgbe_phy_cu_unknown;
80  else
81  hw->phy.type =
82  ixgbe_phy_generic;
83  }
84 
85  status = 0;
86  break;
87  }
88  }
89  /* clear value if nothing found */
90  if (status != 0)
91  hw->phy.mdio.prtad = 0;
92  } else {
93  status = 0;
94  }
95 
96  return status;
97 }
98 
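99 /**
100  * ixgbe_get_phy_id - Reads the PHY ID registers
101  * @hw: pointer to hardware structure
102  *
103  **/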
104 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
105 {
106  u32 status;
107  u16 phy_id_high = 0;
108  u16 phy_id_low = 0;
109 
110  status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
111  &phy_id_high);
112 
113  if (status == 0) {
114  hw->phy.id = (u32)(phy_id_high << 16);
115  status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
116  &phy_id_low);
117  hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
118  hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
119  }
120  return status;
121 }
122 
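123 /**
124  * ixgbe_get_phy_type_from_id - Get the PHY type from its ID
125  * @phy_id: hardware PHY ID
126  *
127  **/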
128 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
129 {
130  enum ixgbe_phy_type phy_type;
131 
132  switch (phy_id) {
133  case TN1010_PHY_ID:
134  phy_type = ixgbe_phy_tn;
135  break;
136  case X540_PHY_ID:
137  phy_type = ixgbe_phy_aq;
138  break;
139  case QT2022_PHY_ID:
140  phy_type = ixgbe_phy_qt;
141  break;
142  case ATH_PHY_ID:
143  phy_type = ixgbe_phy_nl;
144  break;
145  default:
146  phy_type = ixgbe_phy_unknown;
147  break;
148  }
149 
150  return phy_type;
151 }
152 
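153 /**
154  * ixgbe_reset_phy_generic - Performs a PHY reset
155  * @hw: pointer to hardware structure
156  **/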
157 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
158 {
159  u32 i;
160  u16 ctrl = 0;
161  s32 status = 0;
162 
163  if (hw->phy.type == ixgbe_phy_unknown)
164  status = ixgbe_identify_phy_generic(hw);
165 
166  if (status != 0 || hw->phy.type == ixgbe_phy_none)
167  goto out;
168 
169  /* Don't reset PHY if it's shut down due to overtemp. */
170  if (!hw->phy.reset_if_overtemp &&
171  (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
172  goto out;
173 
174  /*
175  * Perform soft PHY reset to the PHY_XS.
176  * This will cause a soft reset to the PHY
177  */
178  hw->phy.ops.write_reg(hw, MDIO_CTRL1,
179  MDIO_MMD_PHYXS,
180  MDIO_CTRL1_RESET);
181 
182  /*
183  * Poll for reset bit to self-clear indicating reset is complete.
184  * Some PHYs could take up to 3 seconds to complete and need about
185  * 1.7 usec delay after the reset is complete.
186  */
187  for (i = 0; i < 30; i++) {
188  msleep(100);
189  hw->phy.ops.read_reg(hw, MDIO_CTRL1,
190  MDIO_MMD_PHYXS, &ctrl);
191  if (!(ctrl & MDIO_CTRL1_RESET)) {
192  udelay(2);
193  break;
194  }
195  }
196 
197  if (ctrl & MDIO_CTRL1_RESET) {
198  status = IXGBE_ERR_RESET_FAILED;
199  hw_dbg(hw, "PHY reset polling failed to complete.\n");
200  }
201 
202 out:
203  return status;
204 }
205 
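206 /**
207  * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
208  * @hw: pointer to hardware structure
209  * @reg_addr: 32 bit address of PHY register to read
210  * @phy_data: Pointer to read data from PHY register
211  **/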
212 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
213  u32 device_type, u16 *phy_data)
214 {
215  u32 command;
216  u32 i;
217  u32 data;
218  s32 status = 0;
219  u16 gssr;
220 
221  if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
222  gssr = IXGBE_GSSR_PHY1_SM;
223  else
224  gssr = IXGBE_GSSR_PHY0_SM;
225 
226  if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
227  status = IXGBE_ERR_SWFW_SYNC;
228 
229  if (status == 0) {
230  /* Setup and write the address cycle command */
231  command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
232  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
233  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
234  (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
235 
236  IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
237 
238  /*
239  * Check every 10 usec to see if the address cycle completed.
240  * The MDI Command bit will clear when the operation is
241  * complete
242  */
243  for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
244  udelay(10);
245 
246  command = IXGBE_READ_REG(hw, IXGBE_MSCA);
247 
248  if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
249  break;
250  }
251 
252  if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
253  hw_dbg(hw, "PHY address command did not complete.\n");
254  status = IXGBE_ERR_PHY;
255  }
256 
257  if (status == 0) {
258  /*
259  * Address cycle complete, setup and write the read
260  * command
261  */
262  command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
263  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
264  (hw->phy.mdio.prtad <<
265  IXGBE_MSCA_PHY_ADDR_SHIFT) |
266  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
267 
268  IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
269 
270  /*
271  * Check every 10 usec to see if the address cycle
272  * completed. The MDI Command bit will clear when the
273  * operation is complete
274  */
275  for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
276  udelay(10);
277 
278  command = IXGBE_READ_REG(hw, IXGBE_MSCA);
279 
280  if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
281  break;
282  }
283 
284  if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
285  hw_dbg(hw, "PHY read command didn't complete\n");
286  status = IXGBE_ERR_PHY;
287  } else {
288  /*
289  * Read operation is complete. Get the data
290  * from MSRWD
291  */
292  data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
293  data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
294  *phy_data = (u16)(data);
295  }
296  }
297 
298  hw->mac.ops.release_swfw_sync(hw, gssr);
299  }
300 
301  return status;
302 }
303 
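304 /**
305  * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
306  * @hw: pointer to hardware structure
307  * @reg_addr: 32 bit PHY register to write
308  * @device_type: 5 bit device type
309  * @phy_data: Data to write to the PHY register
310  **/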
311 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
312  u32 device_type, u16 phy_data)
313 {
314  u32 command;
315  u32 i;
316  s32 status = 0;
317  u16 gssr;
318 
319  if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
320  gssr = IXGBE_GSSR_PHY1_SM;
321  else
322  gssr = IXGBE_GSSR_PHY0_SM;
323 
324  if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
325  status = IXGBE_ERR_SWFW_SYNC;
326 
327  if (status == 0) {
328  /* Put the data in the MDI single read and write data register*/
329  IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
330 
331  /* Setup and write the address cycle command */
332  command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
333  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
334  (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
335  (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
336 
337  IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
338 
339  /*
340  * Check every 10 usec to see if the address cycle completed.
341  * The MDI Command bit will clear when the operation is
342  * complete
343  */
344  for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
345  udelay(10);
346 
347  command = IXGBE_READ_REG(hw, IXGBE_MSCA);
348 
349  if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
350  break;
351  }
352 
353  if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
354  hw_dbg(hw, "PHY address cmd didn't complete\n");
355  status = IXGBE_ERR_PHY;
356  }
357 
358  if (status == 0) {
359  /*
360  * Address cycle complete, setup and write the write
361  * command
362  */
363  command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
364  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
365  (hw->phy.mdio.prtad <<
366  IXGBE_MSCA_PHY_ADDR_SHIFT) |
367  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
368 
369  IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
370 
371  /*
372  * Check every 10 usec to see if the address cycle
373  * completed. The MDI Command bit will clear when the
374  * operation is complete
375  */
376  for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
377  udelay(10);
378 
379  command = IXGBE_READ_REG(hw, IXGBE_MSCA);
380 
381  if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
382  break;
383  }
384 
385  if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
386  hw_dbg(hw, "PHY address cmd didn't complete\n");
387  status = IXGBE_ERR_PHY;
388  }
389  }
390 
391  hw->mac.ops.release_swfw_sync(hw, gssr);
392  }
393 
394  return status;
395 }
396 
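397 /**
398  * ixgbe_setup_phy_link_generic - Set and restart autoneg
399  * @hw: pointer to hardware structure
400  *
401  * Restart autonegotiation and PHY and waits for completion.
402  **/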
403 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
404 {
405  s32 status = 0;
406  u32 time_out;
407  u32 max_time_out = 10;
408  u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
409  bool autoneg = false;
410  ixgbe_link_speed speed;
411 
412  ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
413 
414  if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
415  /* Set or unset auto-negotiation 10G advertisement */
416  hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
417  MDIO_MMD_AN,
418  &autoneg_reg);
419 
420  autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
421  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
422  autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
423 
424  hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
425  MDIO_MMD_AN,
426  autoneg_reg);
427  }
428 
429  if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
430  /* Set or unset auto-negotiation 1G advertisement */
431  hw->phy.ops.read_reg(hw,
432  IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
433  MDIO_MMD_AN,
434  &autoneg_reg);
435 
436  autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
437  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
438  autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
439 
440  hw->phy.ops.write_reg(hw,
441  IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
442  MDIO_MMD_AN,
443  autoneg_reg);
444  }
445 
446  if (speed & IXGBE_LINK_SPEED_100_FULL) {
447  /* Set or unset auto-negotiation 100M advertisement */
448  hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
449  MDIO_MMD_AN,
450  &autoneg_reg);
451 
452  autoneg_reg &= ~(ADVERTISE_100FULL |
453  ADVERTISE_100HALF);
454  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
455  autoneg_reg |= ADVERTISE_100FULL;
456 
457  hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
458  MDIO_MMD_AN,
459  autoneg_reg);
460  }
461 
462  /* Restart PHY autonegotiation and wait for completion */
463  hw->phy.ops.read_reg(hw, MDIO_CTRL1,
464  MDIO_MMD_AN, &autoneg_reg);
465 
466  autoneg_reg |= MDIO_AN_CTRL1_RESTART;
467 
468  hw->phy.ops.write_reg(hw, MDIO_CTRL1,
469  MDIO_MMD_AN, autoneg_reg);
470 
471  /* Wait for autonegotiation to finish */
472  for (time_out = 0; time_out < max_time_out; time_out++) {
473  udelay(10);
474  /* Restart PHY autonegotiation and wait for completion */
475  status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
476  MDIO_MMD_AN,
477  &autoneg_reg);
478 
479  autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
480  if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) {
481  break;
482  }
483  }
484 
485  if (time_out == max_time_out) {
486  status = IXGBE_ERR_LINK_SETUP;
487  hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
488  }
489 
490  return status;
491 }
492 
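493 /**
494  * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
495  * @hw: pointer to hardware structure
496  * @speed: new link speed
497  * @autoneg: true if autonegotiation enabled
498  **/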
499 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
500  ixgbe_link_speed speed,
501  bool autoneg,
502  bool autoneg_wait_to_complete)
503 {
504 
505  /*
506  * Clear autoneg_advertised and set new values based on input link
507  * speed.
508  */
509  hw->phy.autoneg_advertised = 0;
510 
511  if (speed & IXGBE_LINK_SPEED_10GB_FULL)
512  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
513 
514  if (speed & IXGBE_LINK_SPEED_1GB_FULL)
515  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
516 
517  if (speed & IXGBE_LINK_SPEED_100_FULL)
518  hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
519 
520  /* Setup link based on the new speed settings */
521  hw->phy.ops.setup_link(hw);
522 
523  return 0;
524 }
525 
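526 /**
527  * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
528  * @hw: pointer to hardware structure
529  * @speed: pointer to link speed
530  * @autoneg: boolean auto-negotiation value
531  *
532  * Determines the supported link capabilities from the PHY speed ability register.
533  **/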
534 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
535  ixgbe_link_speed *speed,
536  bool *autoneg)
537 {
538  s32 status = IXGBE_ERR_LINK_SETUP;
539  u16 speed_ability;
540 
541  *speed = 0;
542  *autoneg = true;
543 
544  status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
545  &speed_ability);
546 
547  if (status == 0) {
548  if (speed_ability & MDIO_SPEED_10G)
549  *speed |= IXGBE_LINK_SPEED_10GB_FULL;
550  if (speed_ability & MDIO_PMA_SPEED_1000)
551  *speed |= IXGBE_LINK_SPEED_1GB_FULL;
552  if (speed_ability & MDIO_PMA_SPEED_100)
553  *speed |= IXGBE_LINK_SPEED_100_FULL;
554  }
555 
556  return status;
557 }
558 
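559 /**
560  * ixgbe_check_phy_link_tnx - Determine link and speed status
561  * @hw: pointer to hardware structure
562  * @speed: link speed
563  * @link_up: status of link
564  *
565  **/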
566 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
567  bool *link_up)
568 {
569  s32 status = 0;
570  u32 time_out;
571  u32 max_time_out = 10;
572  u16 phy_link = 0;
573  u16 phy_speed = 0;
574  u16 phy_data = 0;
575 
576  /* Initialize speed and link to default case */
577  *link_up = false;
578  *speed = IXGBE_LINK_SPEED_10GB_FULL;
579 
580  /*
581  * Check current speed and link status of the PHY register.
582  * This is a vendor specific register and may have to
583  * be changed for other copper PHYs.
584  */
585  for (time_out = 0; time_out < max_time_out; time_out++) {
586  udelay(10);
587  status = hw->phy.ops.read_reg(hw,
588  MDIO_STAT1,
589  MDIO_MMD_VEND1,
590  &phy_data);
591  phy_link = phy_data &
592  IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
593  phy_speed = phy_data &
594  IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
595  if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
596  *link_up = true;
597  if (phy_speed ==
598  IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
599  *speed = IXGBE_LINK_SPEED_1GB_FULL;
600  break;
601  }
602  }
603 
604  return status;
605 }
606 
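607 /**
608  * ixgbe_setup_phy_link_tnx - Set and restart autoneg
609  * @hw: pointer to hardware structure
610  *
611  * Restart autonegotiation and PHY and waits for completion.
612  **/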
613 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
614 {
615  s32 status = 0;
616  u32 time_out;
617  u32 max_time_out = 10;
618  u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
619  bool autoneg = false;
620  ixgbe_link_speed speed;
621 
622  ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
623 
624  if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
625  /* Set or unset auto-negotiation 10G advertisement */
626  hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
627  MDIO_MMD_AN,
628  &autoneg_reg);
629 
630  autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G;
631  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
632  autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G;
633 
634  hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL,
635  MDIO_MMD_AN,
636  autoneg_reg);
637  }
638 
639  if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
640  /* Set or unset auto-negotiation 1G advertisement */
641  hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
642  MDIO_MMD_AN,
643  &autoneg_reg);
644 
645  autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
646  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
647  autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
648 
649  hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
650  MDIO_MMD_AN,
651  autoneg_reg);
652  }
653 
654  if (speed & IXGBE_LINK_SPEED_100_FULL) {
655  /* Set or unset auto-negotiation 100M advertisement */
656  hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
657  MDIO_MMD_AN,
658  &autoneg_reg);
659 
660  autoneg_reg &= ~(ADVERTISE_100FULL |
661  ADVERTISE_100HALF);
662  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
663  autoneg_reg |= ADVERTISE_100FULL;
664 
665  hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
666  MDIO_MMD_AN,
667  autoneg_reg);
668  }
669 
670  /* Restart PHY autonegotiation and wait for completion */
671  hw->phy.ops.read_reg(hw, MDIO_CTRL1,
672  MDIO_MMD_AN, &autoneg_reg);
673 
674  autoneg_reg |= MDIO_AN_CTRL1_RESTART;
675 
676  hw->phy.ops.write_reg(hw, MDIO_CTRL1,
677  MDIO_MMD_AN, autoneg_reg);
678 
679  /* Wait for autonegotiation to finish */
680  for (time_out = 0; time_out < max_time_out; time_out++) {
681  udelay(10);
682  /* Restart PHY autonegotiation and wait for completion */
683  status = hw->phy.ops.read_reg(hw, MDIO_STAT1,
684  MDIO_MMD_AN,
685  &autoneg_reg);
686 
687  autoneg_reg &= MDIO_AN_STAT1_COMPLETE;
688  if (autoneg_reg == MDIO_AN_STAT1_COMPLETE)
689  break;
690  }
691 
692  if (time_out == max_time_out) {
693  status = IXGBE_ERR_LINK_SETUP;
694  hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
695  }
696 
697  return status;
698 }
699 
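700 /**
701  * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
702  * @hw: pointer to hardware structure
703  * @firmware_version: pointer to the PHY Firmware Version
704  **/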
705 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
706  u16 *firmware_version)
707 {
708  s32 status = 0;
709 
710  status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
711  MDIO_MMD_VEND1,
712  firmware_version);
713 
714  return status;
715 }
716 
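717 /**
718  * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
719  * @hw: pointer to hardware structure
720  * @firmware_version: pointer to the PHY Firmware Version
721  **/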
722 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
723  u16 *firmware_version)
724 {
725  s32 status = 0;
726 
727  status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
728  MDIO_MMD_VEND1,
729  firmware_version);
730 
731  return status;
732 }
733 
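734 /**
735  * ixgbe_reset_phy_nl - Performs a PHY reset
736  * @hw: pointer to hardware structure
737  **/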
738 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
739 {
740  u16 phy_offset, control, eword, edata, block_crc;
741  bool end_data = false;
742  u16 list_offset, data_offset;
743  u16 phy_data = 0;
744  s32 ret_val = 0;
745  u32 i;
746 
747  hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
748 
749  /* reset the PHY and poll for completion */
750  hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
751  (phy_data | MDIO_CTRL1_RESET));
752 
753  for (i = 0; i < 100; i++) {
754  hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
755  &phy_data);
756  if ((phy_data & MDIO_CTRL1_RESET) == 0)
757  break;
758  usleep_range(10000, 20000);
759  }
760 
761  if ((phy_data & MDIO_CTRL1_RESET) != 0) {
762  hw_dbg(hw, "PHY reset did not complete.\n");
763  ret_val = IXGBE_ERR_PHY;
764  goto out;
765  }
766 
767  /* Get init offsets */
768  ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
769  &data_offset);
770  if (ret_val != 0)
771  goto out;
772 
773  ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
774  data_offset++;
775  while (!end_data) {
776  /*
777  * Read control word from PHY init contents offset
778  */
779  ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
780  control = (eword & IXGBE_CONTROL_MASK_NL) >>
781  IXGBE_CONTROL_SHIFT_NL;
782  edata = eword & IXGBE_DATA_MASK_NL;
783  switch (control) {
784  case IXGBE_DELAY_NL:
785  data_offset++;
786  hw_dbg(hw, "DELAY: %d MS\n", edata);
787  usleep_range(edata * 1000, edata * 2000);
788  break;
789  case IXGBE_DATA_NL:
790  hw_dbg(hw, "DATA:\n");
791  data_offset++;
792  hw->eeprom.ops.read(hw, data_offset++,
793  &phy_offset);
794  for (i = 0; i < edata; i++) {
795  hw->eeprom.ops.read(hw, data_offset, &eword);
796  hw->phy.ops.write_reg(hw, phy_offset,
797  MDIO_MMD_PMAPMD, eword);
798  hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
799  phy_offset);
800  data_offset++;
801  phy_offset++;
802  }
803  break;
804  case IXGBE_CONTROL_NL:
805  data_offset++;
806  hw_dbg(hw, "CONTROL:\n");
807  if (edata == IXGBE_CONTROL_EOL_NL) {
808  hw_dbg(hw, "EOL\n");
809  end_data = true;
810  } else if (edata == IXGBE_CONTROL_SOL_NL) {
811  hw_dbg(hw, "SOL\n");
812  } else {
813  hw_dbg(hw, "Bad control value\n");
814  ret_val = IXGBE_ERR_PHY;
815  goto out;
816  }
817  break;
818  default:
819  hw_dbg(hw, "Bad control type\n");
820  ret_val = IXGBE_ERR_PHY;
821  goto out;
822  }
823  }
824 
825 out:
826  return ret_val;
827 }
828 
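829 /**
830  * ixgbe_identify_sfp_module_generic - Identifies SFP modules
831  * @hw: pointer to hardware structure
832  *
833  * Searches for and identifies the SFP module and assigns appropriate PHY type.
834  **/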
835 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
836 {
837  struct ixgbe_adapter *adapter = hw->back;
838  s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
839  u32 vendor_oui = 0;
840  enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
841  u8 identifier = 0;
842  u8 comp_codes_1g = 0;
843  u8 comp_codes_10g = 0;
844  u8 oui_bytes[3] = {0, 0, 0};
845  u8 cable_tech = 0;
846  u8 cable_spec = 0;
847  u16 enforce_sfp = 0;
848 
849  if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
850  hw->phy.sfp_type = ixgbe_sfp_type_not_present;
851  status = IXGBE_ERR_SFP_NOT_PRESENT;
852  goto out;
853  }
854 
855  status = hw->phy.ops.read_i2c_eeprom(hw,
856  IXGBE_SFF_IDENTIFIER,
857  &identifier);
858 
859  if (status == IXGBE_ERR_SWFW_SYNC ||
860  status == IXGBE_ERR_I2C ||
861  status == IXGBE_ERR_SFP_NOT_PRESENT)
862  goto err_read_i2c_eeprom;
863 
864  /* LAN ID is needed for sfp_type determination */
865  hw->mac.ops.set_lan_id(hw);
866 
867  if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
868  hw->phy.type = ixgbe_phy_sfp_unsupported;
869  status = IXGBE_ERR_SFP_NOT_SUPPORTED;
870  } else {
871  status = hw->phy.ops.read_i2c_eeprom(hw,
872  IXGBE_SFF_1GBE_COMP_CODES,
873  &comp_codes_1g);
874 
875  if (status == IXGBE_ERR_SWFW_SYNC ||
876  status == IXGBE_ERR_I2C ||
877  status == IXGBE_ERR_SFP_NOT_PRESENT)
878  goto err_read_i2c_eeprom;
879 
880  status = hw->phy.ops.read_i2c_eeprom(hw,
881  IXGBE_SFF_10GBE_COMP_CODES,
882  &comp_codes_10g);
883 
884  if (status == IXGBE_ERR_SWFW_SYNC ||
885  status == IXGBE_ERR_I2C ||
886  status == IXGBE_ERR_SFP_NOT_PRESENT)
887  goto err_read_i2c_eeprom;
888  status = hw->phy.ops.read_i2c_eeprom(hw,
889  IXGBE_SFF_CABLE_TECHNOLOGY,
890  &cable_tech);
891 
892  if (status == IXGBE_ERR_SWFW_SYNC ||
893  status == IXGBE_ERR_I2C ||
894  status == IXGBE_ERR_SFP_NOT_PRESENT)
895  goto err_read_i2c_eeprom;
896 
897  /* ID Module
898  * =========
899  * 0 SFP_DA_CU
900  * 1 SFP_SR
901  * 2 SFP_LR
902  * 3 SFP_DA_CORE0 - 82599-specific
903  * 4 SFP_DA_CORE1 - 82599-specific
904  * 5 SFP_SR/LR_CORE0 - 82599-specific
905  * 6 SFP_SR/LR_CORE1 - 82599-specific
906  * 7 SFP_act_lmt_DA_CORE0 - 82599-specific
907  * 8 SFP_act_lmt_DA_CORE1 - 82599-specific
908  * 9 SFP_1g_cu_CORE0 - 82599-specific
909  * 10 SFP_1g_cu_CORE1 - 82599-specific
910  * 11 SFP_1g_sx_CORE0 - 82599-specific
911  * 12 SFP_1g_sx_CORE1 - 82599-specific
912  */
913  if (hw->mac.type == ixgbe_mac_82598EB) {
914  if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
915  hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
916  else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
917  hw->phy.sfp_type = ixgbe_sfp_type_sr;
918  else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
919  hw->phy.sfp_type = ixgbe_sfp_type_lr;
920  else
921  hw->phy.sfp_type = ixgbe_sfp_type_unknown;
922  } else if (hw->mac.type == ixgbe_mac_82599EB) {
923  if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
924  if (hw->bus.lan_id == 0)
925  hw->phy.sfp_type =
926  ixgbe_sfp_type_da_cu_core0;
927  else
928  hw->phy.sfp_type =
929  ixgbe_sfp_type_da_cu_core1;
930  } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
931  hw->phy.ops.read_i2c_eeprom(
932  hw, IXGBE_SFF_CABLE_SPEC_COMP,
933  &cable_spec);
934  if (cable_spec &
935  IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
936  if (hw->bus.lan_id == 0)
937  hw->phy.sfp_type =
938  ixgbe_sfp_type_da_act_lmt_core0;
939  else
940  hw->phy.sfp_type =
941  ixgbe_sfp_type_da_act_lmt_core1;
942  } else {
943  hw->phy.sfp_type =
944  ixgbe_sfp_type_unknown;
945  }
946  } else if (comp_codes_10g &
947  (IXGBE_SFF_10GBASESR_CAPABLE |
948  IXGBE_SFF_10GBASELR_CAPABLE)) {
949  if (hw->bus.lan_id == 0)
950  hw->phy.sfp_type =
951  ixgbe_sfp_type_srlr_core0;
952  else
953  hw->phy.sfp_type =
954  ixgbe_sfp_type_srlr_core1;
955  } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
956  if (hw->bus.lan_id == 0)
957  hw->phy.sfp_type =
958  ixgbe_sfp_type_1g_cu_core0;
959  else
960  hw->phy.sfp_type =
961  ixgbe_sfp_type_1g_cu_core1;
962  } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
963  if (hw->bus.lan_id == 0)
964  hw->phy.sfp_type =
965  ixgbe_sfp_type_1g_sx_core0;
966  else
967  hw->phy.sfp_type =
968  ixgbe_sfp_type_1g_sx_core1;
969  } else {
970  hw->phy.sfp_type = ixgbe_sfp_type_unknown;
971  }
972  }
973 
974  if (hw->phy.sfp_type != stored_sfp_type)
975  hw->phy.sfp_setup_needed = true;
976 
977  /* Determine if the SFP+ PHY is dual speed or not. */
978  hw->phy.multispeed_fiber = false;
979  if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
980  (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
981  ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
982  (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
983  hw->phy.multispeed_fiber = true;
984 
985  /* Determine PHY vendor */
986  if (hw->phy.type != ixgbe_phy_nl) {
987  hw->phy.id = identifier;
988  status = hw->phy.ops.read_i2c_eeprom(hw,
989  IXGBE_SFF_VENDOR_OUI_BYTE0,
990  &oui_bytes[0]);
991 
992  if (status == IXGBE_ERR_SWFW_SYNC ||
993  status == IXGBE_ERR_I2C ||
994  status == IXGBE_ERR_SFP_NOT_PRESENT)
995  goto err_read_i2c_eeprom;
996 
997  status = hw->phy.ops.read_i2c_eeprom(hw,
998  IXGBE_SFF_VENDOR_OUI_BYTE1,
999  &oui_bytes[1]);
1000 
1001  if (status == IXGBE_ERR_SWFW_SYNC ||
1002  status == IXGBE_ERR_I2C ||
1003  status == IXGBE_ERR_SFP_NOT_PRESENT)
1004  goto err_read_i2c_eeprom;
1005 
1006  status = hw->phy.ops.read_i2c_eeprom(hw,
1007  IXGBE_SFF_VENDOR_OUI_BYTE2,
1008  &oui_bytes[2]);
1009 
1010  if (status == IXGBE_ERR_SWFW_SYNC ||
1011  status == IXGBE_ERR_I2C ||
1012  status == IXGBE_ERR_SFP_NOT_PRESENT)
1013  goto err_read_i2c_eeprom;
1014 
1015  vendor_oui =
1016  ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
1017  (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
1018  (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
1019 
1020  switch (vendor_oui) {
1021  case IXGBE_SFF_VENDOR_OUI_TYCO:
1022  if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1023  hw->phy.type =
1024  ixgbe_phy_sfp_passive_tyco;
1025  break;
1026  case IXGBE_SFF_VENDOR_OUI_FTL:
1027  if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1028  hw->phy.type = ixgbe_phy_sfp_ftl_active;
1029  else
1030  hw->phy.type = ixgbe_phy_sfp_ftl;
1031  break;
1032  case IXGBE_SFF_VENDOR_OUI_AVAGO:
1033  hw->phy.type = ixgbe_phy_sfp_avago;
1034  break;
1035  case IXGBE_SFF_VENDOR_OUI_INTEL:
1036  hw->phy.type = ixgbe_phy_sfp_intel;
1037  break;
1038  default:
1039  if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
1040  hw->phy.type =
1041  ixgbe_phy_sfp_passive_unknown;
1042  else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
1043  hw->phy.type =
1044  ixgbe_phy_sfp_active_unknown;
1045  else
1046  hw->phy.type = ixgbe_phy_sfp_unknown;
1047  break;
1048  }
1049  }
1050 
1051  /* Allow any DA cable vendor */
1052  if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
1053  IXGBE_SFF_DA_ACTIVE_CABLE)) {
1054  status = 0;
1055  goto out;
1056  }
1057 
1058  /* Verify supported 1G SFP modules */
1059  if (comp_codes_10g == 0 &&
1060  !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1061  hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1062  hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
1063  hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
1064  hw->phy.type = ixgbe_phy_sfp_unsupported;
1065  status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1066  goto out;
1067  }
1068 
1069  /* Anything else 82598-based is supported */
1070  if (hw->mac.type == ixgbe_mac_82598EB) {
1071  status = 0;
1072  goto out;
1073  }
1074 
1075  hw->mac.ops.get_device_caps(hw, &enforce_sfp);
1076  if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
1077  !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
1078  (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
1079  (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
1080  (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
1081  /* Make sure we're a supported PHY type */
1082  if (hw->phy.type == ixgbe_phy_sfp_intel) {
1083  status = 0;
1084  } else {
1085  if (hw->allow_unsupported_sfp) {
1086  e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
1087  status = 0;
1088  } else {
1089  hw_dbg(hw,
1090  "SFP+ module not supported\n");
1091  hw->phy.type =
1092  ixgbe_phy_sfp_unsupported;
1093  status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1094  }
1095  }
1096  } else {
1097  status = 0;
1098  }
1099  }
1100 
1101 out:
1102  return status;
1103 
1104 err_read_i2c_eeprom:
1105  hw->phy.sfp_type = ixgbe_sfp_type_not_present;
1106  if (hw->phy.type != ixgbe_phy_nl) {
1107  hw->phy.id = 0;
1108  hw->phy.type = ixgbe_phy_unknown;
1109  }
1110  return IXGBE_ERR_SFP_NOT_PRESENT;
1111 }
1112 
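1113 /**
1114  * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
1115  * @hw: pointer to hardware structure
1116  * @list_offset: offset to the SFP ID list
1117  * @data_offset: offset to the SFP data block
1118  *
1119  * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, and
1120  * if so returns the offsets to the PHY init sequence block.
1121  **/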
1122 s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
1123  u16 *list_offset,
1124  u16 *data_offset)
1125 {
1126  u16 sfp_id;
1127  u16 sfp_type = hw->phy.sfp_type;
1128 
1129  if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
1130  return IXGBE_ERR_SFP_NOT_SUPPORTED;
1131 
1132  if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
1133  return IXGBE_ERR_SFP_NOT_PRESENT;
1134 
1135  if ((hw->mac.type == ixgbe_mac_82598EB) &&
1136  (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
1137  return IXGBE_ERR_SFP_NOT_SUPPORTED;
1138 
1139  /*
1140  * Limiting active cables and 1G Phys must be initialized as
1141  * SR modules
1142  */
1143  if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
1144  sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
1145  sfp_type == ixgbe_sfp_type_1g_sx_core0)
1146  sfp_type = ixgbe_sfp_type_srlr_core0;
1147  else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
1148  sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
1149  sfp_type == ixgbe_sfp_type_1g_sx_core1)
1150  sfp_type = ixgbe_sfp_type_srlr_core1;
1151 
1152  /* Read offset to PHY init contents */
1153  hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
1154 
1155  if ((!*list_offset) || (*list_offset == 0xFFFF))
1156  return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
1157 
1158  /* Shift offset to first ID word */
1159  (*list_offset)++;
1160 
1161  /*
1162  * Find the matching SFP ID in the EEPROM
1163  * and program the init sequence
1164  */
1165  hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
1166 
1167  while (sfp_id != IXGBE_PHY_INIT_END_NL) {
1168  if (sfp_id == sfp_type) {
1169  (*list_offset)++;
1170  hw->eeprom.ops.read(hw, *list_offset, data_offset);
1171  if ((!*data_offset) || (*data_offset == 0xFFFF)) {
1172  hw_dbg(hw, "SFP+ module not supported\n");
1173  return IXGBE_ERR_SFP_NOT_SUPPORTED;
1174  } else {
1175  break;
1176  }
1177  } else {
1178  (*list_offset) += 2;
1179  if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
1180  return IXGBE_ERR_PHY;
1181  }
1182  }
1183 
1184  if (sfp_id == IXGBE_PHY_INIT_END_NL) {
1185  hw_dbg(hw, "No matching SFP+ module found\n");
1186  return IXGBE_ERR_SFP_NOT_SUPPORTED;
1187  }
1188 
1189  return 0;
1190 }
1191 
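1192 /**
1193  * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
1194  * @hw: pointer to hardware structure
1195  * @byte_offset: EEPROM byte offset to read
1196  * @eeprom_data: value read
1197  *
1198  * Performs byte read operation to SFP module's EEPROM over I2C interface.
1199  **/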
1200 s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1201  u8 *eeprom_data)
1202 {
1203  return hw->phy.ops.read_i2c_byte(hw, byte_offset,
1204  IXGBE_I2C_EEPROM_DEV_ADDR,
1205  eeprom_data);
1206 }
1207 
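1208 /**
1209  * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
1210  * @hw: pointer to hardware structure
1211  * @byte_offset: EEPROM byte offset to write
1212  * @eeprom_data: value to write
1213  *
1214  * Performs byte write operation to SFP module's EEPROM over I2C interface.
1215  **/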
1216 s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
1217  u8 eeprom_data)
1218 {
1219  return hw->phy.ops.write_i2c_byte(hw, byte_offset,
1220  IXGBE_I2C_EEPROM_DEV_ADDR,
1221  eeprom_data);
1222 }
1223 
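1224 /**
1225  * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
1226  * @hw: pointer to hardware structure
1227  * @byte_offset: byte offset to read
1228  * @dev_addr: device address
1229  * @data: value read
1230  *
1231  * Performs byte read operation to SFP module's EEPROM over I2C interface.
1232  **/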
1233 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1234  u8 dev_addr, u8 *data)
1235 {
1236  s32 status = 0;
1237  u32 max_retry = 10;
1238  u32 retry = 0;
1239  u16 swfw_mask = 0;
1240  bool nack = true;
1241  *data = 0;
1242 
1243  if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1244  swfw_mask = IXGBE_GSSR_PHY1_SM;
1245  else
1246  swfw_mask = IXGBE_GSSR_PHY0_SM;
1247 
1248  do {
1249  if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1250  status = IXGBE_ERR_SWFW_SYNC;
1251  goto read_byte_out;
1252  }
1253 
1254  ixgbe_i2c_start(hw);
1255 
1256  /* Device Address and write indication */
1257  status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1258  if (status != 0)
1259  goto fail;
1260 
1261  status = ixgbe_get_i2c_ack(hw);
1262  if (status != 0)
1263  goto fail;
1264 
1265  status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1266  if (status != 0)
1267  goto fail;
1268 
1269  status = ixgbe_get_i2c_ack(hw);
1270  if (status != 0)
1271  goto fail;
1272 
1273  ixgbe_i2c_start(hw);
1274 
1275  /* Device Address and read indication */
1276  status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
1277  if (status != 0)
1278  goto fail;
1279 
1280  status = ixgbe_get_i2c_ack(hw);
1281  if (status != 0)
1282  goto fail;
1283 
1284  status = ixgbe_clock_in_i2c_byte(hw, data);
1285  if (status != 0)
1286  goto fail;
1287 
1288  status = ixgbe_clock_out_i2c_bit(hw, nack);
1289  if (status != 0)
1290  goto fail;
1291 
1292  ixgbe_i2c_stop(hw);
1293  break;
1294 
1295 fail:
1296  hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1297  msleep(100);
1298  ixgbe_i2c_bus_clear(hw);
1299  retry++;
1300  if (retry < max_retry)
1301  hw_dbg(hw, "I2C byte read error - Retrying.\n");
1302  else
1303  hw_dbg(hw, "I2C byte read error.\n");
1304 
1305  } while (retry < max_retry);
1306 
1307  hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1308 
1309 read_byte_out:
1310  return status;
1311 }
1312 
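1313 /**
1314  * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
1315  * @hw: pointer to hardware structure
1316  * @byte_offset: byte offset to write
1317  * @dev_addr: device address
1318  * @data: value to write
1319  *
1320  * Performs byte write operation to SFP module's EEPROM over I2C interface.
1321  **/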
1322 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
1323  u8 dev_addr, u8 data)
1324 {
1325  s32 status = 0;
1326  u32 max_retry = 1;
1327  u32 retry = 0;
1328  u16 swfw_mask = 0;
1329 
1330  if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
1331  swfw_mask = IXGBE_GSSR_PHY1_SM;
1332  else
1333  swfw_mask = IXGBE_GSSR_PHY0_SM;
1334 
1335  if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
1336  status = IXGBE_ERR_SWFW_SYNC;
1337  goto write_byte_out;
1338  }
1339 
1340  do {
1341  ixgbe_i2c_start(hw);
1342 
1343  status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
1344  if (status != 0)
1345  goto fail;
1346 
1347  status = ixgbe_get_i2c_ack(hw);
1348  if (status != 0)
1349  goto fail;
1350 
1351  status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
1352  if (status != 0)
1353  goto fail;
1354 
1355  status = ixgbe_get_i2c_ack(hw);
1356  if (status != 0)
1357  goto fail;
1358 
1359  status = ixgbe_clock_out_i2c_byte(hw, data);
1360  if (status != 0)
1361  goto fail;
1362 
1363  status = ixgbe_get_i2c_ack(hw);
1364  if (status != 0)
1365  goto fail;
1366 
1367  ixgbe_i2c_stop(hw);
1368  break;
1369 
1370 fail:
1371  ixgbe_i2c_bus_clear(hw);
1372  retry++;
1373  if (retry < max_retry)
1374  hw_dbg(hw, "I2C byte write error - Retrying.\n");
1375  else
1376  hw_dbg(hw, "I2C byte write error.\n");
1377  } while (retry < max_retry);
1378 
1379  hw->mac.ops.release_swfw_sync(hw, swfw_mask);
1380 
1381 write_byte_out:
1382  return status;
1383 }
1384 
1391 static void ixgbe_i2c_start(struct ixgbe_hw *hw)
1392 {
1393  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1394 
1395  /* Start condition must begin with data and clock high */
1396  ixgbe_set_i2c_data(hw, &i2cctl, 1);
1397  ixgbe_raise_i2c_clk(hw, &i2cctl);
1398 
1399  /* Setup time for start condition (4.7us) */
1400  udelay(IXGBE_I2C_T_SU_STA);
1401 
1402  ixgbe_set_i2c_data(hw, &i2cctl, 0);
1403 
1404  /* Hold time for start condition (4us) */
1405  udelay(IXGBE_I2C_T_HD_STA);
1406 
1407  ixgbe_lower_i2c_clk(hw, &i2cctl);
1408 
1409  /* Minimum low period of clock is 4.7 us */
1410  udelay(IXGBE_I2C_T_LOW);
1411 
1412 }
1413 
1420 static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
1421 {
1422  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1423 
1424  /* Stop condition must begin with data low and clock high */
1425  ixgbe_set_i2c_data(hw, &i2cctl, 0);
1426  ixgbe_raise_i2c_clk(hw, &i2cctl);
1427 
1428  /* Setup time for stop condition (4us) */
1429  udelay(IXGBE_I2C_T_SU_STO);
1430 
1431  ixgbe_set_i2c_data(hw, &i2cctl, 1);
1432 
1433  /* bus free time between stop and start (4.7us)*/
1434  udelay(IXGBE_I2C_T_BUF);
1435 }
1436 
1444 static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
1445 {
1446  s32 i;
1447  bool bit = false;
1448 
1449  for (i = 7; i >= 0; i--) {
1450  ixgbe_clock_in_i2c_bit(hw, &bit);
1451  *data |= bit << i;
1452  }
1453 
1454  return 0;
1455 }
1456 
1464 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
1465 {
1466  s32 status = 0;
1467  s32 i;
1468  u32 i2cctl;
1469  bool bit = false;
1470 
1471  for (i = 7; i >= 0; i--) {
1472  bit = (data >> i) & 0x1;
1473  status = ixgbe_clock_out_i2c_bit(hw, bit);
1474 
1475  if (status != 0)
1476  break;
1477  }
1478 
1479  /* Release SDA line (set high) */
1480  i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1481  i2cctl |= IXGBE_I2C_DATA_OUT;
1482  IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
1483  IXGBE_WRITE_FLUSH(hw);
1484 
1485  return status;
1486 }
1487 
1494 static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
1495 {
1496  s32 status = 0;
1497  u32 i = 0;
1498  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1499  u32 timeout = 10;
1500  bool ack = true;
1501 
1502  ixgbe_raise_i2c_clk(hw, &i2cctl);
1503 
1504 
1505  /* Minimum high period of clock is 4us */
1506  udelay(IXGBE_I2C_T_HIGH);
1507 
1508  /* Poll for ACK. Note that ACK in I2C spec is
1509  * transition from 1 to 0 */
1510  for (i = 0; i < timeout; i++) {
1511  i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1512  ack = ixgbe_get_i2c_data(&i2cctl);
1513 
1514  udelay(1);
1515  if (ack == 0)
1516  break;
1517  }
1518 
1519  if (ack == 1) {
1520  hw_dbg(hw, "I2C ack was not received.\n");
1521  status = IXGBE_ERR_I2C;
1522  }
1523 
1524  ixgbe_lower_i2c_clk(hw, &i2cctl);
1525 
1526  /* Minimum low period of clock is 4.7 us */
1527  udelay(IXGBE_I2C_T_LOW);
1528 
1529  return status;
1530 }
1531 
1539 static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
1540 {
1541  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1542 
1543  ixgbe_raise_i2c_clk(hw, &i2cctl);
1544 
1545  /* Minimum high period of clock is 4us */
1546  udelay(IXGBE_I2C_T_HIGH);
1547 
1548  i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1549  *data = ixgbe_get_i2c_data(&i2cctl);
1550 
1551  ixgbe_lower_i2c_clk(hw, &i2cctl);
1552 
1553  /* Minimum low period of clock is 4.7 us */
1554  udelay(IXGBE_I2C_T_LOW);
1555 
1556  return 0;
1557 }
1558 
1566 static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1567 {
1568  s32 status;
1569  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1570 
1571  status = ixgbe_set_i2c_data(hw, &i2cctl, data);
1572  if (status == 0) {
1573  ixgbe_raise_i2c_clk(hw, &i2cctl);
1574 
1575  /* Minimum high period of clock is 4us */
1576  udelay(IXGBE_I2C_T_HIGH);
1577 
1578  ixgbe_lower_i2c_clk(hw, &i2cctl);
1579 
1580  /* Minimum low period of clock is 4.7 us.
1581  * This also takes care of the data hold time.
1582  */
1583  udelay(IXGBE_I2C_T_LOW);
1584  } else {
1585  status = IXGBE_ERR_I2C;
1586  hw_dbg(hw, "I2C data was not set to %X\n", data);
1587  }
1588 
1589  return status;
1590 }
1598 static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1599 {
1600  u32 i = 0;
1601  u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
1602  u32 i2cctl_r = 0;
1603 
1604  for (i = 0; i < timeout; i++) {
1605  *i2cctl |= IXGBE_I2C_CLK_OUT;
1606  IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1607  IXGBE_WRITE_FLUSH(hw);
1608  /* SCL rise time (1000ns) */
1609  udelay(IXGBE_I2C_T_RISE);
1610 
1611  i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1612  if (i2cctl_r & IXGBE_I2C_CLK_IN)
1613  break;
1614  }
1615 }
1616 
1624 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1625 {
1626 
1627  *i2cctl &= ~IXGBE_I2C_CLK_OUT;
1628 
1629  IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1630  IXGBE_WRITE_FLUSH(hw);
1631 
1632  /* SCL fall time (300ns) */
1633  udelay(IXGBE_I2C_T_FALL);
1634 }
1635 
1644 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
1645 {
1646  s32 status = 0;
1647 
1648  if (data)
1649  *i2cctl |= IXGBE_I2C_DATA_OUT;
1650  else
1651  *i2cctl &= ~IXGBE_I2C_DATA_OUT;
1652 
1653  IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1654  IXGBE_WRITE_FLUSH(hw);
1655 
1656  /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
1657  udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
1658 
1659  /* Verify data was set correctly */
1660  *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1661  if (data != ixgbe_get_i2c_data(i2cctl)) {
1662  status = IXGBE_ERR_I2C;
1663  hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
1664  }
1665 
1666  return status;
1667 }
1668 
1676 static bool ixgbe_get_i2c_data(u32 *i2cctl)
1677 {
1678  bool data;
1679 
1680  if (*i2cctl & IXGBE_I2C_DATA_IN)
1681  data = true;
1682  else
1683  data = false;
1684 
1685  return data;
1686 }
1687 
1695 static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
1696 {
1697  u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1698  u32 i;
1699 
1700  ixgbe_i2c_start(hw);
1701 
1702  ixgbe_set_i2c_data(hw, &i2cctl, 1);
1703 
1704  for (i = 0; i < 9; i++) {
1705  ixgbe_raise_i2c_clk(hw, &i2cctl);
1706 
1707  /* Min high period of clock is 4us */
1708  udelay(IXGBE_I2C_T_HIGH);
1709 
1710  ixgbe_lower_i2c_clk(hw, &i2cctl);
1711 
1712  /* Min low period of clock is 4.7us*/
1713  udelay(IXGBE_I2C_T_LOW);
1714  }
1715 
1716  ixgbe_i2c_start(hw);
1717 
1718  /* Put the i2c bus back to default state */
1719  ixgbe_i2c_stop(hw);
1720 }
1721 
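1722 /**
1723  * ixgbe_tn_check_overtemp - Checks if an overtemp occurred
1724  * @hw: pointer to hardware structure
1725  *
1726  * Checks if the LASI temp alarm status was triggered due to overtemp.
1727  **/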
1728 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
1729 {
1730  s32 status = 0;
1731  u16 phy_data = 0;
1732 
1733  if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
1734  goto out;
1735 
1736  /* Check that the LASI temp alarm status was triggered */
1737  hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
1738  MDIO_MMD_PMAPMD, &phy_data);
1739 
1740  if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
1741  goto out;
1742 
1743  status = IXGBE_ERR_OVERTEMP;
1744 out:
1745  return status;
1746 }