Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
phy.c
Go to the documentation of this file.
1 /*
2  * drivers/net/phy/phy.c
3  *
4  * Framework for configuring and reading PHY devices
5  * Based on code in sungem_phy.c and gianfar_phy.c
6  *
7  * Author: Andy Fleming
8  *
9  * Copyright (c) 2004 Freescale Semiconductor, Inc.
10  * Copyright (c) 2006, 2007 Maciej W. Rozycki
11  *
12  * This program is free software; you can redistribute it and/or modify it
13  * under the terms of the GNU General Public License as published by the
14  * Free Software Foundation; either version 2 of the License, or (at your
15  * option) any later version.
16  *
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/errno.h>
24 #include <linux/unistd.h>
25 #include <linux/interrupt.h>
26 #include <linux/init.h>
27 #include <linux/delay.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/skbuff.h>
31 #include <linux/mm.h>
32 #include <linux/module.h>
33 #include <linux/mii.h>
34 #include <linux/ethtool.h>
35 #include <linux/phy.h>
36 #include <linux/timer.h>
37 #include <linux/workqueue.h>
38 #include <linux/mdio.h>
39 
40 #include <linux/atomic.h>
41 #include <asm/io.h>
42 #include <asm/irq.h>
43 #include <asm/uaccess.h>
44 
49 void phy_print_status(struct phy_device *phydev)
50 {
51  if (phydev->link)
52  pr_info("%s - Link is Up - %d/%s\n",
53  dev_name(&phydev->dev),
54  phydev->speed,
55  DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
56  else
57  pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
58 }
60 
70 static int phy_clear_interrupt(struct phy_device *phydev)
71 {
72  int err = 0;
73 
74  if (phydev->drv->ack_interrupt)
75  err = phydev->drv->ack_interrupt(phydev);
76 
77  return err;
78 }
79 
87 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
88 {
89  int err = 0;
90 
91  phydev->interrupts = interrupts;
92  if (phydev->drv->config_intr)
93  err = phydev->drv->config_intr(phydev);
94 
95  return err;
96 }
97 
98 
107 static inline int phy_aneg_done(struct phy_device *phydev)
108 {
109  int retval;
110 
111  retval = phy_read(phydev, MII_BMSR);
112 
113  return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
114 }
115 
116 /* A structure for mapping a particular speed and duplex
117  * combination to a particular SUPPORTED and ADVERTISED value */
118 struct phy_setting {
119  int speed;
120  int duplex;
122 };
123 
/* A mapping of all SUPPORTED settings to speed/duplex.
 * NOTE: entries must stay ordered fastest-first: phy_find_valid()
 * scans from low indices and phy_force_reduction() steps toward
 * higher indices (i.e. slower settings). */
static const struct phy_setting settings[] = {
	{
		.speed = 10000,		/* SPEED_10000 */
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};
162 
163 #define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
164 
175 static inline int phy_find_setting(int speed, int duplex)
176 {
177  int idx = 0;
178 
179  while (idx < ARRAY_SIZE(settings) &&
180  (settings[idx].speed != speed ||
181  settings[idx].duplex != duplex))
182  idx++;
183 
184  return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
185 }
186 
197 static inline int phy_find_valid(int idx, u32 features)
198 {
199  while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
200  idx++;
201 
202  return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
203 }
204 
213 static void phy_sanitize_settings(struct phy_device *phydev)
214 {
215  u32 features = phydev->supported;
216  int idx;
217 
218  /* Sanitize settings based on PHY capabilities */
219  if ((features & SUPPORTED_Autoneg) == 0)
220  phydev->autoneg = AUTONEG_DISABLE;
221 
222  idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
223  features);
224 
225  phydev->speed = settings[idx].speed;
226  phydev->duplex = settings[idx].duplex;
227 }
228 
/**
 * phy_ethtool_sset - apply ethtool settings to a PHY
 * @phydev: target phy_device struct
 * @cmd: requested settings (speed, duplex, autoneg, advertising)
 *
 * Validates @cmd against the PHY's capabilities, copies the accepted
 * settings into @phydev, and restarts negotiation.  Returns 0 on
 * success or -EINVAL for malformed/unsupported requests.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	/* The request must target this PHY's address */
	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported
	 * values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Autoneg with nothing left to advertise is meaningless */
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	/* Forced mode only accepts the three standard speeds and a
	 * valid duplex */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	/* Keep the Autoneg bit of the advertising mask in sync with
	 * the autoneg mode */
	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
286 
287 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
288 {
289  cmd->supported = phydev->supported;
290 
291  cmd->advertising = phydev->advertising;
292 
293  ethtool_cmd_speed_set(cmd, phydev->speed);
294  cmd->duplex = phydev->duplex;
295  cmd->port = PORT_MII;
296  cmd->phy_address = phydev->addr;
297  cmd->transceiver = XCVR_EXTERNAL;
298  cmd->autoneg = phydev->autoneg;
299 
300  return 0;
301 }
303 
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq carrying the &struct mii_ioctl_data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG,
 *       SIOCSHWTSTAMP)
 *
 * Reads/writes MII registers on behalf of userspace.  Writes aimed
 * at this PHY's own BMCR/ADVERTISE registers are mirrored into the
 * software state so the phylib view stays coherent.  Returns 0, a
 * driver hwtstamp result, or -EOPNOTSUPP for unknown commands.
 *
 * NOTE(review): the SIOCSMIIREG write is issued on the bus for any
 * phy_id, not only this PHY's address — verify callers rely on that.
 */
int phy_mii_ioctl(struct phy_device *phydev,
		struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		break;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			/* Track writes to our own control registers so
			 * autoneg/speed/duplex state stays accurate */
			switch(mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0)
					phydev->autoneg = AUTONEG_DISABLE;
				else
					phydev->autoneg = AUTONEG_ENABLE;
				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
					phydev->duplex = DUPLEX_FULL;
				else
					phydev->duplex = DUPLEX_HALF;
				if ((!phydev->autoneg) &&
						(val & BMCR_SPEED1000))
					phydev->speed = SPEED_1000;
				else if ((!phydev->autoneg) &&
						(val & BMCR_SPEED100))
					phydev->speed = SPEED_100;
				break;
			case MII_ADVERTISE:
				phydev->advertising = val;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		/* A BMCR reset wipes fixups and driver init — reapply */
		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET &&
		    phydev->drv->config_init) {
			phy_scan_fixups(phydev);
			phydev->drv->config_init(phydev);
		}
		break;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
381 
391 int phy_start_aneg(struct phy_device *phydev)
392 {
393  int err;
394 
395  mutex_lock(&phydev->lock);
396 
397  if (AUTONEG_DISABLE == phydev->autoneg)
398  phy_sanitize_settings(phydev);
399 
400  err = phydev->drv->config_aneg(phydev);
401 
402  if (err < 0)
403  goto out_unlock;
404 
405  if (phydev->state != PHY_HALTED) {
406  if (AUTONEG_ENABLE == phydev->autoneg) {
407  phydev->state = PHY_AN;
408  phydev->link_timeout = PHY_AN_TIMEOUT;
409  } else {
410  phydev->state = PHY_FORCING;
412  }
413  }
414 
415 out_unlock:
416  mutex_unlock(&phydev->lock);
417  return err;
418 }
420 
421 
422 static void phy_change(struct work_struct *work);
423 
437 void phy_start_machine(struct phy_device *phydev,
438  void (*handler)(struct net_device *))
439 {
440  phydev->adjust_state = handler;
441 
443 }
444 
453 void phy_stop_machine(struct phy_device *phydev)
454 {
456 
457  mutex_lock(&phydev->lock);
458  if (phydev->state > PHY_UP)
459  phydev->state = PHY_UP;
460  mutex_unlock(&phydev->lock);
461 
462  phydev->adjust_state = NULL;
463 }
464 
474 static void phy_force_reduction(struct phy_device *phydev)
475 {
476  int idx;
477 
478  idx = phy_find_setting(phydev->speed, phydev->duplex);
479 
480  idx++;
481 
482  idx = phy_find_valid(idx, phydev->supported);
483 
484  phydev->speed = settings[idx].speed;
485  phydev->duplex = settings[idx].duplex;
486 
487  pr_info("Trying %d/%s\n",
488  phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
489 }
490 
491 
/* phy_error - enter PHY_HALTED after an unrecoverable error.
 * Must NOT be called with phydev->lock held (it takes the lock);
 * the halted state stops the state machine from reenabling
 * interrupts or rescheduling work.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}
507 
/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: Cannot touch the MDIO bus from hard-irq context, so
 *   it masks the line and defers the real work to phy_change().
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours. */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here. A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line. */
	disable_irq_nosync(irq);
	/* irq_disable counts this disable so phy_stop_interrupts()
	 * can rebalance enable_irq() if the work is cancelled */
	atomic_inc(&phydev->irq_disable);

	schedule_work(&phydev->phy_queue);

	return IRQ_HANDLED;
}
534 
539 static int phy_enable_interrupts(struct phy_device *phydev)
540 {
541  int err;
542 
543  err = phy_clear_interrupt(phydev);
544 
545  if (err < 0)
546  return err;
547 
548  err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
549 
550  return err;
551 }
552 
557 static int phy_disable_interrupts(struct phy_device *phydev)
558 {
559  int err;
560 
561  /* Disable PHY interrupts */
562  err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
563 
564  if (err)
565  goto phy_err;
566 
567  /* Clear the interrupt */
568  err = phy_clear_interrupt(phydev);
569 
570  if (err)
571  goto phy_err;
572 
573  return 0;
574 
575 phy_err:
576  phy_error(phydev);
577 
578  return err;
579 }
580 
/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Requests the IRQ for the PHY.  If the request fails,
 *   the device silently falls back to polling (irq set to PHY_POLL)
 *   and 0 is still returned; otherwise the PHY's interrupts are
 *   enabled.  Should only be called with a valid IRQ number.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	int err = 0;

	INIT_WORK(&phydev->phy_queue, phy_change);

	/* No disable_irq_nosync() outstanding yet */
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
				IRQF_SHARED,
				"phy_interrupt",
				phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	err = phy_enable_interrupts(phydev);

	return err;
}
613 
/**
 * phy_stop_interrupts - disable interrupts on a PHY device and
 *   release its IRQ
 * @phydev: target phy_device struct
 *
 * Returns the phy_disable_interrupts() result (0 or negative errno);
 * the IRQ is freed and pending work cancelled in either case.
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err;

	err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/*
	 * If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
647 
648 
/* phy_change - deferred (workqueue) half of the PHY interrupt:
 * acknowledges the interrupt on the MDIO bus, flags a link change
 * for the state machine, and rebalances the irq line that
 * phy_interrupt() disabled.
 */
static void phy_change(struct work_struct *work)
{
	int err;
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	/* On a shared IRQ line, let drivers that can tell filter
	 * out interrupts this PHY did not raise */
	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	err = phy_disable_interrupts(phydev);

	if (err)
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	/* Rebalance the disable_irq_nosync() from phy_interrupt() */
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state)
		err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);

	if (err)
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	schedule_delayed_work(&phydev->state_queue, 0);

	return;

ignore:
	/* Not our interrupt: just rebalance the irq line */
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	/* Re-disable the line (and re-count it) before halting */
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}
700 
/**
 * phy_stop - halt a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Disables and acks PHY interrupts (when not polling)
 *   and moves the device to PHY_HALTED, which keeps phy_change()
 *   and the state machine from doing further work.
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	/* Already halted: nothing to do */
	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phydev->irq != PHY_POLL) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/*
	 * Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
731 
732 
/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to handle
 *   PHY-related work.  Used during startup to start the PHY, and
 *   after phy_stop() to resume operation (via PHY_RESUMING).
 *   States not listed are left unchanged.
 */
void phy_start(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		phydev->state = PHY_RESUMING;
		/* fall through */
	default:
		break;
	}
	mutex_unlock(&phydev->lock);
}
763 
768 void phy_state_machine(struct work_struct *work)
769 {
770  struct delayed_work *dwork = to_delayed_work(work);
771  struct phy_device *phydev =
772  container_of(dwork, struct phy_device, state_queue);
773  int needs_aneg = 0;
774  int err = 0;
775 
776  mutex_lock(&phydev->lock);
777 
778  if (phydev->adjust_state)
779  phydev->adjust_state(phydev->attached_dev);
780 
781  switch(phydev->state) {
782  case PHY_DOWN:
783  case PHY_STARTING:
784  case PHY_READY:
785  case PHY_PENDING:
786  break;
787  case PHY_UP:
788  needs_aneg = 1;
789 
790  phydev->link_timeout = PHY_AN_TIMEOUT;
791 
792  break;
793  case PHY_AN:
794  err = phy_read_status(phydev);
795 
796  if (err < 0)
797  break;
798 
799  /* If the link is down, give up on
800  * negotiation for now */
801  if (!phydev->link) {
802  phydev->state = PHY_NOLINK;
804  phydev->adjust_link(phydev->attached_dev);
805  break;
806  }
807 
808  /* Check if negotiation is done. Break
809  * if there's an error */
810  err = phy_aneg_done(phydev);
811  if (err < 0)
812  break;
813 
814  /* If AN is done, we're running */
815  if (err > 0) {
816  phydev->state = PHY_RUNNING;
818  phydev->adjust_link(phydev->attached_dev);
819 
820  } else if (0 == phydev->link_timeout--) {
821  int idx;
822 
823  needs_aneg = 1;
824  /* If we have the magic_aneg bit,
825  * we try again */
826  if (phydev->drv->flags & PHY_HAS_MAGICANEG)
827  break;
828 
829  /* The timer expired, and we still
830  * don't have a setting, so we try
831  * forcing it until we find one that
832  * works, starting from the fastest speed,
833  * and working our way down */
834  idx = phy_find_valid(0, phydev->supported);
835 
836  phydev->speed = settings[idx].speed;
837  phydev->duplex = settings[idx].duplex;
838 
839  phydev->autoneg = AUTONEG_DISABLE;
840 
841  pr_info("Trying %d/%s\n",
842  phydev->speed,
843  DUPLEX_FULL == phydev->duplex ?
844  "FULL" : "HALF");
845  }
846  break;
847  case PHY_NOLINK:
848  err = phy_read_status(phydev);
849 
850  if (err)
851  break;
852 
853  if (phydev->link) {
854  phydev->state = PHY_RUNNING;
856  phydev->adjust_link(phydev->attached_dev);
857  }
858  break;
859  case PHY_FORCING:
860  err = genphy_update_link(phydev);
861 
862  if (err)
863  break;
864 
865  if (phydev->link) {
866  phydev->state = PHY_RUNNING;
868  } else {
869  if (0 == phydev->link_timeout--) {
870  phy_force_reduction(phydev);
871  needs_aneg = 1;
872  }
873  }
874 
875  phydev->adjust_link(phydev->attached_dev);
876  break;
877  case PHY_RUNNING:
878  /* Only register a CHANGE if we are
879  * polling */
880  if (PHY_POLL == phydev->irq)
881  phydev->state = PHY_CHANGELINK;
882  break;
883  case PHY_CHANGELINK:
884  err = phy_read_status(phydev);
885 
886  if (err)
887  break;
888 
889  if (phydev->link) {
890  phydev->state = PHY_RUNNING;
892  } else {
893  phydev->state = PHY_NOLINK;
895  }
896 
897  phydev->adjust_link(phydev->attached_dev);
898 
899  if (PHY_POLL != phydev->irq)
900  err = phy_config_interrupt(phydev,
902  break;
903  case PHY_HALTED:
904  if (phydev->link) {
905  phydev->link = 0;
907  phydev->adjust_link(phydev->attached_dev);
908  }
909  break;
910  case PHY_RESUMING:
911 
912  err = phy_clear_interrupt(phydev);
913 
914  if (err)
915  break;
916 
917  err = phy_config_interrupt(phydev,
919 
920  if (err)
921  break;
922 
923  if (AUTONEG_ENABLE == phydev->autoneg) {
924  err = phy_aneg_done(phydev);
925  if (err < 0)
926  break;
927 
928  /* err > 0 if AN is done.
929  * Otherwise, it's 0, and we're
930  * still waiting for AN */
931  if (err > 0) {
932  err = phy_read_status(phydev);
933  if (err)
934  break;
935 
936  if (phydev->link) {
937  phydev->state = PHY_RUNNING;
939  } else
940  phydev->state = PHY_NOLINK;
941  phydev->adjust_link(phydev->attached_dev);
942  } else {
943  phydev->state = PHY_AN;
944  phydev->link_timeout = PHY_AN_TIMEOUT;
945  }
946  } else {
947  err = phy_read_status(phydev);
948  if (err)
949  break;
950 
951  if (phydev->link) {
952  phydev->state = PHY_RUNNING;
954  } else
955  phydev->state = PHY_NOLINK;
956  phydev->adjust_link(phydev->attached_dev);
957  }
958  break;
959  }
960 
961  mutex_unlock(&phydev->lock);
962 
963  if (needs_aneg)
964  err = phy_start_aneg(phydev);
965 
966  if (err < 0)
967  phy_error(phydev);
968 
970 }
971 
/* mmd_phy_indirect - select an MMD register via the clause-22
 * indirect access registers (MII_MMD_CTRL / MII_MMD_DATA), leaving
 * the PHY ready for a data read/write of that register.
 * NOTE(review): despite the name, @prtad carries the MMD register
 * address here (it is written as the address payload) — confirm
 * against the callers, which pass register constants as @prtad.
 */
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}
984 
1000 static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1001  int addr)
1002 {
1003  u32 ret;
1004 
1005  mmd_phy_indirect(bus, prtad, devad, addr);
1006 
1007  /* Read the content of the MMD's selected register */
1008  ret = bus->read(bus, addr, MII_MMD_DATA);
1009 
1010  return ret;
1011 }
1012 
1029 static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
1030  int addr, u32 data)
1031 {
1032  mmd_phy_indirect(bus, prtad, devad, addr);
1033 
1034  /* Write the data into MMD's selected register */
1035  bus->write(bus, addr, MII_MMD_DATA, data);
1036 }
1037 
1048 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1049 {
1050  int ret = -EPROTONOSUPPORT;
1051 
1052  /* According to 802.3az,the EEE is supported only in full duplex-mode.
1053  * Also EEE feature is active when core is operating with MII, GMII
1054  * or RGMII.
1055  */
1056  if ((phydev->duplex == DUPLEX_FULL) &&
1057  ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1058  (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1059  (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
1060  int eee_lp, eee_cap, eee_adv;
1061  u32 lp, cap, adv;
1062  int idx, status;
1063 
1064  /* Read phy status to properly get the right settings */
1065  status = phy_read_status(phydev);
1066  if (status)
1067  return status;
1068 
1069  /* First check if the EEE ability is supported */
1070  eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1071  MDIO_MMD_PCS, phydev->addr);
1072  if (eee_cap < 0)
1073  return eee_cap;
1074 
1075  cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1076  if (!cap)
1077  goto eee_exit;
1078 
1079  /* Check which link settings negotiated and verify it in
1080  * the EEE advertising registers.
1081  */
1082  eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1083  MDIO_MMD_AN, phydev->addr);
1084  if (eee_lp < 0)
1085  return eee_lp;
1086 
1087  eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1088  MDIO_MMD_AN, phydev->addr);
1089  if (eee_adv < 0)
1090  return eee_adv;
1091 
1092  adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1093  lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1094  idx = phy_find_setting(phydev->speed, phydev->duplex);
1095  if ((lp & adv & settings[idx].setting))
1096  goto eee_exit;
1097 
1098  if (clk_stop_enable) {
1099  /* Configure the PHY to stop receiving xMII
1100  * clock while it is signaling LPI.
1101  */
1102  int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
1103  MDIO_MMD_PCS,
1104  phydev->addr);
1105  if (val < 0)
1106  return val;
1107 
1109  phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
1110  MDIO_MMD_PCS, phydev->addr, val);
1111  }
1112 
1113  ret = 0; /* EEE supported */
1114  }
1115 
1116 eee_exit:
1117  return ret;
1118 }
1120 
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: reads the EEE wake error counter register
 * (MDIO_PCS_EEE_WK_ERR, MMD 3.22); returns the count or a negative
 * errno from the MDIO read.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);

}
1135 
1144 int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1145 {
1146  int val;
1147 
1148  /* Get Supported EEE */
1149  val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
1150  MDIO_MMD_PCS, phydev->addr);
1151  if (val < 0)
1152  return val;
1153  data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1154 
1155  /* Get advertisement EEE */
1156  val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
1157  MDIO_MMD_AN, phydev->addr);
1158  if (val < 0)
1159  return val;
1160  data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1161 
1162  /* Get LP advertisement EEE */
1163  val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
1164  MDIO_MMD_AN, phydev->addr);
1165  if (val < 0)
1166  return val;
1167  data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1168 
1169  return 0;
1170 }
1172 
1180 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1181 {
1182  int val;
1183 
1184  val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1185  phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1186  phydev->addr, val);
1187 
1188  return 0;
1189 }