Linux Kernel 3.7.1
ixgbe_ethtool.c
1 /*******************************************************************************
2 
3  Intel 10 Gigabit PCI Express Linux driver
4  Copyright(c) 1999 - 2012 Intel Corporation.
5 
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9 
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  more details.
14 
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19  The full GNU General Public License is included in this distribution in
20  the file called "COPYING".
21 
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 
26 *******************************************************************************/
27 
28 /* ethtool support for ixgbe */
29 
30 #include <linux/interrupt.h>
31 #include <linux/types.h>
32 #include <linux/module.h>
33 #include <linux/slab.h>
34 #include <linux/pci.h>
35 #include <linux/netdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/vmalloc.h>
38 #include <linux/highmem.h>
39 #include <linux/uaccess.h>
40 
41 #include "ixgbe.h"
42 
43 
44 #define IXGBE_ALL_RAR_ENTRIES 16
45 
46 enum {NETDEV_STATS, IXGBE_STATS};
47 
48 struct ixgbe_stats {
49  char stat_string[ETH_GSTRING_LEN];
50  int type;
51  int sizeof_stat;
52  int stat_offset;
53 };
54 
55 #define IXGBE_STAT(m) IXGBE_STATS, \
56  sizeof(((struct ixgbe_adapter *)0)->m), \
57  offsetof(struct ixgbe_adapter, m)
58 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
59  sizeof(((struct rtnl_link_stats64 *)0)->m), \
60  offsetof(struct rtnl_link_stats64, m)
61 
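A sketch of how one table entry expands (editorial aside; "tx_busy" is just a representative member): the two macros above supply the type, sizeof_stat and stat_offset fields of struct ixgbe_stats, so

    /* {"tx_busy", IXGBE_STAT(tx_busy)} becomes: */
    {"tx_busy", IXGBE_STATS,
     sizeof(((struct ixgbe_adapter *)0)->tx_busy),
     offsetof(struct ixgbe_adapter, tx_busy)},

which ixgbe_get_ethtool_stats() later dereferences as (char *)adapter + stat_offset.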
62 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
63  {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
64  {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
65  {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
66  {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
67  {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
68  {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
69  {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
70  {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
71  {"lsc_int", IXGBE_STAT(lsc_int)},
72  {"tx_busy", IXGBE_STAT(tx_busy)},
73  {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
74  {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
75  {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
76  {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
77  {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
78  {"multicast", IXGBE_NETDEV_STAT(multicast)},
79  {"broadcast", IXGBE_STAT(stats.bprc)},
80  {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
81  {"collisions", IXGBE_NETDEV_STAT(collisions)},
82  {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
83  {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
84  {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
85  {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
86  {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
87  {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
88  {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
89  {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
90  {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
91  {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
92  {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
93  {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
94  {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
95  {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
96  {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
97  {"tx_restart_queue", IXGBE_STAT(restart_queue)},
98  {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
99  {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
100  {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
101  {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
102  {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
103  {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
104  {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
105  {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
106  {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
107  {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
108  {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
109  {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
110  {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
111  {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
112 #ifdef IXGBE_FCOE
113  {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
114  {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
115  {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
116  {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
117  {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
118  {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
119  {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
120  {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
121 #endif /* IXGBE_FCOE */
122 };
123 
124 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
125  * we set the num_rx_queues to evaluate to num_tx_queues. This is
126  * used because we do not have a good way to get the max number of
127  * rx queues with CONFIG_RPS disabled.
128  */
129 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
130 
131 #define IXGBE_QUEUE_STATS_LEN ( \
132  (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
133  (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
134 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
135 #define IXGBE_PB_STATS_LEN ( \
136  (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
137  sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
138  sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
139  sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
140  / sizeof(u64))
141 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
142  IXGBE_PB_STATS_LEN + \
143  IXGBE_QUEUE_STATS_LEN)
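A worked example of the sizing (editorial; it assumes struct ixgbe_queue_stats holds two u64 counters and that each pxon/pxoff array holds eight u64 entries): with 8 Tx and 8 Rx queues, IXGBE_QUEUE_STATS_LEN = (8 + 8) * 2 = 32, IXGBE_PB_STATS_LEN = (4 * 8 * 8) / 8 = 32, and IXGBE_STATS_LEN adds IXGBE_GLOBAL_STATS_LEN global entries on top of those 64.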
144 
145 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
146  "Register test (offline)", "Eeprom test (offline)",
147  "Interrupt test (offline)", "Loopback test (offline)",
148  "Link test (on/offline)"
149 };
150 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
151 
152 static int ixgbe_get_settings(struct net_device *netdev,
153  struct ethtool_cmd *ecmd)
154 {
155  struct ixgbe_adapter *adapter = netdev_priv(netdev);
156  struct ixgbe_hw *hw = &adapter->hw;
157  ixgbe_link_speed supported_link;
158  u32 link_speed = 0;
159  bool autoneg;
160  bool link_up;
161 
162  hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
163 
164  /* set the supported link speeds */
165  if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
166  ecmd->supported |= SUPPORTED_10000baseT_Full;
167  if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
168  ecmd->supported |= SUPPORTED_1000baseT_Full;
169  if (supported_link & IXGBE_LINK_SPEED_100_FULL)
170  ecmd->supported |= SUPPORTED_100baseT_Full;
171 
172  /* set the advertised speeds */
173  if (hw->phy.autoneg_advertised) {
174  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
175  ecmd->advertising |= ADVERTISED_100baseT_Full;
176  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
177  ecmd->advertising |= ADVERTISED_10000baseT_Full;
178  if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
179  ecmd->advertising |= ADVERTISED_1000baseT_Full;
180  } else {
181  /* default modes in case phy.autoneg_advertised isn't set */
182  if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
183  ecmd->advertising |= ADVERTISED_10000baseT_Full;
184  if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
185  ecmd->advertising |= ADVERTISED_1000baseT_Full;
186  if (supported_link & IXGBE_LINK_SPEED_100_FULL)
187  ecmd->advertising |= ADVERTISED_100baseT_Full;
188  }
189 
190  if (autoneg) {
191  ecmd->supported |= SUPPORTED_Autoneg;
192  ecmd->advertising |= ADVERTISED_Autoneg;
193  ecmd->autoneg = AUTONEG_ENABLE;
194  } else
195  ecmd->autoneg = AUTONEG_DISABLE;
196 
197  ecmd->transceiver = XCVR_EXTERNAL;
198 
199  /* Determine the remaining settings based on the PHY type. */
200  switch (adapter->hw.phy.type) {
201  case ixgbe_phy_tn:
202  case ixgbe_phy_aq:
203  case ixgbe_phy_cu_unknown:
204  ecmd->supported |= SUPPORTED_TP;
205  ecmd->advertising |= ADVERTISED_TP;
206  ecmd->port = PORT_TP;
207  break;
208  case ixgbe_phy_qt:
209  ecmd->supported |= SUPPORTED_FIBRE;
210  ecmd->advertising |= ADVERTISED_FIBRE;
211  ecmd->port = PORT_FIBRE;
212  break;
213  case ixgbe_phy_nl:
214  case ixgbe_phy_sfp_passive_tyco:
215  case ixgbe_phy_sfp_passive_unknown:
216  case ixgbe_phy_sfp_ftl:
217  case ixgbe_phy_sfp_avago:
218  case ixgbe_phy_sfp_intel:
219  case ixgbe_phy_sfp_unknown:
220  /* SFP+ devices, further checking needed */
221  switch (adapter->hw.phy.sfp_type) {
222  case ixgbe_sfp_type_da_cu:
223  case ixgbe_sfp_type_da_cu_core0:
224  case ixgbe_sfp_type_da_cu_core1:
225  ecmd->supported |= SUPPORTED_FIBRE;
226  ecmd->advertising |= ADVERTISED_FIBRE;
227  ecmd->port = PORT_DA;
228  break;
229  case ixgbe_sfp_type_sr:
230  case ixgbe_sfp_type_lr:
231  case ixgbe_sfp_type_srlr_core0:
232  case ixgbe_sfp_type_srlr_core1:
233  ecmd->supported |= SUPPORTED_FIBRE;
234  ecmd->advertising |= ADVERTISED_FIBRE;
235  ecmd->port = PORT_FIBRE;
236  break;
237  case ixgbe_sfp_type_not_present:
238  ecmd->supported |= SUPPORTED_FIBRE;
239  ecmd->advertising |= ADVERTISED_FIBRE;
240  ecmd->port = PORT_NONE;
241  break;
242  case ixgbe_sfp_type_1g_cu_core0:
243  case ixgbe_sfp_type_1g_cu_core1:
244  ecmd->supported |= SUPPORTED_TP;
245  ecmd->advertising |= ADVERTISED_TP;
246  ecmd->port = PORT_TP;
247  break;
248  case ixgbe_sfp_type_1g_sx_core0:
249  case ixgbe_sfp_type_1g_sx_core1:
250  ecmd->supported |= SUPPORTED_FIBRE;
251  ecmd->advertising |= ADVERTISED_FIBRE;
252  ecmd->port = PORT_FIBRE;
253  break;
254  case ixgbe_sfp_type_unknown:
255  default:
256  ecmd->supported |= SUPPORTED_FIBRE;
257  ecmd->advertising |= ADVERTISED_FIBRE;
258  ecmd->port = PORT_OTHER;
259  break;
260  }
261  break;
262  case ixgbe_phy_xaui:
263  ecmd->supported |= SUPPORTED_FIBRE;
264  ecmd->advertising |= ADVERTISED_FIBRE;
265  ecmd->port = PORT_NONE;
266  break;
267  case ixgbe_phy_unknown:
268  case ixgbe_phy_generic:
269  case ixgbe_phy_sfp_unsupported:
270  default:
271  ecmd->supported |= SUPPORTED_FIBRE;
272  ecmd->advertising |= ADVERTISED_FIBRE;
273  ecmd->port = PORT_OTHER;
274  break;
275  }
276 
277  hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
278  if (link_up) {
279  switch (link_speed) {
280  case IXGBE_LINK_SPEED_10GB_FULL:
281  ethtool_cmd_speed_set(ecmd, SPEED_10000);
282  break;
283  case IXGBE_LINK_SPEED_1GB_FULL:
284  ethtool_cmd_speed_set(ecmd, SPEED_1000);
285  break;
286  case IXGBE_LINK_SPEED_100_FULL:
287  ethtool_cmd_speed_set(ecmd, SPEED_100);
288  break;
289  default:
290  break;
291  }
292  ecmd->duplex = DUPLEX_FULL;
293  } else {
294  ethtool_cmd_speed_set(ecmd, -1);
295  ecmd->duplex = -1;
296  }
297 
298  return 0;
299 }
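To see where this lands in practice, a minimal userspace sketch of the legacy ETHTOOL_GSET path that reaches ixgbe_get_settings() (editorial; the socket setup, error handling and "eth0" are illustrative assumptions):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    int main(void)
    {
            struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
            struct ifreq ifr;
            int sock = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ecmd;

            if (sock < 0 || ioctl(sock, SIOCETHTOOL, &ifr) < 0) {
                    perror("ETHTOOL_GSET");
                    return 1;
            }
            /* ethtool_cmd_speed() folds speed_hi into the 32-bit speed */
            printf("speed: %u Mb/s, autoneg: %u\n",
                   ethtool_cmd_speed(&ecmd), ecmd.autoneg);
            return 0;
    }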
300 
301 static int ixgbe_set_settings(struct net_device *netdev,
302  struct ethtool_cmd *ecmd)
303 {
304  struct ixgbe_adapter *adapter = netdev_priv(netdev);
305  struct ixgbe_hw *hw = &adapter->hw;
306  u32 advertised, old;
307  s32 err = 0;
308 
309  if ((hw->phy.media_type == ixgbe_media_type_copper) ||
310  (hw->phy.multispeed_fiber)) {
311  /*
312  * this function does not support duplex forcing, but can
313  * limit the advertising of the adapter to the specified speed
314  */
315  if (ecmd->autoneg == AUTONEG_DISABLE)
316  return -EINVAL;
317 
318  if (ecmd->advertising & ~ecmd->supported)
319  return -EINVAL;
320 
321  old = hw->phy.autoneg_advertised;
322  advertised = 0;
323  if (ecmd->advertising & ADVERTISED_10000baseT_Full)
324  advertised |= IXGBE_LINK_SPEED_10GB_FULL;
325 
326  if (ecmd->advertising & ADVERTISED_1000baseT_Full)
327  advertised |= IXGBE_LINK_SPEED_1GB_FULL;
328 
329  if (ecmd->advertising & ADVERTISED_100baseT_Full)
330  advertised |= IXGBE_LINK_SPEED_100_FULL;
331 
332  if (old == advertised)
333  return err;
334  /* this sets the link speed and restarts auto-neg */
335  hw->mac.autotry_restart = true;
336  err = hw->mac.ops.setup_link(hw, advertised, true, true);
337  if (err) {
338  e_info(probe, "setup link failed with code %d\n", err);
339  hw->mac.ops.setup_link(hw, old, true, true);
340  }
341  } else {
342  /* in this case we currently only support 10Gb/FULL */
343  u32 speed = ethtool_cmd_speed(ecmd);
344  if ((ecmd->autoneg == AUTONEG_ENABLE) ||
345  (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
346  (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
347  return -EINVAL;
348  }
349 
350  return err;
351 }
352 
353 static void ixgbe_get_pauseparam(struct net_device *netdev,
354  struct ethtool_pauseparam *pause)
355 {
356  struct ixgbe_adapter *adapter = netdev_priv(netdev);
357  struct ixgbe_hw *hw = &adapter->hw;
358 
359  if (hw->fc.disable_fc_autoneg)
360  pause->autoneg = 0;
361  else
362  pause->autoneg = 1;
363 
364  if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
365  pause->rx_pause = 1;
366  } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
367  pause->tx_pause = 1;
368  } else if (hw->fc.current_mode == ixgbe_fc_full) {
369  pause->rx_pause = 1;
370  pause->tx_pause = 1;
371  }
372 }
373 
374 static int ixgbe_set_pauseparam(struct net_device *netdev,
375  struct ethtool_pauseparam *pause)
376 {
377  struct ixgbe_adapter *adapter = netdev_priv(netdev);
378  struct ixgbe_hw *hw = &adapter->hw;
379  struct ixgbe_fc_info fc = hw->fc;
380 
381  /* 82598 does not support link flow control with DCB enabled */
382  if ((hw->mac.type == ixgbe_mac_82598EB) &&
383  (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
384  return -EINVAL;
385 
386  fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
387 
388  if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
389  fc.requested_mode = ixgbe_fc_full;
390  else if (pause->rx_pause && !pause->tx_pause)
391  fc.requested_mode = ixgbe_fc_rx_pause;
392  else if (!pause->rx_pause && pause->tx_pause)
393  fc.requested_mode = ixgbe_fc_tx_pause;
394  else
395  fc.requested_mode = ixgbe_fc_none;
396 
397  /* if the thing changed then we'll update and use new autoneg */
398  if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
399  hw->fc = fc;
400  if (netif_running(netdev))
401  ixgbe_reinit_locked(adapter);
402  else
403  ixgbe_reset(adapter);
404  }
405 
406  return 0;
407 }
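For reference, the requested-mode selection above maps straight onto what the ethtool utility sends down; a sketch of one case (editorial; device name illustrative):

    /*
     * "ethtool -A eth0 autoneg off rx on tx off" arrives as
     *   pause->autoneg = 0, pause->rx_pause = 1, pause->tx_pause = 0
     * and therefore selects
     *   fc.disable_fc_autoneg = true, fc.requested_mode = ixgbe_fc_rx_pause.
     * Because (rx_pause && tx_pause) || autoneg is tested first, leaving
     * autoneg enabled always negotiates toward ixgbe_fc_full.
     */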
408 
409 static u32 ixgbe_get_msglevel(struct net_device *netdev)
410 {
411  struct ixgbe_adapter *adapter = netdev_priv(netdev);
412  return adapter->msg_enable;
413 }
414 
415 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
416 {
417  struct ixgbe_adapter *adapter = netdev_priv(netdev);
418  adapter->msg_enable = data;
419 }
420 
421 static int ixgbe_get_regs_len(struct net_device *netdev)
422 {
423 #define IXGBE_REGS_LEN 1129
424  return IXGBE_REGS_LEN * sizeof(u32);
425 }
426 
427 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
428 
429 static void ixgbe_get_regs(struct net_device *netdev,
430  struct ethtool_regs *regs, void *p)
431 {
432  struct ixgbe_adapter *adapter = netdev_priv(netdev);
433  struct ixgbe_hw *hw = &adapter->hw;
434  u32 *regs_buff = p;
435  u8 i;
436 
437  memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
438 
439  regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
440 
441  /* General Registers */
442  regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
443  regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
444  regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
445  regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
446  regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
447  regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
448  regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
449  regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
450 
451  /* NVM Register */
452  regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
453  regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
454  regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
455  regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
456  regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
457  regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
458  regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
459  regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
460  regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
461  regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
462 
463  /* Interrupt */
464  /* don't read EICR because it can clear interrupt causes, instead
465  * read EICS which is a shadow but doesn't clear EICR */
466  regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
467  regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
468  regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
469  regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
470  regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
471  regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
472  regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
473  regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
474  regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
475  regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
476  regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
477  regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
478 
479  /* Flow Control */
480  regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
481  regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
482  regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
483  regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
484  regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
485  for (i = 0; i < 8; i++) {
486  switch (hw->mac.type) {
487  case ixgbe_mac_82598EB:
488  regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
489  regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
490  break;
491  case ixgbe_mac_82599EB:
492  case ixgbe_mac_X540:
493  regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
494  regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
495  break;
496  default:
497  break;
498  }
499  }
500  regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
501  regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
502 
503  /* Receive DMA */
504  for (i = 0; i < 64; i++)
505  regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
506  for (i = 0; i < 64; i++)
507  regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
508  for (i = 0; i < 64; i++)
509  regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
510  for (i = 0; i < 64; i++)
511  regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
512  for (i = 0; i < 64; i++)
513  regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
514  for (i = 0; i < 64; i++)
515  regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
516  for (i = 0; i < 16; i++)
517  regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
518  for (i = 0; i < 16; i++)
519  regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
520  regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
521  for (i = 0; i < 8; i++)
522  regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
523  regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
524  regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
525 
526  /* Receive */
527  regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
528  regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
529  for (i = 0; i < 16; i++)
530  regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
531  for (i = 0; i < 16; i++)
532  regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
533  regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
534  regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
535  regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
536  regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
537  regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
538  regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
539  for (i = 0; i < 8; i++)
540  regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
541  for (i = 0; i < 8; i++)
542  regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
543  regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
544 
545  /* Transmit */
546  for (i = 0; i < 32; i++)
547  regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
548  for (i = 0; i < 32; i++)
549  regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
550  for (i = 0; i < 32; i++)
551  regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
552  for (i = 0; i < 32; i++)
553  regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
554  for (i = 0; i < 32; i++)
555  regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
556  for (i = 0; i < 32; i++)
557  regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
558  for (i = 0; i < 32; i++)
559  regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
560  for (i = 0; i < 32; i++)
561  regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
562  regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
563  for (i = 0; i < 16; i++)
564  regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
565  regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
566  for (i = 0; i < 8; i++)
567  regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
568  regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
569 
570  /* Wake Up */
571  regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
572  regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
573  regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
574  regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
575  regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
576  regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
577  regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
578  regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
579  regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
580 
581  /* DCB */
582  regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
583  regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
584  regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
585  regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
586  for (i = 0; i < 8; i++)
587  regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
588  for (i = 0; i < 8; i++)
589  regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
590  for (i = 0; i < 8; i++)
591  regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
592  for (i = 0; i < 8; i++)
593  regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
594  for (i = 0; i < 8; i++)
595  regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
596  for (i = 0; i < 8; i++)
597  regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
598 
599  /* Statistics */
600  regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
601  regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
602  regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
603  regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
604  for (i = 0; i < 8; i++)
605  regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
606  regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
607  regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
608  regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
609  regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
610  regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
611  regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
612  regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
613  for (i = 0; i < 8; i++)
614  regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
615  for (i = 0; i < 8; i++)
616  regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
617  for (i = 0; i < 8; i++)
618  regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
619  for (i = 0; i < 8; i++)
620  regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
621  regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
622  regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
623  regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
624  regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
625  regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
626  regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
627  regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
628  regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
629  regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
630  regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
631  regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
632  regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
633  for (i = 0; i < 8; i++)
634  regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
635  regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
636  regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
637  regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
638  regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
639  regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
640  regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
641  regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
642  regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
643  regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
644  regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
645  regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
646  regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
647  regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
648  regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
649  regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
650  regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
651  regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
652  regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
653  regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
654  for (i = 0; i < 16; i++)
655  regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
656  for (i = 0; i < 16; i++)
657  regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
658  for (i = 0; i < 16; i++)
659  regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
660  for (i = 0; i < 16; i++)
661  regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
662 
663  /* MAC */
664  regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
665  regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
666  regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
667  regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
668  regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
669  regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
670  regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
671  regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
672  regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
673  regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
674  regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
675  regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
676  regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
677  regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
678  regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
679  regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
680  regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
681  regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
682  regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
683  regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
684  regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
685  regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
686  regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
687  regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
688  regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
689  regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
690  regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
691  regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
692  regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
693  regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
694  regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
695  regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
696  regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
697 
698  /* Diagnostic */
699  regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
700  for (i = 0; i < 8; i++)
701  regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
702  regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
703  for (i = 0; i < 4; i++)
704  regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
705  regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
706  regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
707  for (i = 0; i < 8; i++)
708  regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
709  regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
710  for (i = 0; i < 4; i++)
711  regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
712  regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
713  regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
714  regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
715  regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
716  regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
717  regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
718  regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
719  regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
720  regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
721  regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
722  regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
723  for (i = 0; i < 8; i++)
724  regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
725  regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
726  regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
727  regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
728  regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
729  regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
730  regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
731  regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
732  regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
733  regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
734 
735  /* 82599 X540 specific registers */
736  regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
737 }
738 
739 static int ixgbe_get_eeprom_len(struct net_device *netdev)
740 {
741  struct ixgbe_adapter *adapter = netdev_priv(netdev);
742  return adapter->hw.eeprom.word_size * 2;
743 }
744 
745 static int ixgbe_get_eeprom(struct net_device *netdev,
746  struct ethtool_eeprom *eeprom, u8 *bytes)
747 {
748  struct ixgbe_adapter *adapter = netdev_priv(netdev);
749  struct ixgbe_hw *hw = &adapter->hw;
750  u16 *eeprom_buff;
751  int first_word, last_word, eeprom_len;
752  int ret_val = 0;
753  u16 i;
754 
755  if (eeprom->len == 0)
756  return -EINVAL;
757 
758  eeprom->magic = hw->vendor_id | (hw->device_id << 16);
759 
760  first_word = eeprom->offset >> 1;
761  last_word = (eeprom->offset + eeprom->len - 1) >> 1;
762  eeprom_len = last_word - first_word + 1;
763 
764  eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
765  if (!eeprom_buff)
766  return -ENOMEM;
767 
768  ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
769  eeprom_buff);
770 
771  /* Device's eeprom is always little-endian, word addressable */
772  for (i = 0; i < eeprom_len; i++)
773  le16_to_cpus(&eeprom_buff[i]);
774 
775  memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
776  kfree(eeprom_buff);
777 
778  return ret_val;
779 }
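A worked example of the word alignment above (editorial): a request with eeprom->offset = 3 and eeprom->len = 4 covers bytes 3..6, so

    first_word = 3 >> 1 = 1            /* word holding bytes 2..3 */
    last_word  = (3 + 4 - 1) >> 1 = 3  /* word holding bytes 6..7 */
    eeprom_len = 3 - 1 + 1 = 3         /* words read: bytes 2..7 */

and the final memcpy() skips one leading byte (eeprom->offset & 1) so exactly the four requested bytes are returned.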
780 
781 static int ixgbe_set_eeprom(struct net_device *netdev,
782  struct ethtool_eeprom *eeprom, u8 *bytes)
783 {
784  struct ixgbe_adapter *adapter = netdev_priv(netdev);
785  struct ixgbe_hw *hw = &adapter->hw;
786  u16 *eeprom_buff;
787  void *ptr;
788  int max_len, first_word, last_word, ret_val = 0;
789  u16 i;
790 
791  if (eeprom->len == 0)
792  return -EINVAL;
793 
794  if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
795  return -EINVAL;
796 
797  max_len = hw->eeprom.word_size * 2;
798 
799  first_word = eeprom->offset >> 1;
800  last_word = (eeprom->offset + eeprom->len - 1) >> 1;
801  eeprom_buff = kmalloc(max_len, GFP_KERNEL);
802  if (!eeprom_buff)
803  return -ENOMEM;
804 
805  ptr = eeprom_buff;
806 
807  if (eeprom->offset & 1) {
808  /*
809  * need read/modify/write of first changed EEPROM word
810  * only the second byte of the word is being modified
811  */
812  ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
813  if (ret_val)
814  goto err;
815 
816  ptr++;
817  }
818  if ((eeprom->offset + eeprom->len) & 1) {
819  /*
820  * need read/modify/write of last changed EEPROM word
821  * only the first byte of the word is being modified
822  */
823  ret_val = hw->eeprom.ops.read(hw, last_word,
824  &eeprom_buff[last_word - first_word]);
825  if (ret_val)
826  goto err;
827  }
828 
829  /* Device's eeprom is always little-endian, word addressable */
830  for (i = 0; i < last_word - first_word + 1; i++)
831  le16_to_cpus(&eeprom_buff[i]);
832 
833  memcpy(ptr, bytes, eeprom->len);
834 
835  for (i = 0; i < last_word - first_word + 1; i++)
836  cpu_to_le16s(&eeprom_buff[i]);
837 
838  ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
839  last_word - first_word + 1,
840  eeprom_buff);
841 
842  /* Update the checksum */
843  if (ret_val == 0)
844  hw->eeprom.ops.update_checksum(hw);
845 
846 err:
847  kfree(eeprom_buff);
848  return ret_val;
849 }
850 
851 static void ixgbe_get_drvinfo(struct net_device *netdev,
852  struct ethtool_drvinfo *drvinfo)
853 {
854  struct ixgbe_adapter *adapter = netdev_priv(netdev);
855  u32 nvm_track_id;
856 
857  strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
858  strlcpy(drvinfo->version, ixgbe_driver_version,
859  sizeof(drvinfo->version));
860 
861  nvm_track_id = (adapter->eeprom_verh << 16) |
862  adapter->eeprom_verl;
863  snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
864  nvm_track_id);
865 
866  strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
867  sizeof(drvinfo->bus_info));
868  drvinfo->n_stats = IXGBE_STATS_LEN;
869  drvinfo->testinfo_len = IXGBE_TEST_LEN;
870  drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
871 }
872 
873 static void ixgbe_get_ringparam(struct net_device *netdev,
874  struct ethtool_ringparam *ring)
875 {
876  struct ixgbe_adapter *adapter = netdev_priv(netdev);
877  struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
878  struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
879 
880  ring->rx_max_pending = IXGBE_MAX_RXD;
881  ring->tx_max_pending = IXGBE_MAX_TXD;
882  ring->rx_pending = rx_ring->count;
883  ring->tx_pending = tx_ring->count;
884 }
885 
886 static int ixgbe_set_ringparam(struct net_device *netdev,
887  struct ethtool_ringparam *ring)
888 {
889  struct ixgbe_adapter *adapter = netdev_priv(netdev);
890  struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
891  int i, err = 0;
892  u32 new_rx_count, new_tx_count;
893  bool need_update = false;
894 
895  if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
896  return -EINVAL;
897 
898  new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
899  new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
900  new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
901 
902  new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
903  new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
904  new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
905 
906  if ((new_tx_count == adapter->tx_ring[0]->count) &&
907  (new_rx_count == adapter->rx_ring[0]->count)) {
908  /* nothing to do */
909  return 0;
910  }
911 
912  while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
913  usleep_range(1000, 2000);
914 
915  if (!netif_running(adapter->netdev)) {
916  for (i = 0; i < adapter->num_tx_queues; i++)
917  adapter->tx_ring[i]->count = new_tx_count;
918  for (i = 0; i < adapter->num_rx_queues; i++)
919  adapter->rx_ring[i]->count = new_rx_count;
920  adapter->tx_ring_count = new_tx_count;
921  adapter->rx_ring_count = new_rx_count;
922  goto clear_reset;
923  }
924 
925  temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
926  if (!temp_tx_ring) {
927  err = -ENOMEM;
928  goto clear_reset;
929  }
930 
931  if (new_tx_count != adapter->tx_ring_count) {
932  for (i = 0; i < adapter->num_tx_queues; i++) {
933  memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
934  sizeof(struct ixgbe_ring));
935  temp_tx_ring[i].count = new_tx_count;
936  err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
937  if (err) {
938  while (i) {
939  i--;
940  ixgbe_free_tx_resources(&temp_tx_ring[i]);
941  }
942  goto clear_reset;
943  }
944  }
945  need_update = true;
946  }
947 
948  temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
949  if (!temp_rx_ring) {
950  err = -ENOMEM;
951  goto err_setup;
952  }
953 
954  if (new_rx_count != adapter->rx_ring_count) {
955  for (i = 0; i < adapter->num_rx_queues; i++) {
956  memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
957  sizeof(struct ixgbe_ring));
958  temp_rx_ring[i].count = new_rx_count;
959  err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
960  if (err) {
961  while (i) {
962  i--;
963  ixgbe_free_rx_resources(&temp_rx_ring[i]);
964  }
965  goto err_setup;
966  }
967  }
968  need_update = true;
969  }
970 
971  /* if rings need to be updated, here's the place to do it in one shot */
972  if (need_update) {
973  ixgbe_down(adapter);
974 
975  /* tx */
976  if (new_tx_count != adapter->tx_ring_count) {
977  for (i = 0; i < adapter->num_tx_queues; i++) {
978  ixgbe_free_tx_resources(adapter->tx_ring[i]);
979  memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
980  sizeof(struct ixgbe_ring));
981  }
982  adapter->tx_ring_count = new_tx_count;
983  }
984 
985  /* rx */
986  if (new_rx_count != adapter->rx_ring_count) {
987  for (i = 0; i < adapter->num_rx_queues; i++) {
988  ixgbe_free_rx_resources(adapter->rx_ring[i]);
989  memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
990  sizeof(struct ixgbe_ring));
991  }
992  adapter->rx_ring_count = new_rx_count;
993  }
994  ixgbe_up(adapter);
995  }
996 
997  vfree(temp_rx_ring);
998 err_setup:
999  vfree(temp_tx_ring);
1000 clear_reset:
1001  clear_bit(__IXGBE_RESETTING, &adapter->state);
1002  return err;
1003 }
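A worked example of the clamping above (editorial; the bounds are the IXGBE_MIN/MAX and descriptor-multiple constants from ixgbe.h, 64/4096/8 in this driver version): "ethtool -G eth0 rx 5000" arrives as ring->rx_pending = 5000 and is clamped to 4096, while a request of 100 is rounded up to ALIGN(100, 8) = 104 before the rings are reallocated.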
1004 
1005 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1006 {
1007  switch (sset) {
1008  case ETH_SS_TEST:
1009  return IXGBE_TEST_LEN;
1010  case ETH_SS_STATS:
1011  return IXGBE_STATS_LEN;
1012  default:
1013  return -EOPNOTSUPP;
1014  }
1015 }
1016 
1017 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1018  struct ethtool_stats *stats, u64 *data)
1019 {
1020  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1021  struct rtnl_link_stats64 temp;
1022  const struct rtnl_link_stats64 *net_stats;
1023  unsigned int start;
1024  struct ixgbe_ring *ring;
1025  int i, j;
1026  char *p = NULL;
1027 
1028  ixgbe_update_stats(adapter);
1029  net_stats = dev_get_stats(netdev, &temp);
1030  for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1031  switch (ixgbe_gstrings_stats[i].type) {
1032  case NETDEV_STATS:
1033  p = (char *) net_stats +
1034  ixgbe_gstrings_stats[i].stat_offset;
1035  break;
1036  case IXGBE_STATS:
1037  p = (char *) adapter +
1038  ixgbe_gstrings_stats[i].stat_offset;
1039  break;
1040  }
1041 
1042  data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1043  sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1044  }
1045  for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1046  ring = adapter->tx_ring[j];
1047  if (!ring) {
1048  data[i] = 0;
1049  data[i+1] = 0;
1050  i += 2;
1051  continue;
1052  }
1053 
1054  do {
1055  start = u64_stats_fetch_begin_bh(&ring->syncp);
1056  data[i] = ring->stats.packets;
1057  data[i+1] = ring->stats.bytes;
1058  } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1059  i += 2;
1060  }
1061  for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1062  ring = adapter->rx_ring[j];
1063  if (!ring) {
1064  data[i] = 0;
1065  data[i+1] = 0;
1066  i += 2;
1067  continue;
1068  }
1069 
1070  do {
1071  start = u64_stats_fetch_begin_bh(&ring->syncp);
1072  data[i] = ring->stats.packets;
1073  data[i+1] = ring->stats.bytes;
1074  } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1075  i += 2;
1076  }
1077 
1078  for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1079  data[i++] = adapter->stats.pxontxc[j];
1080  data[i++] = adapter->stats.pxofftxc[j];
1081  }
1082  for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1083  data[i++] = adapter->stats.pxonrxc[j];
1084  data[i++] = adapter->stats.pxoffrxc[j];
1085  }
1086 }
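For completeness, a userspace sketch of how these counters come back through ETHTOOL_GSTATS; querying the ETH_SS_STATS count first with ETHTOOL_GSSET_INFO mirrors what the ethtool utility does (editorial; "eth0" assumed, error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>

    int main(void)
    {
            struct {
                    struct ethtool_sset_info hdr;
                    __u32 count;    /* receives the ETH_SS_STATS length */
            } sset = { .hdr = { .cmd = ETHTOOL_GSSET_INFO,
                                .sset_mask = 1ULL << ETH_SS_STATS } };
            struct ethtool_stats *stats;
            struct ifreq ifr;
            int sock = socket(AF_INET, SOCK_DGRAM, 0);
            __u32 i, n;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

            ifr.ifr_data = (void *)&sset;
            if (sock < 0 || ioctl(sock, SIOCETHTOOL, &ifr) < 0)
                    return 1;
            n = sset.hdr.sset_mask ? sset.count : 0; /* == IXGBE_STATS_LEN */

            stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
            if (!stats)
                    return 1;
            stats->cmd = ETHTOOL_GSTATS;
            stats->n_stats = n;
            ifr.ifr_data = (void *)stats;
            if (ioctl(sock, SIOCETHTOOL, &ifr) == 0)
                    for (i = 0; i < stats->n_stats; i++)
                            printf("stat[%u] = %llu\n", i,
                                   (unsigned long long)stats->data[i]);
            free(stats);
            return 0;
    }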
1087 
1088 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1089  u8 *data)
1090 {
1091  char *p = (char *)data;
1092  int i;
1093 
1094  switch (stringset) {
1095  case ETH_SS_TEST:
1096  memcpy(data, *ixgbe_gstrings_test,
1097  IXGBE_TEST_LEN * ETH_GSTRING_LEN);
1098  break;
1099  case ETH_SS_STATS:
1100  for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1101  memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1102  ETH_GSTRING_LEN);
1103  p += ETH_GSTRING_LEN;
1104  }
1105  for (i = 0; i < netdev->num_tx_queues; i++) {
1106  sprintf(p, "tx_queue_%u_packets", i);
1107  p += ETH_GSTRING_LEN;
1108  sprintf(p, "tx_queue_%u_bytes", i);
1109  p += ETH_GSTRING_LEN;
1110  }
1111  for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1112  sprintf(p, "rx_queue_%u_packets", i);
1113  p += ETH_GSTRING_LEN;
1114  sprintf(p, "rx_queue_%u_bytes", i);
1115  p += ETH_GSTRING_LEN;
1116  }
1117  for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1118  sprintf(p, "tx_pb_%u_pxon", i);
1119  p += ETH_GSTRING_LEN;
1120  sprintf(p, "tx_pb_%u_pxoff", i);
1121  p += ETH_GSTRING_LEN;
1122  }
1123  for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1124  sprintf(p, "rx_pb_%u_pxon", i);
1125  p += ETH_GSTRING_LEN;
1126  sprintf(p, "rx_pb_%u_pxoff", i);
1127  p += ETH_GSTRING_LEN;
1128  }
1129  /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1130  break;
1131  }
1132 }
1133 
1134 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1135 {
1136  struct ixgbe_hw *hw = &adapter->hw;
1137  bool link_up;
1138  u32 link_speed = 0;
1139  *data = 0;
1140 
1141  hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1142  if (link_up)
1143  return *data;
1144  else
1145  *data = 1;
1146  return *data;
1147 }
1148 
1149 /* ethtool register test data */
1150 struct ixgbe_reg_test {
1151  u32 reg;
1152  u8 array_len;
1153  u8 test_type;
1154  u32 mask;
1155  u32 write;
1156 };
1157 
1158 /* In the hardware, registers are laid out either singly, in arrays
1159  * spaced 0x40 bytes apart, or in contiguous tables. We assume
1160  * most tests take place on arrays or single registers (handled
1161  * as a single-element array) and special-case the tables.
1162  * Table tests are always pattern tests.
1163  *
1164  * We also make provision for some required setup steps by specifying
1165  * registers to be written without any read-back testing.
1166  */
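A worked example of the layout rules above (editorial; register offsets are those from ixgbe_type.h): the 82599 entry { IXGBE_RDBAL(0), 4, PATTERN_TEST, ... } makes ixgbe_reg_test() probe test->reg + (i * 0x40) for i = 0..3, i.e. the first four RDBAL registers; TABLE32_TEST entries such as the 128-entry MTA table step by 4 bytes instead, and TABLE64_TEST_LO/HI step by 8, with the HI half starting at test->reg + 4.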
1167 
1168 #define PATTERN_TEST 1
1169 #define SET_READ_TEST 2
1170 #define WRITE_NO_TEST 3
1171 #define TABLE32_TEST 4
1172 #define TABLE64_TEST_LO 5
1173 #define TABLE64_TEST_HI 6
1174 
1175 /* default 82599 register test */
1176 static const struct ixgbe_reg_test reg_test_82599[] = {
1177  { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1178  { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1179  { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1180  { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1181  { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1182  { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1183  { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1184  { IXGBE_RDH(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1185  { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1186  { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1187  { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1188  { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1189  { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1190  { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1191  { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1192  { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1193  { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1194  { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1195  { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1196  { 0, 0, 0, 0 }
1197 };
1198 
1199 /* default 82598 register test */
1200 static const struct ixgbe_reg_test reg_test_82598[] = {
1201  { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1202  { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1203  { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1204  { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1205  { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1206  { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1207  { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1208  /* Enable all four RX queues before testing. */
1209  { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1210  /* RDH is read-only for 82598, only test RDT. */
1211  { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1212  { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1213  { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1214  { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1215  { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1216  { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1217  { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1218  { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1219  { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1220  { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1221  { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1222  { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1223  { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1224  { 0, 0, 0, 0 }
1225 };
1226 
1227 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1228  u32 mask, u32 write)
1229 {
1230  u32 pat, val, before;
1231  static const u32 test_pattern[] = {
1232  0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1233 
1234  for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1235  before = readl(adapter->hw.hw_addr + reg);
1236  writel((test_pattern[pat] & write),
1237  (adapter->hw.hw_addr + reg));
1238  val = readl(adapter->hw.hw_addr + reg);
1239  if (val != (test_pattern[pat] & write & mask)) {
1240  e_err(drv, "pattern test reg %04X failed: got "
1241  "0x%08X expected 0x%08X\n",
1242  reg, val, (test_pattern[pat] & write & mask));
1243  *data = reg;
1244  writel(before, adapter->hw.hw_addr + reg);
1245  return 1;
1246  }
1247  writel(before, adapter->hw.hw_addr + reg);
1248  }
1249  return 0;
1250 }
1251 
1252 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1253  u32 mask, u32 write)
1254 {
1255  u32 val, before;
1256  before = readl(adapter->hw.hw_addr + reg);
1257  writel((write & mask), (adapter->hw.hw_addr + reg));
1258  val = readl(adapter->hw.hw_addr + reg);
1259  if ((write & mask) != (val & mask)) {
1260  e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1261  "expected 0x%08X\n", reg, (val & mask), (write & mask));
1262  *data = reg;
1263  writel(before, (adapter->hw.hw_addr + reg));
1264  return 1;
1265  }
1266  writel(before, (adapter->hw.hw_addr + reg));
1267  return 0;
1268 }
1269 
1270 #define REG_PATTERN_TEST(reg, mask, write) \
1271  do { \
1272  if (reg_pattern_test(adapter, data, reg, mask, write)) \
1273  return 1; \
1274  } while (0) \
1275 
1276 
1277 #define REG_SET_AND_CHECK(reg, mask, write) \
1278  do { \
1279  if (reg_set_and_check(adapter, data, reg, mask, write)) \
1280  return 1; \
1281  } while (0) \
1282 
1283 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1284 {
1285  const struct ixgbe_reg_test *test;
1286  u32 value, before, after;
1287  u32 i, toggle;
1288 
1289  switch (adapter->hw.mac.type) {
1290  case ixgbe_mac_82598EB:
1291  toggle = 0x7FFFF3FF;
1292  test = reg_test_82598;
1293  break;
1294  case ixgbe_mac_82599EB:
1295  case ixgbe_mac_X540:
1296  toggle = 0x7FFFF30F;
1297  test = reg_test_82599;
1298  break;
1299  default:
1300  *data = 1;
1301  return 1;
1302  break;
1303  }
1304 
1305  /*
1306  * Because the status register is such a special case,
1307  * we handle it separately from the rest of the register
1308  * tests. Some bits are read-only, some toggle, and some
1309  * are writeable on newer MACs.
1310  */
1311  before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1312  value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1313  IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1314  after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1315  if (value != after) {
1316  e_err(drv, "failed STATUS register test got: 0x%08X "
1317  "expected: 0x%08X\n", after, value);
1318  *data = 1;
1319  return 1;
1320  }
1321  /* restore previous status */
1322  IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
1323 
1324  /*
1325  * Perform the remainder of the register test, looping through
1326  * the test table until we either fail or reach the null entry.
1327  */
1328  while (test->reg) {
1329  for (i = 0; i < test->array_len; i++) {
1330  switch (test->test_type) {
1331  case PATTERN_TEST:
1332  REG_PATTERN_TEST(test->reg + (i * 0x40),
1333  test->mask,
1334  test->write);
1335  break;
1336  case SET_READ_TEST:
1337  REG_SET_AND_CHECK(test->reg + (i * 0x40),
1338  test->mask,
1339  test->write);
1340  break;
1341  case WRITE_NO_TEST:
1342  writel(test->write,
1343  (adapter->hw.hw_addr + test->reg)
1344  + (i * 0x40));
1345  break;
1346  case TABLE32_TEST:
1347  REG_PATTERN_TEST(test->reg + (i * 4),
1348  test->mask,
1349  test->write);
1350  break;
1351  case TABLE64_TEST_LO:
1352  REG_PATTERN_TEST(test->reg + (i * 8),
1353  test->mask,
1354  test->write);
1355  break;
1356  case TABLE64_TEST_HI:
1357  REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1358  test->mask,
1359  test->write);
1360  break;
1361  }
1362  }
1363  test++;
1364  }
1365 
1366  *data = 0;
1367  return 0;
1368 }
1369 
1370 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1371 {
1372  struct ixgbe_hw *hw = &adapter->hw;
1373  if (hw->eeprom.ops.validate_checksum(hw, NULL))
1374  *data = 1;
1375  else
1376  *data = 0;
1377  return *data;
1378 }
1379 
1380 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1381 {
1382  struct net_device *netdev = (struct net_device *) data;
1383  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1384 
1385  adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1386 
1387  return IRQ_HANDLED;
1388 }
1389 
1390 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1391 {
1392  struct net_device *netdev = adapter->netdev;
1393  u32 mask, i = 0, shared_int = true;
1394  u32 irq = adapter->pdev->irq;
1395 
1396  *data = 0;
1397 
1398  /* Hook up test interrupt handler just for this test */
1399  if (adapter->msix_entries) {
1400  /* NOTE: we don't test MSI-X interrupts here, yet */
1401  return 0;
1402  } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1403  shared_int = false;
1404  if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1405  netdev)) {
1406  *data = 1;
1407  return -1;
1408  }
1409  } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1410  netdev->name, netdev)) {
1411  shared_int = false;
1412  } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1413  netdev->name, netdev)) {
1414  *data = 1;
1415  return -1;
1416  }
1417  e_info(hw, "testing %s interrupt\n", shared_int ?
1418  "shared" : "unshared");
1419 
1420  /* Disable all the interrupts */
1421  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1422  IXGBE_WRITE_FLUSH(&adapter->hw);
1423  usleep_range(10000, 20000);
1424 
1425  /* Test each interrupt */
1426  for (; i < 10; i++) {
1427  /* Interrupt to test */
1428  mask = 1 << i;
1429 
1430  if (!shared_int) {
1431  /*
1432  * Disable the interrupts to be reported in
1433  * the cause register and then force the same
1434  * interrupt and see if one gets posted. If
1435  * an interrupt was posted to the bus, the
1436  * test failed.
1437  */
1438  adapter->test_icr = 0;
1439  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1440  ~mask & 0x00007FFF);
1441  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1442  ~mask & 0x00007FFF);
1443  IXGBE_WRITE_FLUSH(&adapter->hw);
1444  usleep_range(10000, 20000);
1445 
1446  if (adapter->test_icr & mask) {
1447  *data = 3;
1448  break;
1449  }
1450  }
1451 
1452  /*
1453  * Enable the interrupt to be reported in the cause
1454  * register and then force the same interrupt and see
1455  * if one gets posted. If an interrupt was not posted
1456  * to the bus, the test failed.
1457  */
1458  adapter->test_icr = 0;
1459  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1460  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1461  IXGBE_WRITE_FLUSH(&adapter->hw);
1462  usleep_range(10000, 20000);
1463 
1464  if (!(adapter->test_icr & mask)) {
1465  *data = 4;
1466  break;
1467  }
1468 
1469  if (!shared_int) {
1470  /*
1471  * Disable the other interrupts to be reported in
1472  * the cause register and then force the other
1473  * interrupts and see if any get posted. If
1474  * an interrupt was posted to the bus, the
1475  * test failed.
1476  */
1477  adapter->test_icr = 0;
1478  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1479  ~mask & 0x00007FFF);
1480  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1481  ~mask & 0x00007FFF);
1482  IXGBE_WRITE_FLUSH(&adapter->hw);
1483  usleep_range(10000, 20000);
1484 
1485  if (adapter->test_icr) {
1486  *data = 5;
1487  break;
1488  }
1489  }
1490  }
1491 
1492  /* Disable all the interrupts */
1493  IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1494  IXGBE_WRITE_FLUSH(&adapter->hw);
1495  usleep_range(10000, 20000);
1496 
1497  /* Unhook test interrupt handler */
1498  free_irq(irq, netdev);
1499 
1500  return *data;
1501 }
1502 
1503 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1504 {
1505  struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1506  struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1507  struct ixgbe_hw *hw = &adapter->hw;
1508  u32 reg_ctl;
1509 
1510  /* shut down the DMA engines now so they can be reinitialized later */
1511 
1512  /* first Rx */
1513  reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1514  reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1515  IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1516  ixgbe_disable_rx_queue(adapter, rx_ring);
1517 
1518  /* now Tx */
1519  reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1520  reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1521  IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1522 
1523  switch (hw->mac.type) {
1524  case ixgbe_mac_82599EB:
1525  case ixgbe_mac_X540:
1526  reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1527  reg_ctl &= ~IXGBE_DMATXCTL_TE;
1528  IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1529  break;
1530  default:
1531  break;
1532  }
1533 
1534  ixgbe_reset(adapter);
1535 
1536  ixgbe_free_tx_resources(&adapter->test_tx_ring);
1537  ixgbe_free_rx_resources(&adapter->test_rx_ring);
1538 }
1539 
1540 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1541 {
1542  struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1543  struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1544  u32 rctl, reg_data;
1545  int ret_val;
1546  int err;
1547 
1548  /* Setup Tx descriptor ring and Tx buffers */
1549  tx_ring->count = IXGBE_DEFAULT_TXD;
1550  tx_ring->queue_index = 0;
1551  tx_ring->dev = &adapter->pdev->dev;
1552  tx_ring->netdev = adapter->netdev;
1553  tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1554 
1555  err = ixgbe_setup_tx_resources(tx_ring);
1556  if (err)
1557  return 1;
1558 
1559  switch (adapter->hw.mac.type) {
1560  case ixgbe_mac_82599EB:
1561  case ixgbe_mac_X540:
1562  reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1563  reg_data |= IXGBE_DMATXCTL_TE;
1564  IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1565  break;
1566  default:
1567  break;
1568  }
1569 
1570  ixgbe_configure_tx_ring(adapter, tx_ring);
1571 
1572  /* Setup Rx Descriptor ring and Rx buffers */
1573  rx_ring->count = IXGBE_DEFAULT_RXD;
1574  rx_ring->queue_index = 0;
1575  rx_ring->dev = &adapter->pdev->dev;
1576  rx_ring->netdev = adapter->netdev;
1577  rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1578 
1579  err = ixgbe_setup_rx_resources(rx_ring);
1580  if (err) {
1581  ret_val = 4;
1582  goto err_nomem;
1583  }
1584 
1585  rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1586  IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1587 
1588  ixgbe_configure_rx_ring(adapter, rx_ring);
1589 
1590  rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1591  IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1592 
1593  return 0;
1594 
1595 err_nomem:
1596  ixgbe_free_desc_rings(adapter);
1597  return ret_val;
1598 }
1599 
1600 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1601 {
1602  struct ixgbe_hw *hw = &adapter->hw;
1603  u32 reg_data;
1604 
1605  /* X540 needs to set the MACC.FLU bit to force link up */
1606  if (adapter->hw.mac.type == ixgbe_mac_X540) {
1607  reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1608  reg_data |= IXGBE_MACC_FLU;
1609  IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1610  }
1611 
1612  /* right now we only support MAC loopback in the driver */
1613  reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1614  /* Setup MAC loopback */
1615  reg_data |= IXGBE_HLREG0_LPBK;
1616  IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1617 
1618  reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1619  reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1620  IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
1621 
1622  reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1623  reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1624  reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1625  IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1626  IXGBE_WRITE_FLUSH(hw);
1627  usleep_range(10000, 20000);
1628 
1629  /* Disable Atlas Tx lanes; re-enabled in reset path */
1630  if (hw->mac.type == ixgbe_mac_82598EB) {
1631  u8 atlas;
1632 
1633  hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1634  atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1635  hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1636 
1637  hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1638  atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1639  hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1640 
1641  hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1642  atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1643  hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1644 
1645  hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1646  atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1647  hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1648  }
1649 
1650  return 0;
1651 }
1652 
1653 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1654 {
1655  u32 reg_data;
1656 
1657  reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1658  reg_data &= ~IXGBE_HLREG0_LPBK;
1659  IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
1660 }
1661 
1662 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1663  unsigned int frame_size)
1664 {
1665  memset(skb->data, 0xFF, frame_size);
1666  frame_size >>= 1;
1667  memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1668  memset(&skb->data[frame_size + 10], 0xBE, 1);
1669  memset(&skb->data[frame_size + 12], 0xAF, 1);
1670 }
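
The test pattern built above is easier to see in isolation: the frame is filled with 0xFF, its third quarter (less one byte) is overwritten with 0xAA, and marker bytes 0xBE/0xAF are planted just past the midpoint, which is exactly what ixgbe_check_lbtest_frame() below verifies. A minimal userspace mirror of the same logic, assuming the 1024-byte frame size used by ixgbe_run_loopback_test():

    #include <stdio.h>
    #include <string.h>

    /* userspace mirror of ixgbe_create_lbtest_frame() */
    static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
    {
        memset(data, 0xFF, frame_size);
        frame_size >>= 1;
        memset(&data[frame_size], 0xAA, frame_size / 2 - 1);
        data[frame_size + 10] = 0xBE;
        data[frame_size + 12] = 0xAF;
    }

    int main(void)
    {
        unsigned char frame[1024];

        create_lbtest_frame(frame, sizeof(frame));
        /* prints: 0xFF 0xAA 0xBE 0xAF */
        printf("0x%02X 0x%02X 0x%02X 0x%02X\n",
               frame[3], frame[512], frame[522], frame[524]);
        return 0;
    }
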
1671 
1672 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1673  unsigned int frame_size)
1674 {
1675  unsigned char *data;
1676  bool match = true;
1677 
1678  frame_size >>= 1;
1679 
1680  data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1681 
1682  if (data[3] != 0xFF ||
1683  data[frame_size + 10] != 0xBE ||
1684  data[frame_size + 12] != 0xAF)
1685  match = false;
1686 
1687  kunmap(rx_buffer->page);
1688 
1689  return match;
1690 }
1691 
1692 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1693  struct ixgbe_ring *tx_ring,
1694  unsigned int size)
1695 {
1696  union ixgbe_adv_rx_desc *rx_desc;
1697  struct ixgbe_rx_buffer *rx_buffer;
1698  struct ixgbe_tx_buffer *tx_buffer;
1699  u16 rx_ntc, tx_ntc, count = 0;
1700 
1701  /* initialize next to clean and descriptor values */
1702  rx_ntc = rx_ring->next_to_clean;
1703  tx_ntc = tx_ring->next_to_clean;
1704  rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1705 
1706  while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1707  /* check Rx buffer */
1708  rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1709 
1710  /* sync Rx buffer for CPU read */
1711  dma_sync_single_for_cpu(rx_ring->dev,
1712  rx_buffer->dma,
1713  ixgbe_rx_bufsz(rx_ring),
1714  DMA_FROM_DEVICE);
1715 
1716  /* verify contents of the received frame */
1717  if (ixgbe_check_lbtest_frame(rx_buffer, size))
1718  count++;
1719 
1720  /* sync Rx buffer for device write */
1721  dma_sync_single_for_device(rx_ring->dev,
1722  rx_buffer->dma,
1723  ixgbe_rx_bufsz(rx_ring),
1724  DMA_FROM_DEVICE);
1725 
1726  /* unmap buffer on Tx side */
1727  tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1728  ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1729 
1730  /* increment Rx/Tx next to clean counters */
1731  rx_ntc++;
1732  if (rx_ntc == rx_ring->count)
1733  rx_ntc = 0;
1734  tx_ntc++;
1735  if (tx_ntc == tx_ring->count)
1736  tx_ntc = 0;
1737 
1738  /* fetch next descriptor */
1739  rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1740  }
1741 
1742  netdev_tx_reset_queue(txring_txq(tx_ring));
1743 
1744  /* re-map buffers to ring, store next to clean values */
1745  ixgbe_alloc_rx_buffers(rx_ring, count);
1746  rx_ring->next_to_clean = rx_ntc;
1747  tx_ring->next_to_clean = tx_ntc;
1748 
1749  return count;
1750 }
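
The next-to-clean bookkeeping above uses the standard descriptor-ring idiom: increment the index and wrap it back to zero when it reaches the ring size. A standalone sketch of that advance, with a hypothetical ring_advance() helper:

    #include <stdio.h>

    /* hypothetical helper mirroring the rx_ntc/tx_ntc updates above */
    static unsigned short ring_advance(unsigned short ntc, unsigned short count)
    {
        ntc++;
        return (ntc == count) ? 0 : ntc;
    }

    int main(void)
    {
        unsigned short ntc = 510;
        int i;

        for (i = 0; i < 4; i++) {
            printf("%u ", ntc);        /* prints: 510 511 0 1 */
            ntc = ring_advance(ntc, 512);
        }
        printf("\n");
        return 0;
    }
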
1751 
1752 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1753 {
1754  struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1755  struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1756  int i, j, lc, good_cnt, ret_val = 0;
1757  unsigned int size = 1024;
1758  netdev_tx_t tx_ret_val;
1759  struct sk_buff *skb;
1760 
1761  /* allocate test skb */
1762  skb = alloc_skb(size, GFP_KERNEL);
1763  if (!skb)
1764  return 11;
1765 
1766  /* place data into test skb */
1767  ixgbe_create_lbtest_frame(skb, size);
1768  skb_put(skb, size);
1769 
1770  /*
1771  * Calculate the loop count based on the largest descriptor ring
1772  * The idea is to wrap the largest ring a number of times using 64
1773  * send/receive pairs during each loop
1774  */
1775 
1776  if (rx_ring->count <= tx_ring->count)
1777  lc = ((tx_ring->count / 64) * 2) + 1;
1778  else
1779  lc = ((rx_ring->count / 64) * 2) + 1;
1780 
1781  for (j = 0; j <= lc; j++) {
1782  /* reset count of good packets */
1783  good_cnt = 0;
1784 
1785  /* place 64 packets on the transmit queue */
1786  for (i = 0; i < 64; i++) {
1787  skb_get(skb);
1788  tx_ret_val = ixgbe_xmit_frame_ring(skb,
1789  adapter,
1790  tx_ring);
1791  if (tx_ret_val == NETDEV_TX_OK)
1792  good_cnt++;
1793  }
1794 
1795  if (good_cnt != 64) {
1796  ret_val = 12;
1797  break;
1798  }
1799 
1800  /* allow 200 milliseconds for packets to go from Tx to Rx */
1801  msleep(200);
1802 
1803  good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1804  if (good_cnt != 64) {
1805  ret_val = 13;
1806  break;
1807  }
1808  }
1809 
1810  /* free the original skb */
1811  kfree_skb(skb);
1812 
1813  return ret_val;
1814 }
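
A worked example of the loop-count arithmetic above, assuming both test rings use the driver default of 512 descriptors (IXGBE_DEFAULT_TXD/IXGBE_DEFAULT_RXD, an assumption from ixgbe.h); note the j-loop runs lc + 1 times:

    #include <stdio.h>

    int main(void)
    {
        /* assumption: default test ring size of 512 descriptors */
        unsigned int count = 512;
        int lc = ((count / 64) * 2) + 1;

        /* the loop above iterates lc + 1 times, 64 frames per pass,
         * wrapping the 512-entry ring a little over twice */
        printf("lc = %d, frames sent = %d\n", lc, (lc + 1) * 64);
        /* prints: lc = 17, frames sent = 1152 */
        return 0;
    }
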
1815 
1816 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1817 {
1818  *data = ixgbe_setup_desc_rings(adapter);
1819  if (*data)
1820  goto out;
1821  *data = ixgbe_setup_loopback_test(adapter);
1822  if (*data)
1823  goto err_loopback;
1824  *data = ixgbe_run_loopback_test(adapter);
1825  ixgbe_loopback_cleanup(adapter);
1826 
1827 err_loopback:
1828  ixgbe_free_desc_rings(adapter);
1829 out:
1830  return *data;
1831 }
1832 
1833 static void ixgbe_diag_test(struct net_device *netdev,
1834  struct ethtool_test *eth_test, u64 *data)
1835 {
1836  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1837  bool if_running = netif_running(netdev);
1838 
1839  set_bit(__IXGBE_TESTING, &adapter->state);
1840  if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1841  /* Offline tests */
1842 
1843  e_info(hw, "offline testing starting\n");
1844 
1845  /* Link test performed before hardware reset so autoneg doesn't
1846  * interfere with test result */
1847  if (ixgbe_link_test(adapter, &data[4]))
1848  eth_test->flags |= ETH_TEST_FL_FAILED;
1849 
1850  if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1851  int i;
1852  for (i = 0; i < adapter->num_vfs; i++) {
1853  if (adapter->vfinfo[i].clear_to_send) {
1854  netdev_warn(netdev, "%s",
1855  "offline diagnostic is not "
1856  "supported when VFs are "
1857  "present\n");
1858  data[0] = 1;
1859  data[1] = 1;
1860  data[2] = 1;
1861  data[3] = 1;
1862  eth_test->flags |= ETH_TEST_FL_FAILED;
1863  clear_bit(__IXGBE_TESTING,
1864  &adapter->state);
1865  goto skip_ol_tests;
1866  }
1867  }
1868  }
1869 
1870  if (if_running)
1871  /* indicate we're in test mode */
1872  dev_close(netdev);
1873  else
1874  ixgbe_reset(adapter);
1875 
1876  e_info(hw, "register testing starting\n");
1877  if (ixgbe_reg_test(adapter, &data[0]))
1878  eth_test->flags |= ETH_TEST_FL_FAILED;
1879 
1880  ixgbe_reset(adapter);
1881  e_info(hw, "eeprom testing starting\n");
1882  if (ixgbe_eeprom_test(adapter, &data[1]))
1883  eth_test->flags |= ETH_TEST_FL_FAILED;
1884 
1885  ixgbe_reset(adapter);
1886  e_info(hw, "interrupt testing starting\n");
1887  if (ixgbe_intr_test(adapter, &data[2]))
1888  eth_test->flags |= ETH_TEST_FL_FAILED;
1889 
1890  /* If SRIOV or VMDq is enabled then skip MAC
1891  * loopback diagnostic. */
1892  if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1893  IXGBE_FLAG_VMDQ_ENABLED)) {
1894  e_info(hw, "Skip MAC loopback diagnostic in VT "
1895  "mode\n");
1896  data[3] = 0;
1897  goto skip_loopback;
1898  }
1899 
1900  ixgbe_reset(adapter);
1901  e_info(hw, "loopback testing starting\n");
1902  if (ixgbe_loopback_test(adapter, &data[3]))
1903  eth_test->flags |= ETH_TEST_FL_FAILED;
1904 
1905 skip_loopback:
1906  ixgbe_reset(adapter);
1907 
1908  clear_bit(__IXGBE_TESTING, &adapter->state);
1909  if (if_running)
1910  dev_open(netdev);
1911  } else {
1912  e_info(hw, "online testing starting\n");
1913  /* Online tests */
1914  if (ixgbe_link_test(adapter, &data[4]))
1915  eth_test->flags |= ETH_TEST_FL_FAILED;
1916 
1917  /* Offline tests aren't run in online mode; pass by default */
1918  data[0] = 0;
1919  data[1] = 0;
1920  data[2] = 0;
1921  data[3] = 0;
1922 
1923  clear_bit(__IXGBE_TESTING, &adapter->state);
1924  }
1925 skip_ol_tests:
1926  msleep_interruptible(4 * 1000);
1927 }
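
ixgbe_diag_test() is reached from userspace through the ETHTOOL_TEST ioctl, which is what `ethtool -t eth0 offline` issues. A minimal sketch of that call, assuming a placeholder interface name "eth0" and this driver's five result slots (register, eeprom, interrupt, loopback, link); a robust tool would query the count via ETHTOOL_GSSET_INFO first:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_test *test;
        struct ifreq ifr;
        int fd, i;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0)
            return 1;

        /* room for the five u64 results written into data[0..4] */
        test = calloc(1, sizeof(*test) + 5 * sizeof(__u64));
        test->cmd = ETHTOOL_TEST;
        test->flags = ETH_TEST_FL_OFFLINE;    /* take the interface down */
        test->len = 5;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)test;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("ETHTOOL_TEST");
            return 1;
        }

        for (i = 0; i < 5; i++)
            printf("result[%d] = %llu\n", i,
                   (unsigned long long)test->data[i]);
        printf("overall: %s\n",
               (test->flags & ETH_TEST_FL_FAILED) ? "FAILED" : "PASSED");
        return 0;
    }
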
1928 
1929 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1930  struct ethtool_wolinfo *wol)
1931 {
1932  struct ixgbe_hw *hw = &adapter->hw;
1933  int retval = 0;
1934 
1935  /* WOL not supported for all devices */
1936  if (!ixgbe_wol_supported(adapter, hw->device_id,
1937  hw->subsystem_device_id)) {
1938  retval = 1;
1939  wol->supported = 0;
1940  }
1941 
1942  return retval;
1943 }
1944 
1945 static void ixgbe_get_wol(struct net_device *netdev,
1946  struct ethtool_wolinfo *wol)
1947 {
1948  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1949 
1950  wol->supported = WAKE_UCAST | WAKE_MCAST |
1951  WAKE_BCAST | WAKE_MAGIC;
1952  wol->wolopts = 0;
1953 
1954  if (ixgbe_wol_exclusion(adapter, wol) ||
1955  !device_can_wakeup(&adapter->pdev->dev))
1956  return;
1957 
1958  if (adapter->wol & IXGBE_WUFC_EX)
1959  wol->wolopts |= WAKE_UCAST;
1960  if (adapter->wol & IXGBE_WUFC_MC)
1961  wol->wolopts |= WAKE_MCAST;
1962  if (adapter->wol & IXGBE_WUFC_BC)
1963  wol->wolopts |= WAKE_BCAST;
1964  if (adapter->wol & IXGBE_WUFC_MAG)
1965  wol->wolopts |= WAKE_MAGIC;
1966 }
1967 
1968 static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1969 {
1970  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1971 
1972  if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1973  return -EOPNOTSUPP;
1974 
1975  if (ixgbe_wol_exclusion(adapter, wol))
1976  return wol->wolopts ? -EOPNOTSUPP : 0;
1977 
1978  adapter->wol = 0;
1979 
1980  if (wol->wolopts & WAKE_UCAST)
1981  adapter->wol |= IXGBE_WUFC_EX;
1982  if (wol->wolopts & WAKE_MCAST)
1983  adapter->wol |= IXGBE_WUFC_MC;
1984  if (wol->wolopts & WAKE_BCAST)
1985  adapter->wol |= IXGBE_WUFC_BC;
1986  if (wol->wolopts & WAKE_MAGIC)
1987  adapter->wol |= IXGBE_WUFC_MAG;
1988 
1989  device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1990 
1991  return 0;
1992 }
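
ixgbe_set_wol() is the kernel side of the ETHTOOL_SWOL ioctl. A minimal userspace sketch that enables magic-packet wake, roughly what `ethtool -s eth0 wol g` does ("eth0" is a placeholder):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_wolinfo wol;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&wol, 0, sizeof(wol));
        wol.cmd = ETHTOOL_SWOL;
        wol.wolopts = WAKE_MAGIC;    /* maps to IXGBE_WUFC_MAG above */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&wol;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("ETHTOOL_SWOL");
            return 1;
        }
        return 0;
    }
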
1993 
1994 static int ixgbe_nway_reset(struct net_device *netdev)
1995 {
1996  struct ixgbe_adapter *adapter = netdev_priv(netdev);
1997 
1998  if (netif_running(netdev))
1999  ixgbe_reinit_locked(adapter);
2000 
2001  return 0;
2002 }
2003 
2004 static int ixgbe_set_phys_id(struct net_device *netdev,
2005  enum ethtool_phys_id_state state)
2006 {
2007  struct ixgbe_adapter *adapter = netdev_priv(netdev);
2008  struct ixgbe_hw *hw = &adapter->hw;
2009 
2010  switch (state) {
2011  case ETHTOOL_ID_ACTIVE:
2012  adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2013  return 2;
2014 
2015  case ETHTOOL_ID_ON:
2016  hw->mac.ops.led_on(hw, IXGBE_LED_ON);
2017  break;
2018 
2019  case ETHTOOL_ID_OFF:
2020  hw->mac.ops.led_off(hw, IXGBE_LED_ON);
2021  break;
2022 
2023  case ETHTOOL_ID_INACTIVE:
2024  /* Restore LED settings */
2025  IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
2026  break;
2027  }
2028 
2029  return 0;
2030 }
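
The value 2 returned for ETHTOOL_ID_ACTIVE tells the ethtool core to drive the blinking itself, alternating the ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks at that many cycles per second. From userspace the sequence is triggered with the legacy ETHTOOL_PHYS_ID command, as `ethtool -p eth0 5` does; a minimal sketch, with "eth0" and the 5-second duration as placeholders:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_value id = { .cmd = ETHTOOL_PHYS_ID, .data = 5 };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&id;

        /* blocks for ~5 seconds while the core toggles the LED */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            perror("ETHTOOL_PHYS_ID");
        return 0;
    }
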
2031 
2032 static int ixgbe_get_coalesce(struct net_device *netdev,
2033  struct ethtool_coalesce *ec)
2034 {
2035  struct ixgbe_adapter *adapter = netdev_priv(netdev);
2036 
2037  /* only valid if in constant ITR mode */
2038  if (adapter->rx_itr_setting <= 1)
2039  ec->rx_coalesce_usecs = adapter->rx_itr_setting;
2040  else
2041  ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
2042 
2043  /* if in mixed tx/rx queues per vector mode, report only rx settings */
2044  if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
2045  return 0;
2046 
2047  /* only valid if in constant ITR mode */
2048  if (adapter->tx_itr_setting <= 1)
2049  ec->tx_coalesce_usecs = adapter->tx_itr_setting;
2050  else
2051  ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
2052 
2053  return 0;
2054 }
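
The shift-by-two in both coalesce handlers exists because the ITR setting is stored pre-scaled for direct use in the EITR register; values of 0 and 1 pass through unscaled (0 disables moderation, 1 selects the driver's dynamic mode). A round-trip sketch of the arithmetic:

    #include <stdio.h>

    /* mirrors ixgbe_set_coalesce() / ixgbe_get_coalesce() above */
    static unsigned int itr_store(unsigned int usecs)
    {
        return (usecs > 1) ? usecs << 2 : usecs;
    }

    static unsigned int itr_report(unsigned int setting)
    {
        return (setting <= 1) ? setting : setting >> 2;
    }

    int main(void)
    {
        unsigned int usecs = 30;
        unsigned int setting = itr_store(usecs);

        /* prints: stored 120, reported back 30 */
        printf("stored %u, reported back %u\n", setting, itr_report(setting));
        return 0;
    }
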
2055 
2056 /*
2057  * this function must be called before setting the new value of
2058  * rx_itr_setting
2059  */
2060 static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
2061 {
2062  struct net_device *netdev = adapter->netdev;
2063 
2064  /* nothing to do if LRO or RSC are not enabled */
2065  if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
2066  !(netdev->features & NETIF_F_LRO))
2067  return false;
2068 
2069  /* check the feature flag value and enable RSC if necessary */
2070  if (adapter->rx_itr_setting == 1 ||
2071  adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
2072  if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2073  adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2074  e_info(probe, "rx-usecs value high enough "
2075  "to re-enable RSC\n");
2076  return true;
2077  }
2078  /* if interrupt rate is too high then disable RSC */
2079  } else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2080  adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2081  e_info(probe, "rx-usecs set too low, disabling RSC\n");
2082  return true;
2083  }
2084  return false;
2085 }
2086 
2087 static int ixgbe_set_coalesce(struct net_device *netdev,
2088  struct ethtool_coalesce *ec)
2089 {
2090  struct ixgbe_adapter *adapter = netdev_priv(netdev);
2091  struct ixgbe_q_vector *q_vector;
2092  int i;
2093  u16 tx_itr_param, rx_itr_param;
2094  bool need_reset = false;
2095 
2096  /* don't accept tx specific changes if we've got mixed RxTx vectors */
2097  if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
2098  && ec->tx_coalesce_usecs)
2099  return -EINVAL;
2100 
2101  if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
2102  (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
2103  return -EINVAL;
2104 
2105  if (ec->rx_coalesce_usecs > 1)
2106  adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
2107  else
2108  adapter->rx_itr_setting = ec->rx_coalesce_usecs;
2109 
2110  if (adapter->rx_itr_setting == 1)
2111  rx_itr_param = IXGBE_20K_ITR;
2112  else
2113  rx_itr_param = adapter->rx_itr_setting;
2114 
2115  if (ec->tx_coalesce_usecs > 1)
2116  adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
2117  else
2118  adapter->tx_itr_setting = ec->tx_coalesce_usecs;
2119 
2120  if (adapter->tx_itr_setting == 1)
2121  tx_itr_param = IXGBE_10K_ITR;
2122  else
2123  tx_itr_param = adapter->tx_itr_setting;
2124 
2125  /* check the old value and enable RSC if necessary */
2126  need_reset = ixgbe_update_rsc(adapter);
2127 
2128  for (i = 0; i < adapter->num_q_vectors; i++) {
2129  q_vector = adapter->q_vector[i];
2130  if (q_vector->tx.count && !q_vector->rx.count)
2131  /* tx only */
2132  q_vector->itr = tx_itr_param;
2133  else
2134  /* rx only or mixed */
2135  q_vector->itr = rx_itr_param;
2136  ixgbe_write_eitr(q_vector);
2137  }
2138 
2139  /*
2140  * do reset here at the end to make sure EITR==0 case is handled
2141  * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2142  * also locks in RSC enable/disable which requires reset
2143  */
2144  if (need_reset)
2145  ixgbe_do_reset(netdev);
2146 
2147  return 0;
2148 }
2149 
2150 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2151  struct ethtool_rxnfc *cmd)
2152 {
2153  union ixgbe_atr_input *mask = &adapter->fdir_mask;
2154  struct ethtool_rx_flow_spec *fsp =
2155  (struct ethtool_rx_flow_spec *)&cmd->fs;
2156  struct hlist_node *node, *node2;
2157  struct ixgbe_fdir_filter *rule = NULL;
2158 
2159  /* report total rule count */
2160  cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2161 
2162  hlist_for_each_entry_safe(rule, node, node2,
2163  &adapter->fdir_filter_list, fdir_node) {
2164  if (fsp->location <= rule->sw_idx)
2165  break;
2166  }
2167 
2168  if (!rule || fsp->location != rule->sw_idx)
2169  return -EINVAL;
2170 
2171  /* fill out the flow spec entry */
2172 
2173  /* set flow type field */
2174  switch (rule->filter.formatted.flow_type) {
2175  case IXGBE_ATR_FLOW_TYPE_TCPV4:
2176  fsp->flow_type = TCP_V4_FLOW;
2177  break;
2178  case IXGBE_ATR_FLOW_TYPE_UDPV4:
2179  fsp->flow_type = UDP_V4_FLOW;
2180  break;
2181  case IXGBE_ATR_FLOW_TYPE_SCTPV4:
2182  fsp->flow_type = SCTP_V4_FLOW;
2183  break;
2184  case IXGBE_ATR_FLOW_TYPE_IPV4:
2185  fsp->flow_type = IP_USER_FLOW;
2186  fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
2187  fsp->h_u.usr_ip4_spec.proto = 0;
2188  fsp->m_u.usr_ip4_spec.proto = 0;
2189  break;
2190  default:
2191  return -EINVAL;
2192  }
2193 
2194  fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2195  fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2196  fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2197  fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2198  fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2199  fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2200  fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2201  fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2202  fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2203  fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2204  fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2205  fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2206  fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2207  fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2208  fsp->flow_type |= FLOW_EXT;
2209 
2210  /* record action */
2211  if (rule->action == IXGBE_FDIR_DROP_QUEUE)
2212  fsp->ring_cookie = RX_CLS_FLOW_DISC;
2213  else
2214  fsp->ring_cookie = rule->action;
2215 
2216  return 0;
2217 }
2218 
2219 static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
2220  struct ethtool_rxnfc *cmd,
2221  u32 *rule_locs)
2222 {
2223  struct hlist_node *node, *node2;
2224  struct ixgbe_fdir_filter *rule;
2225  int cnt = 0;
2226 
2227  /* report total rule count */
2228  cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2229 
2230  hlist_for_each_entry_safe(rule, node, node2,
2231  &adapter->fdir_filter_list, fdir_node) {
2232  if (cnt == cmd->rule_cnt)
2233  return -EMSGSIZE;
2234  rule_locs[cnt] = rule->sw_idx;
2235  cnt++;
2236  }
2237 
2238  cmd->rule_cnt = cnt;
2239 
2240  return 0;
2241 }
2242 
2243 static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
2244  struct ethtool_rxnfc *cmd)
2245 {
2246  cmd->data = 0;
2247 
2248  /* Report default options for RSS on ixgbe */
2249  switch (cmd->flow_type) {
2250  case TCP_V4_FLOW:
2251  cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2252  case UDP_V4_FLOW:
2253  if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2254  cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2255  case SCTP_V4_FLOW:
2256  case AH_ESP_V4_FLOW:
2257  case AH_V4_FLOW:
2258  case ESP_V4_FLOW:
2259  case IPV4_FLOW:
2260  cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2261  break;
2262  case TCP_V6_FLOW:
2263  cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2264  case UDP_V6_FLOW:
2265  if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2266  cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2267  case SCTP_V6_FLOW:
2268  case AH_ESP_V6_FLOW:
2269  case AH_V6_FLOW:
2270  case ESP_V6_FLOW:
2271  case IPV6_FLOW:
2272  cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2273  break;
2274  default:
2275  return -EINVAL;
2276  }
2277 
2278  return 0;
2279 }
2280 
2281 static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2282  u32 *rule_locs)
2283 {
2284  struct ixgbe_adapter *adapter = netdev_priv(dev);
2285  int ret = -EOPNOTSUPP;
2286 
2287  switch (cmd->cmd) {
2288  case ETHTOOL_GRXRINGS:
2289  cmd->data = adapter->num_rx_queues;
2290  ret = 0;
2291  break;
2292  case ETHTOOL_GRXCLSRLCNT:
2293  cmd->rule_cnt = adapter->fdir_filter_count;
2294  ret = 0;
2295  break;
2296  case ETHTOOL_GRXCLSRULE:
2297  ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
2298  break;
2299  case ETHTOOL_GRXCLSRLALL:
2300  ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
2301  break;
2302  case ETHTOOL_GRXFH:
2303  ret = ixgbe_get_rss_hash_opts(adapter, cmd);
2304  break;
2305  default:
2306  break;
2307  }
2308 
2309  return ret;
2310 }
2311 
2312 static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2313  struct ixgbe_fdir_filter *input,
2314  u16 sw_idx)
2315 {
2316  struct ixgbe_hw *hw = &adapter->hw;
2317  struct hlist_node *node, *node2, *parent;
2318  struct ixgbe_fdir_filter *rule;
2319  int err = -EINVAL;
2320 
2321  parent = NULL;
2322  rule = NULL;
2323 
2324  hlist_for_each_entry_safe(rule, node, node2,
2325  &adapter->fdir_filter_list, fdir_node) {
2326  /* hash found, or no matching entry */
2327  if (rule->sw_idx >= sw_idx)
2328  break;
2329  parent = node;
2330  }
2331 
2332  /* if there is an old rule occupying our place remove it */
2333  if (rule && (rule->sw_idx == sw_idx)) {
2334  if (!input || (rule->filter.formatted.bkt_hash !=
2335  input->filter.formatted.bkt_hash)) {
2336  err = ixgbe_fdir_erase_perfect_filter_82599(hw,
2337  &rule->filter,
2338  sw_idx);
2339  }
2340 
2341  hlist_del(&rule->fdir_node);
2342  kfree(rule);
2343  adapter->fdir_filter_count--;
2344  }
2345 
2346  /*
2347  * If no input this was a delete, err should be 0 if a rule was
2348  * successfully found and removed from the list else -EINVAL
2349  */
2350  if (!input)
2351  return err;
2352 
2353  /* initialize node and set software index */
2354  INIT_HLIST_NODE(&input->fdir_node);
2355 
2356  /* add filter to the list */
2357  if (parent)
2358  hlist_add_after(parent, &input->fdir_node);
2359  else
2360  hlist_add_head(&input->fdir_node,
2361  &adapter->fdir_filter_list);
2362 
2363  /* update counts */
2364  adapter->fdir_filter_count++;
2365 
2366  return 0;
2367 }
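
The walk above keeps the filter list sorted by sw_idx: remember the last node below the new index, replace any node that already owns the index, then splice the new node in after the remembered parent. The same algorithm on a plain singly linked list, with a hypothetical struct filter standing in for struct ixgbe_fdir_filter:

    #include <stdio.h>
    #include <stdlib.h>

    struct filter {            /* stand-in for struct ixgbe_fdir_filter */
        unsigned short sw_idx;
        struct filter *next;
    };

    static void insert_sorted(struct filter **head, struct filter *in)
    {
        struct filter *parent = NULL, *rule = *head;

        /* walk until we find our slot, or an entry already using it */
        while (rule && rule->sw_idx < in->sw_idx) {
            parent = rule;
            rule = rule->next;
        }

        /* an old rule occupying our place is replaced */
        if (rule && rule->sw_idx == in->sw_idx) {
            in->next = rule->next;
            free(rule);
        } else {
            in->next = rule;
        }

        if (parent)
            parent->next = in;
        else
            *head = in;
    }

    int main(void)
    {
        struct filter *head = NULL, *f;
        unsigned short idx[] = { 4, 1, 9, 4 };    /* second 4 replaces the first */
        size_t i;

        for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++) {
            f = calloc(1, sizeof(*f));
            f->sw_idx = idx[i];
            insert_sorted(&head, f);
        }
        for (f = head; f; f = f->next)
            printf("%u ", f->sw_idx);    /* prints: 1 4 9 */
        printf("\n");
        return 0;
    }
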
2368 
2369 static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
2370  u8 *flow_type)
2371 {
2372  switch (fsp->flow_type & ~FLOW_EXT) {
2373  case TCP_V4_FLOW:
2374  *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2375  break;
2376  case UDP_V4_FLOW:
2377  *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2378  break;
2379  case SCTP_V4_FLOW:
2380  *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2381  break;
2382  case IP_USER_FLOW:
2383  switch (fsp->h_u.usr_ip4_spec.proto) {
2384  case IPPROTO_TCP:
2385  *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2386  break;
2387  case IPPROTO_UDP:
2388  *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2389  break;
2390  case IPPROTO_SCTP:
2391  *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2392  break;
2393  case 0:
2394  if (!fsp->m_u.usr_ip4_spec.proto) {
2395  *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2396  break;
2397  }
2398  default:
2399  return 0;
2400  }
2401  break;
2402  default:
2403  return 0;
2404  }
2405 
2406  return 1;
2407 }
2408 
2409 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2410  struct ethtool_rxnfc *cmd)
2411 {
2412  struct ethtool_rx_flow_spec *fsp =
2413  (struct ethtool_rx_flow_spec *)&cmd->fs;
2414  struct ixgbe_hw *hw = &adapter->hw;
2415  struct ixgbe_fdir_filter *input;
2416  union ixgbe_atr_input mask;
2417  int err;
2418 
2419  if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
2420  return -EOPNOTSUPP;
2421 
2422  /*
2423  * Don't allow programming if the action is a queue greater than
2424  * the number of online Rx queues.
2425  */
2426  if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
2427  (fsp->ring_cookie >= adapter->num_rx_queues))
2428  return -EINVAL;
2429 
2430  /* Don't allow indexes to exist outside of available space */
2431  if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
2432  e_err(drv, "Location out of range\n");
2433  return -EINVAL;
2434  }
2435 
2436  input = kzalloc(sizeof(*input), GFP_ATOMIC);
2437  if (!input)
2438  return -ENOMEM;
2439 
2440  memset(&mask, 0, sizeof(union ixgbe_atr_input));
2441 
2442  /* set SW index */
2443  input->sw_idx = fsp->location;
2444 
2445  /* record flow type */
2446  if (!ixgbe_flowspec_to_flow_type(fsp,
2447  &input->filter.formatted.flow_type)) {
2448  e_err(drv, "Unrecognized flow type\n");
2449  goto err_out;
2450  }
2451 
2452  mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2453  IXGBE_ATR_L4TYPE_MASK;
2454 
2455  if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2456  mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2457 
2458  /* Copy input into formatted structures */
2459  input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2460  mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2461  input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2462  mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2463  input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2464  mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2465  input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2466  mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2467 
2468  if (fsp->flow_type & FLOW_EXT) {
2469  input->filter.formatted.vm_pool =
2470  (unsigned char)ntohl(fsp->h_ext.data[1]);
2471  mask.formatted.vm_pool =
2472  (unsigned char)ntohl(fsp->m_ext.data[1]);
2473  input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2474  mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2475  input->filter.formatted.flex_bytes =
2476  fsp->h_ext.vlan_etype;
2477  mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
2478  }
2479 
2480  /* determine if we need to drop or route the packet */
2481  if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
2482  input->action = IXGBE_FDIR_DROP_QUEUE;
2483  else
2484  input->action = fsp->ring_cookie;
2485 
2486  spin_lock(&adapter->fdir_perfect_lock);
2487 
2488  if (hlist_empty(&adapter->fdir_filter_list)) {
2489  /* save mask and program input mask into HW */
2490  memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
2491  err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
2492  if (err) {
2493  e_err(drv, "Error writing mask\n");
2494  goto err_out_w_lock;
2495  }
2496  } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2497  e_err(drv, "Only one mask supported per port\n");
2498  goto err_out_w_lock;
2499  }
2500 
2501  /* apply mask and compute/store hash */
2502  ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2503 
2504  /* program filters to filter memory */
2505  err = ixgbe_fdir_write_perfect_filter_82599(hw,
2506  &input->filter, input->sw_idx,
2507  (input->action == IXGBE_FDIR_DROP_QUEUE) ?
2508  IXGBE_FDIR_DROP_QUEUE :
2509  adapter->rx_ring[input->action]->reg_idx);
2510  if (err)
2511  goto err_out_w_lock;
2512 
2513  ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
2514 
2515  spin_unlock(&adapter->fdir_perfect_lock);
2516 
2517  return err;
2518 err_out_w_lock:
2519  spin_unlock(&adapter->fdir_perfect_lock);
2520 err_out:
2521  kfree(input);
2522  return -EINVAL;
2523 }
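
ixgbe_add_ethtool_fdir_entry() services the ETHTOOL_SRXCLSRLINS ioctl. A minimal userspace sketch that steers TCP/IPv4 traffic with destination port 80 to Rx queue 1, roughly `ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 0`; the interface, port, queue and location are placeholders, and the mask follows the kernel-side convention that set bits participate in the match:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <arpa/inet.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);    /* match dst port 80 */
        nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;       /* all port bits compared */
        nfc.fs.ring_cookie = 1;                      /* steer to Rx queue 1 */
        nfc.fs.location = 0;                         /* software index (loc) */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("ETHTOOL_SRXCLSRLINS");
            return 1;
        }
        return 0;
    }
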
2524 
2525 static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
2526  struct ethtool_rxnfc *cmd)
2527 {
2528  struct ethtool_rx_flow_spec *fsp =
2529  (struct ethtool_rx_flow_spec *)&cmd->fs;
2530  int err;
2531 
2532  spin_lock(&adapter->fdir_perfect_lock);
2533  err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
2534  spin_unlock(&adapter->fdir_perfect_lock);
2535 
2536  return err;
2537 }
2538 
2539 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2540  IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2541 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
2542  struct ethtool_rxnfc *nfc)
2543 {
2544  u32 flags2 = adapter->flags2;
2545 
2546  /*
2547  * RSS does not support anything other than hashing
2548  * to queues on src and dst IPs and ports
2549  */
2550  if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2551  RXH_L4_B_0_1 | RXH_L4_B_2_3))
2552  return -EINVAL;
2553 
2554  switch (nfc->flow_type) {
2555  case TCP_V4_FLOW:
2556  case TCP_V6_FLOW:
2557  if (!(nfc->data & RXH_IP_SRC) ||
2558  !(nfc->data & RXH_IP_DST) ||
2559  !(nfc->data & RXH_L4_B_0_1) ||
2560  !(nfc->data & RXH_L4_B_2_3))
2561  return -EINVAL;
2562  break;
2563  case UDP_V4_FLOW:
2564  if (!(nfc->data & RXH_IP_SRC) ||
2565  !(nfc->data & RXH_IP_DST))
2566  return -EINVAL;
2567  switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2568  case 0:
2569  flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2570  break;
2571  case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2572  flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
2573  break;
2574  default:
2575  return -EINVAL;
2576  }
2577  break;
2578  case UDP_V6_FLOW:
2579  if (!(nfc->data & RXH_IP_SRC) ||
2580  !(nfc->data & RXH_IP_DST))
2581  return -EINVAL;
2582  switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2583  case 0:
2584  flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2585  break;
2586  case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2587  flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
2588  break;
2589  default:
2590  return -EINVAL;
2591  }
2592  break;
2593  case AH_ESP_V4_FLOW:
2594  case AH_V4_FLOW:
2595  case ESP_V4_FLOW:
2596  case SCTP_V4_FLOW:
2597  case AH_ESP_V6_FLOW:
2598  case AH_V6_FLOW:
2599  case ESP_V6_FLOW:
2600  case SCTP_V6_FLOW:
2601  if (!(nfc->data & RXH_IP_SRC) ||
2602  !(nfc->data & RXH_IP_DST) ||
2603  (nfc->data & RXH_L4_B_0_1) ||
2604  (nfc->data & RXH_L4_B_2_3))
2605  return -EINVAL;
2606  break;
2607  default:
2608  return -EINVAL;
2609  }
2610 
2611  /* if we changed something we need to update flags */
2612  if (flags2 != adapter->flags2) {
2613  struct ixgbe_hw *hw = &adapter->hw;
2614  u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2615 
2616  if ((flags2 & UDP_RSS_FLAGS) &&
2617  !(adapter->flags2 & UDP_RSS_FLAGS))
2618  e_warn(drv, "enabling UDP RSS: fragmented packets"
2619  " may arrive out of order to the stack above\n");
2620 
2621  adapter->flags2 = flags2;
2622 
2623  /* Perform hash on these packet types */
2624  mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2625  | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2626  | IXGBE_MRQC_RSS_FIELD_IPV6
2627  | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2628 
2629  mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2630  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2631 
2632  if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2633  mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2634 
2635  if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2636  mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2637 
2638  IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2639  }
2640 
2641  return 0;
2642 }
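
ixgbe_set_rss_hash_opt() backs the ETHTOOL_SRXFH ioctl. A minimal sketch that turns on 4-tuple hashing for UDP/IPv4, the equivalent of `ethtool -N eth0 rx-flow-hash udp4 sdfn` ("eth0" is a placeholder); as the driver warns above, fragmented UDP may then arrive out of order:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXFH;
        nfc.flow_type = UDP_V4_FLOW;
        /* hash on src/dst IP and both port halves ("sdfn" in ethtool terms) */
        nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
            perror("ETHTOOL_SRXFH");
            return 1;
        }
        return 0;
    }
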
2643 
2644 static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2645 {
2646  struct ixgbe_adapter *adapter = netdev_priv(dev);
2647  int ret = -EOPNOTSUPP;
2648 
2649  switch (cmd->cmd) {
2650  case ETHTOOL_SRXCLSRLINS:
2651  ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
2652  break;
2653  case ETHTOOL_SRXCLSRLDEL:
2654  ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
2655  break;
2656  case ETHTOOL_SRXFH:
2657  ret = ixgbe_set_rss_hash_opt(adapter, cmd);
2658  break;
2659  default:
2660  break;
2661  }
2662 
2663  return ret;
2664 }
2665 
2666 static int ixgbe_get_ts_info(struct net_device *dev,
2667  struct ethtool_ts_info *info)
2668 {
2669  struct ixgbe_adapter *adapter = netdev_priv(dev);
2670 
2671  switch (adapter->hw.mac.type) {
2672 #ifdef CONFIG_IXGBE_PTP
2673  case ixgbe_mac_X540:
2674  case ixgbe_mac_82599EB:
2675  info->so_timestamping =
2676  SOF_TIMESTAMPING_TX_SOFTWARE |
2677  SOF_TIMESTAMPING_RX_SOFTWARE |
2678  SOF_TIMESTAMPING_SOFTWARE |
2679  SOF_TIMESTAMPING_TX_HARDWARE |
2680  SOF_TIMESTAMPING_RX_HARDWARE |
2681  SOF_TIMESTAMPING_RAW_HARDWARE;
2682 
2683  if (adapter->ptp_clock)
2684  info->phc_index = ptp_clock_index(adapter->ptp_clock);
2685  else
2686  info->phc_index = -1;
2687 
2688  info->tx_types =
2689  (1 << HWTSTAMP_TX_OFF) |
2690  (1 << HWTSTAMP_TX_ON);
2691 
2692  info->rx_filters =
2693  (1 << HWTSTAMP_FILTER_NONE) |
2694  (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2695  (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2696  (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2697  break;
2698 #endif /* CONFIG_IXGBE_PTP */
2699  default:
2700  return ethtool_op_get_ts_info(dev, info);
2701  break;
2702  }
2703  return 0;
2704 }
2705 
2706 static const struct ethtool_ops ixgbe_ethtool_ops = {
2707  .get_settings = ixgbe_get_settings,
2708  .set_settings = ixgbe_set_settings,
2709  .get_drvinfo = ixgbe_get_drvinfo,
2710  .get_regs_len = ixgbe_get_regs_len,
2711  .get_regs = ixgbe_get_regs,
2712  .get_wol = ixgbe_get_wol,
2713  .set_wol = ixgbe_set_wol,
2714  .nway_reset = ixgbe_nway_reset,
2715  .get_link = ethtool_op_get_link,
2716  .get_eeprom_len = ixgbe_get_eeprom_len,
2717  .get_eeprom = ixgbe_get_eeprom,
2718  .set_eeprom = ixgbe_set_eeprom,
2719  .get_ringparam = ixgbe_get_ringparam,
2720  .set_ringparam = ixgbe_set_ringparam,
2721  .get_pauseparam = ixgbe_get_pauseparam,
2722  .set_pauseparam = ixgbe_set_pauseparam,
2723  .get_msglevel = ixgbe_get_msglevel,
2724  .set_msglevel = ixgbe_set_msglevel,
2725  .self_test = ixgbe_diag_test,
2726  .get_strings = ixgbe_get_strings,
2727  .set_phys_id = ixgbe_set_phys_id,
2728  .get_sset_count = ixgbe_get_sset_count,
2729  .get_ethtool_stats = ixgbe_get_ethtool_stats,
2730  .get_coalesce = ixgbe_get_coalesce,
2731  .set_coalesce = ixgbe_set_coalesce,
2732  .get_rxnfc = ixgbe_get_rxnfc,
2733  .set_rxnfc = ixgbe_set_rxnfc,
2734  .get_ts_info = ixgbe_get_ts_info,
2735 };
2736 
2737 void ixgbe_set_ethtool_ops(struct net_device *netdev)
2738 {
2739  SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
2740 }