en_ethtool.c
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>

#include "mlx4_en.h"
#include "en_port.h"

#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
#define EN_ETHTOOL_WORD_MASK  cpu_to_be32(0xffffffff)
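
/*
 * Editor's note on the macros above: bit 63 of an ethtool ring_cookie
 * marks a direct QP attach, in which case the low bits carry a raw QP
 * number rather than an RX ring index (see mlx4_en_flow_replace() below).
 * The two mask macros are the all-ones port and address masks, in
 * big-endian form, used for exact-match flow steering rules.
 */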

static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		"%d.%d.%d",
		(u16) (mdev->dev->caps.fw_ver >> 32),
		(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		(u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static const char main_strings[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "tx_chksum_offload",

	/* packet statistics */
	"broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
	"rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
	"tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
	"tx_prio_6", "tx_prio_7",
};
#define NUM_MAIN_STATS	21
#define NUM_ALL_STATS	(NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
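
/*
 * Editor's note: the order of the strings above must match the field
 * order of the stats structures (net_device_stats, the port stats and
 * the packet stats), because mlx4_en_get_ethtool_stats() below reads
 * the counters by casting those structures to arrays of unsigned long.
 */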

static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};

static u32 mlx4_en_get_msglevel(struct net_device *dev)
{
	return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
}

static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
{
	((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
}

static void mlx4_en_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	int err = 0;
	u64 config = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2)) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask)) {
		wol->supported = 0;
		wol->wolopts = 0;
		return;
	}

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL information\n");
		return;
	}

	if (config & MLX4_EN_WOL_MAGIC)
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;

	if (config & MLX4_EN_WOL_ENABLED)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
}

static int mlx4_en_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);
	u64 config = 0;
	int err = 0;
	u64 mask;

	if ((priv->port < 1) || (priv->port > 2))
		return -EOPNOTSUPP;

	mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
		MLX4_DEV_CAP_FLAG_WOL_PORT2;

	if (!(priv->mdev->dev->caps.flags & mask))
		return -EOPNOTSUPP;

	if (wol->supported & ~WAKE_MAGIC)
		return -EINVAL;

	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
	if (err) {
		en_err(priv, "Failed to get WoL info, unable to modify\n");
		return err;
	}

	if (wol->wolopts & WAKE_MAGIC) {
		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
			  MLX4_EN_WOL_MAGIC;
	} else {
		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
		config |= MLX4_EN_WOL_DO_MODIFY;
	}

	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
	if (err)
		en_err(priv, "Failed to set WoL information\n");

	return err;
}

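/*
 * Usage sketch (illustration, not part of the original file): the two
 * WoL handlers above back the standard ethtool commands, e.g.
 *
 *	ethtool -s ethN wol g	# enable magic-packet wake-up
 *	ethtool ethN		# shows "Supports Wake-on" / "Wake-on"
 *
 * Only WAKE_MAGIC is supported, and only on ports 1 and 2.
 */
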
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int bit_count = hweight64(priv->stats_bitmap);

	switch (sset) {
	case ETH_SS_STATS:
		return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
			(priv->tx_ring_num + priv->rx_ring_num) * 2;
	case ETH_SS_TEST:
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, j = 0;

	spin_lock_bh(&priv->stats_lock);

	if (!(priv->stats_bitmap)) {
		for (i = 0; i < NUM_MAIN_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->stats)[i];
		for (i = 0; i < NUM_PORT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->port_stats)[i];
		for (i = 0; i < NUM_PKT_STATS; i++)
			data[index++] =
				((unsigned long *) &priv->pkstats)[i];
	} else {
		for (i = 0; i < NUM_MAIN_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->stats)[i];
			j++;
		}
		for (i = 0; i < NUM_PORT_STATS; i++) {
			if ((priv->stats_bitmap >> j) & 1)
				data[index++] =
					((unsigned long *) &priv->port_stats)[i];
			j++;
		}
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i].packets;
		data[index++] = priv->tx_ring[i].bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i].packets;
		data[index++] = priv->rx_ring[i].bytes;
	}
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}

static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		if (!priv->stats_bitmap) {
			for (i = 0; i < NUM_MAIN_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i]);
			for (i = 0; i < NUM_PORT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i + NUM_MAIN_STATS]);
			for (i = 0; i < NUM_PKT_STATS; i++)
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[i + NUM_MAIN_STATS +
						    NUM_PORT_STATS]);
		} else
			for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
				if ((priv->stats_bitmap >> i) & 1) {
					strcpy(data +
					       (index++) * ETH_GSTRING_LEN,
					       main_strings[i]);
				}
				if (!(priv->stats_bitmap >> i))
					break;
			}
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
		}
		break;
	}
}

static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int trans_type;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->supported = SUPPORTED_10000baseT_Full;
	cmd->advertising = ADVERTISED_10000baseT_Full;

	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	trans_type = priv->port_state.transciver;
	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, -1);
		cmd->duplex = -1;
	}

	if (trans_type > 0 && trans_type <= 0xC) {
		cmd->port = PORT_FIBRE;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else if (trans_type == 0x80 || trans_type == 0) {
		cmd->port = PORT_TP;
		cmd->transceiver = XCVR_INTERNAL;
		cmd->supported |= SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
	} else {
		cmd->port = -1;
		cmd->transceiver = -1;
	}
	return 0;
}

static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	if ((cmd->autoneg == AUTONEG_ENABLE) ||
	    (ethtool_cmd_speed(cmd) != SPEED_10000) ||
	    (cmd->duplex != DUPLEX_FULL))
		return -EINVAL;

	/* Nothing to change */
	return 0;
}

static int mlx4_en_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	coal->tx_coalesce_usecs = priv->tx_usecs;
	coal->tx_max_coalesced_frames = priv->tx_frames;
	coal->rx_coalesce_usecs = priv->rx_usecs;
	coal->rx_max_coalesced_frames = priv->rx_frames;

	coal->pkt_rate_low = priv->pkt_rate_low;
	coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
	coal->pkt_rate_high = priv->pkt_rate_high;
	coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
	coal->rate_sample_interval = priv->sample_interval;
	coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
	return 0;
}

static int mlx4_en_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int err, i;

	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
		for (i = 0; i < priv->tx_ring_num; i++) {
			priv->tx_cq[i].moder_cnt = priv->tx_frames;
			priv->tx_cq[i].moder_time = priv->tx_usecs;
			if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
				en_warn(priv, "Failed changing moderation "
					      "for TX cq %d\n", i);
			}
		}
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	if (priv->adaptive_rx_coal)
		return 0;

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			return err;
	}
	return 0;
}

static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");

	return err;
}

static void mlx4_en_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	pause->tx_pause = priv->prof->tx_pause;
	pause->rx_pause = priv->prof->rx_pause;
}

static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;
	int i;

	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
					priv->rx_ring[0].size) &&
	    tx_size == priv->tx_ring[0].size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}

	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_cq[i].moder_cnt = priv->rx_frames;
		priv->rx_cq[i].moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static void mlx4_en_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	memset(param, 0, sizeof(*param));
	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
	param->rx_pending = priv->port_up ?
		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
	param->tx_pending = priv->tx_ring[0].size;
}

static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return priv->rx_ring_num;
}

static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
	int rss_rings;
	size_t n = priv->rx_ring_num;
	int err = 0;

	rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;

	while (n--) {
		ring_index[n] = rss_map->qps[n % rss_rings].qpn -
			rss_map->base_qpn;
	}

	return err;
}

static int mlx4_en_set_rxfh_indir(struct net_device *dev,
		const u32 *ring_index)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be a power of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}

	priv->prof->rss_rings = rss_rings;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
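
/*
 * Illustration (derived from the validation loop above, not original
 * source): with 8 RX rings, the table 0,1,2,3,0,1,2,3 confines RSS to
 * the first four rings (rss_rings = 4), while 0,1,2,3,4,5,6,7 uses all
 * of them.  Any table that does not follow the pattern i % rss_rings,
 * with rss_rings a power of two, is rejected with -EINVAL.
 */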

#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;
	u64 full_mac = ~0ull;
	u64 zero_mac = 0;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	switch (cmd->fs.flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		if (cmd->fs.m_ext.vlan_etype ||
		    !(cmd->fs.m_ext.vlan_tci == 0 ||
		      cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
			return -EINVAL;
	}

	return 0;
}

static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	struct mlx4_spec_list *spec_l3;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
	if (!spec_l3) {
		en_err(priv, "Fail to alloc ethtool rule.\n");
		return -ENOMEM;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;
}

static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	struct mlx4_spec_list *spec_l3;
	struct mlx4_spec_list *spec_l4;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
	spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
	if (!spec_l4 || !spec_l3) {
		en_err(priv, "Fail to alloc ethtool rule.\n");
		kfree(spec_l3);
		kfree(spec_l4);
		return -ENOMEM;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;
}

static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	u64 mac;
	__be64 be_mac;
	struct ethhdr *eth_spec;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_spec_list *spec_l2;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
	if (!spec_l2)
		return -ENOMEM;

	mac = priv->mac & MLX4_MAC_MASK;
	be_mac = cpu_to_be64(mac << 16);

	spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
	if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
		memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);

	if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
		spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
		spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
	}

	list_add_tail(&spec_l2->list, rule_list_h);

	switch (cmd->fs.flow_type & ~FLOW_EXT) {
	case ETHER_FLOW:
		eth_spec = &cmd->fs.h_u.ether_spec;
		memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}

static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
	};

	rule.port = priv->port;
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d.\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}

static int mlx4_en_flow_detach(struct net_device *dev,
			       struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[cmd->fs.location];
	if (!rule->id) {
		err = -ENOENT;
		goto out;
	}

	err = mlx4_flow_detach(priv->mdev->dev, rule->id);
	if (err) {
		en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
		       cmd->fs.location, rule->id);
		goto out;
	}
	rule->id = 0;
	memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
out:
	return err;
}

static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			    int loc)
{
	int err = 0;
	struct ethtool_flow_id *rule;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->ethtool_rules[loc];
	if (rule->id)
		memcpy(&cmd->fs, &rule->flow_spec,
		       sizeof(struct ethtool_rx_flow_spec));
	else
		err = -ENOENT;

	return err;
}

static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
{
	int i, res = 0;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		if (priv->ethtool_rules[i].id)
			res++;
	}
	return res;
}

static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int err = 0;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx4_en_flow_replace(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx4_en_flow_detach(dev, cmd);
		break;
	default:
		en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
		return -EINVAL;
	}

	return err;
}

const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_indir = mlx4_en_get_rxfh_indir,
	.set_rxfh_indir = mlx4_en_set_rxfh_indir,
};
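
/*
 * Registration sketch (illustration, not part of this file): the ops
 * table above is hooked into the net_device during netdev creation in
 * en_netdev.c.  A minimal sketch follows; the function name is
 * hypothetical, only SET_ETHTOOL_OPS() and the table name are real.
 */
#if 0	/* illustration only */
static void mlx4_en_example_init(struct net_device *dev)
{
	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
}
#endif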