Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
filter.c
Go to the documentation of this file.
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2010 Solarflare Communications Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published
7  * by the Free Software Foundation, incorporated herein by reference.
8  */
9 
10 #include <linux/in.h>
11 #include <net/ip.h>
12 #include "efx.h"
13 #include "filter.h"
14 #include "io.h"
15 #include "nic.h"
16 #include "regs.h"
17 
18 /* "Fudge factors" - difference between programmed value and actual depth.
19  * Due to pipelined implementation we need to program H/W with a value that
20  * is larger than the hop limit we want.
21  */
22 #define FILTER_CTL_SRCH_FUDGE_WILD 3
23 #define FILTER_CTL_SRCH_FUDGE_FULL 1
24 
25 /* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26  * We also need to avoid infinite loops in efx_filter_search() when the
27  * table is full.
28  */
29 #define FILTER_CTL_SRCH_MAX 200
30 
31 /* Don't try very hard to find space for performance hints, as this is
32  * counter-productive. */
33 #define FILTER_CTL_SRCH_HINT_MAX 5
34 
41 };
42 
47 };
48 
51  u32 offset; /* address of table relative to BAR */
52  unsigned size; /* number of entries */
53  unsigned step; /* step between entries */
54  unsigned used; /* number currently used */
55  unsigned long *used_bitmap;
58 };
59 
63 #ifdef CONFIG_RFS_ACCEL
64  u32 *rps_flow_id;
65  unsigned rps_expire_index;
66 #endif
67 };
68 
69 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
70  * key derived from the n-tuple. The initial LFSR state is 0xffff. */
71 static u16 efx_filter_hash(u32 key)
72 {
73  u16 tmp;
74 
75  /* First 16 rounds */
76  tmp = 0x1fff ^ key >> 16;
77  tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
78  tmp = tmp ^ tmp >> 9;
79  /* Last 16 rounds */
80  tmp = tmp ^ tmp << 13 ^ key;
81  tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82  return tmp ^ tmp >> 9;
83 }
84 
85 /* To allow for hash collisions, filter search continues at these
86  * increments from the first possible entry selected by the hash. */
87 static u16 efx_filter_increment(u32 key)
88 {
89  return key * 2 - 1;
90 }
91 
/* Map a filter spec to the table that holds it: spec types are grouped
 * four per table (type >> 2), and TX specs use the TX tables two table
 * IDs further on.
 * NOTE(review): compile-time sanity checks from the original source are
 * not visible in this extract. */
static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}
105 
106 static struct efx_filter_table *
107 efx_filter_spec_table(struct efx_filter_state *state,
108  const struct efx_filter_spec *spec)
109 {
110  if (spec->type == EFX_FILTER_UNSPEC)
111  return NULL;
112  else
113  return &state->table[efx_filter_spec_table_id(spec)];
114 }
115 
/* Forget the accumulated per-type hash-chain search depths; called once
 * @table has no entries left, so that searches (and the search limits
 * later pushed to hardware) start from zero again. */
static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
	memset(table->search_depth, 0, sizeof(table->search_depth));
}
120 
/* Write the current RX filtering configuration (per-type search limits
 * and, where the tables exist, the default-filter queue/RSS settings)
 * into the FR_BZ_RX_FILTER_CTL register.
 * NOTE(review): several continuation lines of the EFX_SET_OWORD_FIELD()
 * argument lists are missing from this extract; the visible code is
 * reproduced unchanged. */
static void efx_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t filter_ctl;

	/* Read-modify-write the control register */
	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,

	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
	if (table->size) {
		filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
		filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
	}

	table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	if (table->size) {
		filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
		table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
		filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
		!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
		filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
		table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
		filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
		!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}
175 
/* Write the current TX MAC-filter search limits into FR_AZ_TX_CFG.
 * NOTE(review): continuation lines of the EFX_SET_OWORD_FIELD()
 * argument lists are missing from this extract; the visible code is
 * reproduced unchanged. */
static void efx_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t tx_cfg;

	/* Read-modify-write the TX configuration register */
	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FILTER_TABLE_TX_MAC];
	if (table->size) {
		tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
		tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}
198 
199 static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
200  __be32 host1, __be16 port1,
201  __be32 host2, __be16 port2)
202 {
203  spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
204  spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
205  spec->data[2] = ntohl(host2);
206 }
207 
208 static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
209  __be32 *host1, __be16 *port1,
210  __be32 *host2, __be16 *port2)
211 {
212  *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
213  *port1 = htons(spec->data[0]);
214  *host2 = htonl(spec->data[2]);
215  *port2 = htons(spec->data[1] >> 16);
216 }
217 
/* NOTE(review): the kernel-doc and signature lines are missing from
 * this extract.  From the body this is efx_filter_set_ipv4_local(spec,
 * proto, host, port) -- set up a filter matching a local TCP or UDP
 * port with a wildcard remote end.  TODO: confirm against the original
 * source.  An EFX_BUG_ON_PARANOID flags check is also missing below. */
{
	__be32 host1;
	__be16 port1;

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	/* Port 0 cannot be matched */
	if (port == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_WILD;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_WILD;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	/* Filter is constructed in terms of source and destination,
	 * with the odd wrinkle that the ports are swapped in a UDP
	 * wildcard filter. We need to convert from local and remote
	 * (= zero for wildcard) addresses.
	 */
	host1 = 0;
	if (proto != IPPROTO_UDP) {
		port1 = 0;
	} else {
		port1 = port;
		port = 0;
	}

	__efx_filter_set_ipv4(spec, host1, port1, host, port);
	return 0;
}
267 
/* NOTE(review): the first signature line is missing from this extract.
 * This is efx_filter_get_ipv4_local() -- recover proto/host/port from
 * a TCP_WILD or UDP_WILD spec. */
			      u8 *proto, __be32 *host, __be16 *port)
{
	__be32 host1;
	__be16 port1;

	switch (spec->type) {
	case EFX_FILTER_TCP_WILD:
		*proto = IPPROTO_TCP;
		/* host1/port1 receive the (wildcard) remote end; discarded */
		__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
		return 0;
	case EFX_FILTER_UDP_WILD:
		*proto = IPPROTO_UDP;
		/* UDP wildcard filters store the ports swapped */
		__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
		return 0;
	default:
		return -EINVAL;
	}
}
287 
/* NOTE(review): the kernel-doc and signature lines are missing from
 * this extract.  This is efx_filter_set_ipv4_full(spec, proto, host,
 * port, rhost, rport) -- set up a fully-specified TCP/UDP 4-tuple
 * filter. */
			     __be32 rhost, __be16 rport)
{

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	/* Both local and remote ports must be non-zero */
	if (port == 0 || rport == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_FULL;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_FULL;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	/* Packed layout is remote (source) end first, then local */
	__efx_filter_set_ipv4(spec, rhost, rport, host, port);
	return 0;
}
324 
/* NOTE(review): the first signature line is missing from this extract.
 * This is efx_filter_get_ipv4_full() -- recover the full 4-tuple from
 * a TCP_FULL or UDP_FULL spec. */
			     u8 *proto, __be32 *host, __be16 *port,
			     __be32 *rhost, __be16 *rport)
{
	switch (spec->type) {
	case EFX_FILTER_TCP_FULL:
		*proto = IPPROTO_TCP;
		break;
	case EFX_FILTER_UDP_FULL:
		*proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	__efx_filter_get_ipv4(spec, rhost, rport, host, port);
	return 0;
}
343 
/* NOTE(review): the kernel-doc/signature lines and the continuation of
 * the EFX_BUG_ON_PARANOID() argument are missing from this extract.
 * This is efx_filter_set_eth_local(spec, vid, addr) -- match an
 * Ethernet destination MAC, optionally qualified by VLAN ID. */
			     u16 vid, const u8 *addr)
{
	EFX_BUG_ON_PARANOID(!(spec->flags &

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	/* Unspecified VID => wildcard VLAN match */
	if (vid == EFX_FILTER_VID_UNSPEC) {
		spec->type = EFX_FILTER_MAC_WILD;
		spec->data[0] = 0;
	} else {
		spec->type = EFX_FILTER_MAC_FULL;
		spec->data[0] = vid;
	}

	/* Pack the 6-byte MAC: bytes 2-5 in data[1], bytes 0-1 in data[2] */
	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
	spec->data[2] = addr[0] << 8 | addr[1];
	return 0;
}
372 
/* NOTE(review): the kernel-doc and signature are missing from this
 * extract.  From the body this is efx_filter_set_uc_def() -- set up a
 * unicast default (catch-all) filter.  The continuation of the
 * EFX_BUG_ON_PARANOID() argument is also missing. */
{
	EFX_BUG_ON_PARANOID(!(spec->flags &

	if (spec->type != EFX_FILTER_UNSPEC)
		return -EINVAL;

	spec->type = EFX_FILTER_UC_DEF;
	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
	return 0;
}
389 
/* NOTE(review): the kernel-doc and signature are missing from this
 * extract.  From the body this is efx_filter_set_mc_def() -- set up a
 * multicast default (catch-all) filter.  The continuation of the
 * EFX_BUG_ON_PARANOID() argument is also missing. */
{
	EFX_BUG_ON_PARANOID(!(spec->flags &

	if (spec->type != EFX_FILTER_UNSPEC)
		return -EINVAL;

	spec->type = EFX_FILTER_MC_DEF;
	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
	return 0;
}
406 
/* (Re)initialise one of the RX default filter entries (type is
 * EFX_FILTER_UC_DEF + filter_idx) and mark it in use; these entries
 * must always exist.
 * NOTE(review): a continuation line of the efx_filter_init_rx() call
 * (flags/queue arguments) is missing from this extract. */
static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	struct efx_filter_spec *spec = &table->spec[filter_idx];

	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
	spec->type = EFX_FILTER_UC_DEF + filter_idx;
	table->used_bitmap[0] |= 1 << filter_idx;
}
418 
/* NOTE(review): the first signature line is missing from this extract.
 * This is efx_filter_get_eth_local() -- recover the VLAN ID and MAC
 * address from a MAC_WILD or MAC_FULL spec. */
			     u16 *vid, u8 *addr)
{
	switch (spec->type) {
	case EFX_FILTER_MAC_WILD:
		*vid = EFX_FILTER_VID_UNSPEC;
		break;
	case EFX_FILTER_MAC_FULL:
		*vid = spec->data[0];
		break;
	default:
		return -EINVAL;
	}

	/* Unpack MAC: data[2] holds bytes 0-1, data[1] holds bytes 2-5 */
	addr[0] = spec->data[2] >> 8;
	addr[1] = spec->data[2];
	addr[2] = spec->data[1] >> 24;
	addr[3] = spec->data[1] >> 16;
	addr[4] = spec->data[1] >> 8;
	addr[5] = spec->data[1];
	return 0;
}
441 
/* Build a filter entry and return its n-tuple key.
 * NOTE(review): several lines (switch case labels for the MAC/DEF
 * tables and the EFX_POPULATE_OWORD_* macro names) are missing from
 * this extract; the visible code is reproduced unchanged. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
	/* Extra word folded into the key alongside data[0..2] */
	u32 data3;

	switch (efx_filter_spec_table_id(spec)) {
	case EFX_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
			       spec->type == EFX_FILTER_UDP_WILD);
		*filter,
		FRF_BZ_RSS_EN,
		!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
		FRF_BZ_SCATTER_EN,
		!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
		FRF_BZ_TCP_UDP, is_udp,
		FRF_BZ_RXQ_ID, spec->dmaq_id,
		EFX_DWORD_2, spec->data[2],
		EFX_DWORD_1, spec->data[1],
		EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	/* One filter spec per type */
		return spec->type - EFX_FILTER_UC_DEF;

	{
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		*filter,
		FRF_CZ_RMFT_RSS_EN,
		!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
		FRF_CZ_RMFT_SCATTER_EN,
		!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
		FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
		FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
		FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
		FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
		FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	{
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}
508 
509 static bool efx_filter_equal(const struct efx_filter_spec *left,
510  const struct efx_filter_spec *right)
511 {
512  if (left->type != right->type ||
513  memcmp(left->data, right->data, sizeof(left->data)))
514  return false;
515 
516  if (left->flags & EFX_FILTER_FLAG_TX &&
517  left->dmaq_id != right->dmaq_id)
518  return false;
519 
520  return true;
521 }
522 
/* Search @table for an entry matching @spec (whose n-tuple @key was
 * produced by efx_filter_build()).  Uses open addressing: start at the
 * hash of the key, step by the key-derived increment on collision.
 * Returns the entry index on success (with *depth_required set to the
 * 1-based number of probes used), -EBUSY/-ENOENT once depth_max probes
 * have been exhausted.
 * NOTE(review): one line of the depth_max computation (the insert-time
 * hint/normal search limits plus fudge) is missing from this extract. */
static int efx_filter_search(struct efx_filter_table *table,
			     struct efx_filter_spec *spec, u32 key,
			     bool for_insert, unsigned int *depth_required)
{
	unsigned hash, incr, filter_idx, depth, depth_max;

	hash = efx_filter_hash(key);
	incr = efx_filter_increment(key);

	filter_idx = hash & (table->size - 1);
	depth = 1;
	depth_max = (for_insert ?
		     (spec->priority <= EFX_FILTER_PRI_HINT ?
		     table->search_depth[spec->type]);

	for (;;) {
		/* Return success if entry is used and matches this spec
		 * or entry is unused and we are trying to insert.
		 */
		if (test_bit(filter_idx, table->used_bitmap) ?
		    efx_filter_equal(spec, &table->spec[filter_idx]) :
		    for_insert) {
			*depth_required = depth;
			return filter_idx;
		}

		/* Return failure if we reached the maximum search depth */
		if (depth == depth_max)
			return for_insert ? -EBUSY : -ENOENT;

		filter_idx = (filter_idx + incr) & (table->size - 1);
		++depth;
	}
}
558 
559 /*
560  * Construct/deconstruct external filter IDs. At least the RX filter
561  * IDs must be ordered by matching priority, for RX NFC semantics.
562  *
563  * Deconstruction needs to be robust against invalid IDs so that
564  * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
565  * accept user-provided IDs.
566  */
567 
568 #define EFX_FILTER_MATCH_PRI_COUNT 5
569 
/* Match priority for each filter type (lower = more specific match),
 * used as the high bits of external filter IDs so that RX IDs are
 * ordered by matching priority. */
static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
	[EFX_FILTER_TCP_FULL]	= 0,
	[EFX_FILTER_UDP_FULL]	= 0,
	[EFX_FILTER_TCP_WILD]	= 1,
	[EFX_FILTER_UDP_WILD]	= 1,
	[EFX_FILTER_MAC_FULL]	= 2,
	[EFX_FILTER_MAC_WILD]	= 3,
	[EFX_FILTER_UC_DEF]	= 4,
	[EFX_FILTER_MC_DEF]	= 4,
};
580 
/* Table backing each (direction, match priority) external-ID range.
 * NOTE(review): several rows of this initialiser are missing from this
 * extract; the visible rows are reproduced unchanged. */
static const enum efx_filter_table_id efx_filter_range_table[] = {
	EFX_FILTER_TABLE_RX_IP,		/* RX match pri 0 */
	EFX_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EFX_FILTER_TABLE_COUNT,		/* TX match pri 0; invalid */
	EFX_FILTER_TABLE_COUNT,		/* invalid */
	EFX_FILTER_TABLE_TX_MAC,	/* TX match pri 3 */
};
592 
593 #define EFX_FILTER_INDEX_WIDTH 13
594 #define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
595 
/* Construct an external filter ID: the match-priority range in the
 * high bits, the table index in the low EFX_FILTER_INDEX_WIDTH bits.
 * NOTE(review): the statement adjusting @range for TX (non-RX) filters
 * is missing from this extract. */
static inline u32
efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
{
	unsigned int range;

	range = efx_filter_type_match_pri[spec->type];
	if (!(spec->flags & EFX_FILTER_FLAG_RX))

	return range << EFX_FILTER_INDEX_WIDTH | index;
}
607 
608 static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
609 {
610  unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
611 
612  if (range < ARRAY_SIZE(efx_filter_range_table))
613  return efx_filter_range_table[range];
614  else
615  return EFX_FILTER_TABLE_COUNT; /* invalid */
616 }
617 
/* Extract the table index from an external filter ID. */
static inline unsigned int efx_filter_id_index(u32 id)
{
	return id & EFX_FILTER_INDEX_MASK;
}
622 
623 static inline u8 efx_filter_id_flags(u32 id)
624 {
625  unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
626 
627  if (range < EFX_FILTER_MATCH_PRI_COUNT)
628  return EFX_FILTER_FLAG_RX;
629  else
630  return EFX_FILTER_FLAG_TX;
631 }
632 
/* NOTE(review): the kernel-doc and signature are missing from this
 * extract.  From the body this is efx_filter_get_rx_id_limit(efx) --
 * return one greater than the largest possible RX filter ID, found by
 * scanning down from the highest match-priority range for a table
 * that exists on this NIC; 0 if none do. */
{
	struct efx_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_filter_table_id table_id;

	do {
		table_id = efx_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}
648 
/* NOTE(review): the kernel-doc and signature lines are missing from
 * this extract.  This is efx_filter_insert_filter(efx, spec, replace)
 * -- insert (or, if @replace, overwrite an equal-or-lower-priority
 * match for) a filter, returning its external ID on success or a
 * negative error code.  The declaration of the local 'filter' oword is
 * also missing from the extract. */
			    bool replace)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	struct efx_filter_spec *saved_spec;
	unsigned int filter_idx, depth = 0;
	u32 key;
	int rc;

	if (!table || table->size == 0)
		return -EINVAL;

	key = efx_filter_build(&filter, spec);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_depth=%d", __func__, spec->type,
		   table->search_depth[spec->type]);

	spin_lock_bh(&state->lock);

	rc = efx_filter_search(table, spec, key, true, &depth);
	if (rc < 0)
		goto out;
	filter_idx = rc;
	BUG_ON(filter_idx >= table->size);
	saved_spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap)) {
		/* Should we replace the existing filter? */
		if (!replace) {
			rc = -EEXIST;
			goto out;
		}
		/* Never replace a higher-priority filter */
		if (spec->priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
	} else {
		__set_bit(filter_idx, table->used_bitmap);
		++table->used;
	}
	*saved_spec = *spec;

	/* The RX default table is programmed via the filter control
	 * register, not per-row register writes. */
	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		efx_filter_push_rx_config(efx);
	} else {
		/* Widen the hardware search limit if this entry sits
		 * deeper in its hash chain than any before it. */
		if (table->search_depth[spec->type] < depth) {
			table->search_depth[spec->type] = depth;
			if (spec->flags & EFX_FILTER_FLAG_TX)
				efx_filter_push_tx_limits(efx);
			else
				efx_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec->type, filter_idx, spec->dmaq_id);
	rc = efx_filter_make_id(spec, filter_idx);

out:
	spin_unlock_bh(&state->lock);
	return rc;
}
728 
/* Remove the filter at @filter_idx from @table.  RX default entries
 * are reset rather than removed (they must always exist); other in-use
 * entries are cleared in software and their hardware row is
 * overwritten with the all-zero oword below. */
static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 unsigned int filter_idx)
{
	/* static => zero-initialised; written to blank the hardware row */
	static efx_oword_t filter;

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		/* RX default filters must always exist */
		efx_filter_reset_rx_def(efx, filter_idx);
		efx_filter_push_rx_config(efx);
	} else if (test_bit(filter_idx, table->used_bitmap)) {
		__clear_bit(filter_idx, table->used_bitmap);
		--table->used;
		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}
}
748 
/* NOTE(review): the kernel-doc and signature lines are missing from
 * this extract.  This is efx_filter_remove_id_safe(efx, priority,
 * filter_id) -- remove a filter identified by a user-supplied external
 * ID, validating the ID first.  filter_flags is computed but not used
 * in the visible code; its use may be in the missing lines -- TODO
 * confirm against the original source. */
			     u32 filter_id)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	struct efx_filter_spec *spec;
	u8 filter_flags;
	int rc;

	/* Reject IDs whose range decodes to an invalid table */
	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	/* Reject indexes beyond the end of the table */
	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	/* Only remove if in use and the priority matches exactly */
	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_filter_table_clear_entry(efx, table, filter_idx);
		if (table->used == 0)
			efx_filter_table_reset_search_depth(table);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}
798 
/* NOTE(review): the kernel-doc and signature lines are missing from
 * this extract.  This is efx_filter_get_filter_safe(efx, priority,
 * filter_id, spec_buf) -- copy out the spec for a user-supplied
 * external ID, validating the ID first.  filter_flags is computed but
 * not used in the visible code; its use may be in the missing lines --
 * TODO confirm against the original source. */
			    u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	u8 filter_flags;
	int rc;

	/* Reject IDs whose range decodes to an invalid table */
	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	/* Reject indexes beyond the end of the table */
	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	/* Only report if in use and the priority matches exactly */
	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		*spec_buf = *spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}
847 
/* Remove every filter in @table_id's table at or below the given
 * priority, resetting the search depths once the table is empty.
 * NOTE(review): the line declaring the priority parameter is missing
 * from this extract. */
static void efx_filter_table_clear(struct efx_nic *efx,
				   enum efx_filter_table_id table_id,
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		if (table->spec[filter_idx].priority <= priority)
			efx_filter_table_clear_entry(efx, table, filter_idx);
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
}
866 
/* Remove all RX filters at or below @priority from the IP and MAC
 * tables (RX default filters are reset, not removed, by the clear
 * path). */
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
877 
/* NOTE(review): the first signature line is missing from this extract.
 * This is efx_filter_count_rx_used(efx, priority) -- count in-use RX
 * filters with exactly the given priority. */
			 enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&state->lock);

	/* Walk all RX tables (RX_IP through RX_DEF) */
	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&state->lock);

	return count;
}
904 
/* NOTE(review): the first signature line is missing from this extract.
 * This is efx_filter_get_rx_ids(efx, priority, buf, size) -- fill @buf
 * with the external IDs of in-use RX filters at @priority.  Returns
 * the number of IDs written, or -EMSGSIZE if @buf is too small. */
			enum efx_filter_priority priority,
			u32 *buf, u32 size)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&state->lock);

	/* Walk all RX tables (RX_IP through RX_DEF) */
	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&state->lock);

	return count;
}
938 
/* Restore filter state after reset: re-write every in-use entry of
 * each register-backed table to hardware, then re-push the RX/TX
 * configuration registers.
 * NOTE(review): the declaration of the local 'filter' oword is missing
 * from this extract. */
void efx_restore_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_filter_push_rx_config(efx);
	efx_filter_push_tx_limits(efx);

	spin_unlock_bh(&state->lock);
}
971 
/* Allocate and initialise all filter state for @efx: per-table
 * identity/geometry (dependent on the NIC revision), a used-entry
 * bitmap and spec array per non-empty table, and (with RFS) the
 * flow-ID tracking array.  Returns 0 or -ENOMEM; on failure all
 * partial allocations are freed via efx_remove_filters().
 * NOTE(review): several lines assigning table offset/size/step values
 * are missing from this extract; the visible code is reproduced
 * unchanged. */
int efx_probe_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state;
	struct efx_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	spin_lock_init(&state->lock);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
					     sizeof(*state->rps_flow_id),
					     GFP_KERNEL);
		if (!state->rps_flow_id)
			goto fail;
#endif
		table = &state->table[EFX_FILTER_TABLE_RX_IP];
		table->id = EFX_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
		table->id = EFX_FILTER_TABLE_RX_MAC;

		table = &state->table[EFX_FILTER_TABLE_RX_DEF];
		table->id = EFX_FILTER_TABLE_RX_DEF;
		table->size = EFX_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FILTER_TABLE_TX_MAC];
		table->id = EFX_FILTER_TABLE_TX_MAC;
	}

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
		/* RX default filters must always exist */
		unsigned i;
		for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
			efx_filter_reset_rx_def(efx, i);
	}

	efx_filter_push_rx_config(efx);

	return 0;

fail:
	efx_remove_filters(efx);
	return -ENOMEM;
}
1047 
1048 void efx_remove_filters(struct efx_nic *efx)
1049 {
1050  struct efx_filter_state *state = efx->filter_state;
1051  enum efx_filter_table_id table_id;
1052 
1053  for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1054  kfree(state->table[table_id].used_bitmap);
1055  vfree(state->table[table_id].spec);
1056  }
1057 #ifdef CONFIG_RFS_ACCEL
1058  kfree(state->rps_flow_id);
1059 #endif
1060  kfree(state);
1061 }
1062 
1063 #ifdef CONFIG_RFS_ACCEL
1064 
/* RFS flow-steering entry point: parse an IPv4 TCP/UDP skb and insert
 * a matching full 4-tuple RX hint filter steering the flow to
 * @rxq_index.  Returns the inserted filter's ID (>= 0) or a negative
 * error code. */
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	/* Only IPv4 is supported */
	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	/* Fragments don't carry the full 4-tuple */
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	/* Build a HINT-priority full-match filter; local end is the
	 * packet's destination, remote end its source */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	/* replace=true: overwrite any existing equal-or-lower-priority match */
	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	state->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}
1113 
1114 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1115 {
1116  struct efx_filter_state *state = efx->filter_state;
1117  struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
1118  unsigned mask = table->size - 1;
1119  unsigned index;
1120  unsigned stop;
1121 
1122  if (!spin_trylock_bh(&state->lock))
1123  return false;
1124 
1125  index = state->rps_expire_index;
1126  stop = (index + quota) & mask;
1127 
1128  while (index != stop) {
1129  if (test_bit(index, table->used_bitmap) &&
1130  table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1131  rps_may_expire_flow(efx->net_dev,
1132  table->spec[index].dmaq_id,
1133  state->rps_flow_id[index], index)) {
1134  netif_info(efx, rx_status, efx->net_dev,
1135  "expiring filter %d [flow %u]\n",
1136  index, state->rps_flow_id[index]);
1137  efx_filter_table_clear_entry(efx, table, index);
1138  }
1139  index = (index + 1) & mask;
1140  }
1141 
1142  state->rps_expire_index = stop;
1143  if (table->used == 0)
1144  efx_filter_table_reset_search_depth(table);
1145 
1146  spin_unlock_bh(&state->lock);
1147  return true;
1148 }
1149 
1150 #endif /* CONFIG_RFS_ACCEL */