Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
net_driver.h
Go to the documentation of this file.
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2011 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10 
11 /* Common definitions for all Efx net driver code */
12 
13 #ifndef EFX_NET_DRIVER_H
14 #define EFX_NET_DRIVER_H
15 
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/if_vlan.h>
20 #include <linux/timer.h>
21 #include <linux/mdio.h>
22 #include <linux/list.h>
23 #include <linux/pci.h>
24 #include <linux/device.h>
25 #include <linux/highmem.h>
26 #include <linux/workqueue.h>
27 #include <linux/mutex.h>
28 #include <linux/vmalloc.h>
29 #include <linux/i2c.h>
30 
31 #include "enum.h"
32 #include "bitfield.h"
33 
34 /**************************************************************************
35  *
36  * Build definitions
37  *
38  **************************************************************************/
39 
40 #define EFX_DRIVER_VERSION "3.2"
41 
42 #ifdef DEBUG
43 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
44 #define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
45 #else
46 #define EFX_BUG_ON_PARANOID(x) do {} while (0)
47 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
48 #endif
49 
50 /**************************************************************************
51  *
52  * Efx data structures
53  *
54  **************************************************************************/
55 
56 #define EFX_MAX_CHANNELS 32U
57 #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
58 #define EFX_EXTRA_CHANNEL_IOV 0
59 #define EFX_EXTRA_CHANNEL_PTP 1
60 #define EFX_MAX_EXTRA_CHANNELS 2U
61 
62 /* Checksum generation is a per-queue option in hardware, so each
63  * queue visible to the networking core is backed by two hardware TX
64  * queues. */
65 #define EFX_MAX_TX_TC 2
66 #define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
67 #define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
68 #define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
69 #define EFX_TXQ_TYPES 4
70 #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
71 
72 /* Forward declare Precision Time Protocol (PTP) support structure. */
73 struct efx_ptp_data;
74 
75 struct efx_self_tests;
76 
90  void *addr;
92  unsigned int len;
93  unsigned int index;
94  unsigned int entries;
95 };
96 
110  union {
111  const struct sk_buff *skb;
112  void *heap_buf;
113  };
115  unsigned short flags;
116  unsigned short len;
117  unsigned short unmap_len;
118 };
119 #define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
120 #define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
121 #define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
122 #define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
123 
175 struct efx_tx_queue {
176  /* Members which don't change on the fast path */
178  unsigned queue;
184  unsigned int ptr_mask;
186 
187  /* Members used mainly on the completion path */
188  unsigned int read_count ____cacheline_aligned_in_smp;
189  unsigned int old_write_count;
190 
191  /* Members used only on the xmit path */
192  unsigned int insert_count ____cacheline_aligned_in_smp;
193  unsigned int write_count;
194  unsigned int old_read_count;
195  unsigned int tso_bursts;
196  unsigned int tso_long_headers;
197  unsigned int tso_packets;
198  unsigned int pushes;
199 
200  /* Members shared between paths and sometimes updated */
201  unsigned int empty_read_count ____cacheline_aligned_in_smp;
202 #define EFX_EMPTY_COUNT_VALID 0x80000000
203 };
204 
217  union {
218  struct sk_buff *skb;
219  struct page *page;
220  } u;
221  unsigned int len;
223 };
224 #define EFX_RX_BUF_PAGE 0x0001
225 #define EFX_RX_PKT_CSUMMED 0x0002
226 #define EFX_RX_PKT_DISCARD 0x0004
227 
240  unsigned refcnt;
242 
243  unsigned int __pad[0] ____cacheline_aligned;
244 };
245 
270 struct efx_rx_queue {
271  struct efx_nic *efx;
275  unsigned int ptr_mask;
276  bool enabled;
278 
282  unsigned int max_fill;
283  unsigned int fast_fill_trigger;
284  unsigned int min_fill;
285  unsigned int min_overfill;
286  unsigned int alloc_page_count;
287  unsigned int alloc_skb_count;
289  unsigned int slow_fill_count;
290 };
291 
301 struct efx_buffer {
302  void *addr;
304  unsigned int len;
305 };
306 
307 
312 };
313 
350 struct efx_channel {
351  struct efx_nic *efx;
352  int channel;
353  const struct efx_channel_type *type;
354  bool enabled;
355  int irq;
356  unsigned int irq_moderation;
361  unsigned int eventq_mask;
362  unsigned int eventq_read_ptr;
364 
365  unsigned int irq_count;
366  unsigned int irq_mod_score;
367 #ifdef CONFIG_RFS_ACCEL
368  unsigned int rfs_filters_added;
369 #endif
370 
373 
374  unsigned n_rx_tobe_disc;
378  unsigned n_rx_frm_trunc;
379  unsigned n_rx_overlength;
380  unsigned n_skbuff_leaks;
381 
382  /* Used to pipeline received packets in order to optimise memory
383  * access with prefetches.
384  */
386 
389 };
390 
406  int (*pre_probe)(struct efx_channel *);
408  void (*get_name)(struct efx_channel *, char *buf, size_t len);
409  struct efx_channel *(*copy)(const struct efx_channel *);
410  void (*receive_skb)(struct efx_channel *, struct sk_buff *);
412 };
413 
418 };
419 
/* Look up a value's human-readable name in a generated string table,
 * returning "(invalid)" for out-of-range values.  The whole expansion
 * is parenthesised (and the index argument re-parenthesised) so the
 * macro can be used safely inside larger expressions without the
 * conditional operator binding to surrounding operators.
 */
#define STRING_TABLE_LOOKUP(val, member) \
	(((val) < member ## _max) ? member ## _names[(val)] : "(invalid)")
422 
423 extern const char *const efx_loopback_mode_names[];
424 extern const unsigned int efx_loopback_mode_max;
425 #define LOOPBACK_MODE(efx) \
426  STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
427 
428 extern const char *const efx_reset_type_names[];
429 extern const unsigned int efx_reset_type_max;
430 #define RESET_TYPE(type) \
431  STRING_TABLE_LOOKUP(type, efx_reset_type)
432 
434  /* Be careful if altering to correct macro below */
438  EFX_INT_MODE_MAX /* Insert any new items before this */
439 };
440 #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
441 
442 enum nic_state {
443  STATE_UNINIT = 0, /* device being probed/removed or is frozen */
444  STATE_READY = 1, /* hardware ready and netdev registered */
445  STATE_DISABLED = 2, /* device disabled due to hardware errors */
446 };
447 
448 /*
449  * Alignment of page-allocated RX buffers
450  *
451  * Controls the number of bytes inserted at the start of an RX buffer.
452  * This is the equivalent of NET_IP_ALIGN [which controls the alignment
453  * of the skb->head for hardware DMA].
454  */
455 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
456 #define EFX_PAGE_IP_ALIGN 0
457 #else
458 #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
459 #endif
460 
461 /*
462  * Alignment of the skb->head which wraps a page-allocated RX buffer
463  *
464  * The skb allocated to wrap an rx_buffer can have this alignment. Since
465  * the data is memcpy'd from the rx_buf, it does not need to be equal to
466  * EFX_PAGE_IP_ALIGN.
467  */
468 #define EFX_PAGE_SKB_ALIGN 2
469 
470 /* Forward declaration */
471 struct efx_nic;
472 
473 /* Pseudo bit-mask flow control field */
474 #define EFX_FC_RX FLOW_CTRL_RX
475 #define EFX_FC_TX FLOW_CTRL_TX
476 #define EFX_FC_AUTO 4
477 
486  bool up;
487  bool fd;
489  unsigned int speed;
490 };
491 
492 static inline bool efx_link_state_equal(const struct efx_link_state *left,
493  const struct efx_link_state *right)
494 {
495  return left->up == right->up && left->fd == right->fd &&
496  left->fc == right->fc && left->speed == right->speed;
497 }
498 
518  int (*probe) (struct efx_nic *efx);
519  int (*init) (struct efx_nic *efx);
520  void (*fini) (struct efx_nic *efx);
521  void (*remove) (struct efx_nic *efx);
522  int (*reconfigure) (struct efx_nic *efx);
523  bool (*poll) (struct efx_nic *efx);
524  void (*get_settings) (struct efx_nic *efx,
525  struct ethtool_cmd *ecmd);
526  int (*set_settings) (struct efx_nic *efx,
527  struct ethtool_cmd *ecmd);
528  void (*set_npage_adv) (struct efx_nic *efx, u32);
529  int (*test_alive) (struct efx_nic *efx);
530  const char *(*test_name) (struct efx_nic *efx, unsigned int index);
531  int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
532  int (*get_module_eeprom) (struct efx_nic *efx,
533  struct ethtool_eeprom *ee,
534  u8 *data);
535  int (*get_module_info) (struct efx_nic *efx,
536  struct ethtool_modinfo *modinfo);
537 };
538 
553 };
554 
555 static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
556 {
557  return !!(mode & ~PHY_MODE_TX_DISABLED);
558 }
559 
560 /*
561  * Efx extended statistics
562  *
563  * Not all statistics are provided by all supported MACs. The purpose
564  * is this structure is to contain the raw statistics provided by each
565  * MAC.
566  */
629 };
630 
631 /* Number of bits used in a multicast filter hash address */
632 #define EFX_MCAST_HASH_BITS 8
633 
634 /* Number of (single-bit) entries in a multicast filter hash */
635 #define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
636 
637 /* An Efx multicast filter hash */
641 };
642 
643 struct efx_filter_state;
644 struct efx_vf;
645 struct vfdi_status;
646 
758 struct efx_nic {
759  /* The following fields should be written very rarely */
760 
761  char name[IFNAMSIZ];
762  struct pci_dev *pci_dev;
763  const struct efx_nic_type *type;
767  char workqueue_name[16];
771 
773  unsigned int timer_quantum_ns;
775  unsigned int irq_rx_moderation;
777 
779  unsigned long reset_pending;
780 
783  const struct efx_channel_type *
785 
786  unsigned rxq_entries;
787  unsigned txq_entries;
788  unsigned int txq_stop_thresh;
789  unsigned int txq_wake_thresh;
790 
791  unsigned tx_dc_base;
792  unsigned rx_dc_base;
793  unsigned sram_lim_qw;
795  unsigned n_channels;
796  unsigned n_rx_channels;
797  unsigned rss_spread;
799  unsigned n_tx_channels;
800  unsigned int rx_buffer_len;
801  unsigned int rx_buffer_order;
804 
805  unsigned int_error_count;
806  unsigned long int_error_expire;
807 
809  unsigned irq_zero_count;
810  unsigned irq_level;
812 
813 #ifdef CONFIG_SFC_MTD
814  struct list_head mtd_list;
815 #endif
816 
817  void *nic_data;
818 
819  struct mutex mac_lock;
822 
825 
827 
828  unsigned int phy_type;
829  const struct efx_phy_operations *phy_op;
830  void *phy_data;
832  unsigned int mdio_bus;
834 
837  unsigned int n_link_state_changes;
838 
842  unsigned fc_disable;
843 
847 
849 
851 
856 
857 #ifdef CONFIG_SFC_SRIOV
858  struct efx_channel *vfdi_channel;
859  struct efx_vf *vf;
860  unsigned vf_count;
861  unsigned vf_init_count;
862  unsigned vi_scale;
863  unsigned vf_buftbl_base;
864  struct efx_buffer vfdi_status;
865  struct list_head local_addr_list;
866  struct list_head local_page_list;
867  struct mutex local_lock;
868  struct work_struct peer_work;
869 #endif
870 
871 #ifdef CONFIG_SFC_PTP
872  struct efx_ptp_data *ptp_data;
873 #endif
874 
875  /* The following fields may be written more often */
876 
883 };
884 
885 static inline int efx_dev_registered(struct efx_nic *efx)
886 {
887  return efx->net_dev->reg_state == NETREG_REGISTERED;
888 }
889 
890 static inline unsigned int efx_port_num(struct efx_nic *efx)
891 {
892  return efx->net_dev->dev_id;
893 }
894 
945 struct efx_nic_type {
946  int (*probe)(struct efx_nic *efx);
947  void (*remove)(struct efx_nic *efx);
948  int (*init)(struct efx_nic *efx);
950  void (*fini)(struct efx_nic *efx);
951  void (*monitor)(struct efx_nic *efx);
954  int (*reset)(struct efx_nic *efx, enum reset_type method);
955  int (*probe_port)(struct efx_nic *efx);
956  void (*remove_port)(struct efx_nic *efx);
958  void (*prepare_flush)(struct efx_nic *efx);
959  void (*update_stats)(struct efx_nic *efx);
960  void (*start_stats)(struct efx_nic *efx);
961  void (*stop_stats)(struct efx_nic *efx);
962  void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
964  int (*reconfigure_port)(struct efx_nic *efx);
965  int (*reconfigure_mac)(struct efx_nic *efx);
966  bool (*check_mac_fault)(struct efx_nic *efx);
967  void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
968  int (*set_wol)(struct efx_nic *efx, u32 type);
969  void (*resume_wol)(struct efx_nic *efx);
970  int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
971  int (*test_nvram)(struct efx_nic *efx);
972 
973  int revision;
974  unsigned int mem_map_size;
975  unsigned int txd_ptr_tbl_base;
976  unsigned int rxd_ptr_tbl_base;
977  unsigned int buf_tbl_base;
978  unsigned int evq_ptr_tbl_base;
979  unsigned int evq_rptr_tbl_base;
981  unsigned int rx_buffer_hash_size;
982  unsigned int rx_buffer_padding;
983  unsigned int max_interrupt_mode;
984  unsigned int phys_addr_channels;
985  unsigned int timer_period_max;
987 };
988 
989 /**************************************************************************
990  *
991  * Prototypes and inline functions
992  *
993  *************************************************************************/
994 
995 static inline struct efx_channel *
996 efx_get_channel(struct efx_nic *efx, unsigned index)
997 {
998  EFX_BUG_ON_PARANOID(index >= efx->n_channels);
999  return efx->channel[index];
1000 }
1001 
/* Iterate over all used channels */
/* Walks (_efx)->channel[] from index 0 upwards and terminates by
 * yielding NULL after the last used channel (n_channels - 1).
 */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
1008 
/* Iterate over all used channels in reverse */
/* Starts at channel n_channels - 1 and walks down, yielding NULL to
 * terminate once channel 0 has been visited.
 */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)
1015 
1016 static inline struct efx_tx_queue *
1017 efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
1018 {
1019  EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
1020  type >= EFX_TXQ_TYPES);
1021  return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
1022 }
1023 
1024 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
1025 {
1026  return channel->channel - channel->efx->tx_channel_offset <
1027  channel->efx->n_tx_channels;
1028 }
1029 
1030 static inline struct efx_tx_queue *
1031 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
1032 {
1033  EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
1034  type >= EFX_TXQ_TYPES);
1035  return &channel->tx_queue[type];
1036 }
1037 
1038 static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
1039 {
1040  return !(tx_queue->efx->net_dev->num_tc < 2 &&
1041  tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
1042 }
1043 
/* Iterate over all TX queues belonging to a channel */
/* The empty "if" arm makes the macro expand to a single statement,
 * keeping it safe after an un-braced if/else.  Iteration stops early
 * at the first queue reported unused by efx_tx_queue_used().
 */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)
1053 
/* Iterate over all possible TX queues belonging to a channel */
/* Unlike efx_for_each_channel_tx_queue(), visits every one of the
 * EFX_TXQ_TYPES queues, including those currently unused.  The empty
 * "if" arm keeps the macro safe after an un-braced if/else.
 */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)
1062 
1063 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
1064 {
1065  return channel->rx_queue.core_index >= 0;
1066 }
1067 
1068 static inline struct efx_rx_queue *
1069 efx_channel_get_rx_queue(struct efx_channel *channel)
1070 {
1071  EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
1072  return &channel->rx_queue;
1073 }
1074 
/* Iterate over all RX queues belonging to a channel */
/* A channel has at most one RX queue, so this "loop" runs the body
 * either zero times (no core RX queue) or exactly once (the iterator
 * is set to NULL after the first pass).  The empty "if" arm keeps the
 * macro safe after an un-braced if/else.
 */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)
1083 
/* Map an RX queue back to its containing channel.  Valid because the
 * rx_queue is embedded directly inside struct efx_channel.
 */
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}
1089 
1090 static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
1091 {
1092  return efx_rx_queue_channel(rx_queue)->channel;
1093 }
1094 
1095 /* Returns a pointer to the specified receive buffer in the RX
1096  * descriptor queue.
1097  */
1098 static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
1099  unsigned int index)
1100 {
1101  return &rx_queue->buffer[index];
1102 }
1103 
1104 
/* Maximum frame length for a given MTU: MTU plus Ethernet header, one
 * VLAN tag and 4-byte FCS, rounded up to a multiple of 8, plus 16
 * bytes of slack (presumably for a hash prefix and/or a second VLAN
 * tag — TODO confirm against the RX buffer sizing code).
 */
#define EFX_MAX_FRAME_LEN(mtu) \
	((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
1124 
1125 static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
1126 {
1127  return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
1128 }
1129 static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
1130 {
1131  skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1132 }
1133 
1134 #endif /* EFX_NET_DRIVER_H */