Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
drivers
net
ethernet
intel
ixgbe
ixgbe.h
Go to the documentation of this file.
1
/*******************************************************************************
2
3
Intel 10 Gigabit PCI Express Linux driver
4
Copyright(c) 1999 - 2012 Intel Corporation.
5
6
This program is free software; you can redistribute it and/or modify it
7
under the terms and conditions of the GNU General Public License,
8
version 2, as published by the Free Software Foundation.
9
10
This program is distributed in the hope it will be useful, but WITHOUT
11
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13
more details.
14
15
You should have received a copy of the GNU General Public License along with
16
this program; if not, write to the Free Software Foundation, Inc.,
17
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19
The full GNU General Public License is included in this distribution in
20
the file called "COPYING".
21
22
Contact Information:
23
e1000-devel Mailing List <
[email protected]
>
24
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26
*******************************************************************************/
27
28
#ifndef _IXGBE_H_
29
#define _IXGBE_H_
30
31
#include <linux/bitops.h>
32
#include <linux/types.h>
33
#include <linux/pci.h>
34
#include <linux/netdevice.h>
35
#include <
linux/cpumask.h
>
36
#include <
linux/aer.h
>
37
#include <linux/if_vlan.h>
38
39
#ifdef CONFIG_IXGBE_PTP
40
#include <
linux/clocksource.h
>
41
#include <
linux/net_tstamp.h
>
42
#include <
linux/ptp_clock_kernel.h
>
43
#endif
/* CONFIG_IXGBE_PTP */
44
45
#include "
ixgbe_type.h
"
46
#include "
ixgbe_common.h
"
47
#include "
ixgbe_dcb.h
"
48
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
49
#define IXGBE_FCOE
50
#include "
ixgbe_fcoe.h
"
51
#endif
/* CONFIG_FCOE or CONFIG_FCOE_MODULE */
52
#ifdef CONFIG_IXGBE_DCA
53
#include <
linux/dca.h
>
54
#endif
55
56
/* common prefix used by pr_<> macros */
57
#undef pr_fmt
58
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59
60
/* TX/RX descriptor defines */
61
#define IXGBE_DEFAULT_TXD 512
62
#define IXGBE_DEFAULT_TX_WORK 256
63
#define IXGBE_MAX_TXD 4096
64
#define IXGBE_MIN_TXD 64
65
66
#define IXGBE_DEFAULT_RXD 512
67
#define IXGBE_MAX_RXD 4096
68
#define IXGBE_MIN_RXD 64
69
70
/* flow control */
71
#define IXGBE_MIN_FCRTL 0x40
72
#define IXGBE_MAX_FCRTL 0x7FF80
73
#define IXGBE_MIN_FCRTH 0x600
74
#define IXGBE_MAX_FCRTH 0x7FFF0
75
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
76
#define IXGBE_MIN_FCPAUSE 0
77
#define IXGBE_MAX_FCPAUSE 0xFFFF
78
79
/* Supported Rx Buffer Sizes */
80
#define IXGBE_RXBUFFER_256 256
/* Used for skb receive header */
81
#define IXGBE_RXBUFFER_2K 2048
82
#define IXGBE_RXBUFFER_3K 3072
83
#define IXGBE_RXBUFFER_4K 4096
84
#define IXGBE_MAX_RXBUFFER 16384
/* largest size for a single descriptor */
85
86
/*
87
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
88
* reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
89
* this adds up to 448 bytes of extra data.
90
*
91
* Since netdev_alloc_skb now allocates a page fragment we can use a value
92
* of 256 and the resultant skb will have a truesize of 960 or less.
93
*/
94
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
95
96
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
97
98
/* How many Rx Buffers do we bundle into one write to the hardware ? */
99
#define IXGBE_RX_BUFFER_WRITE 16
/* Must be power of 2 */
100
101
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
102
#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
103
#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
104
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
105
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
106
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
107
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
108
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
109
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
110
#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
111
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
112
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
113
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
114
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
115
116
#define IXGBE_MAX_VF_MC_ENTRIES 30
117
#define IXGBE_MAX_VF_FUNCTIONS 64
118
#define IXGBE_MAX_VFTA_ENTRIES 128
119
#define MAX_EMULATION_MAC_ADDRS 16
120
#define IXGBE_MAX_PF_MACVLANS 15
121
#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
122
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
123
#define IXGBE_X540_VF_DEVICE_ID 0x1515
124
125
/* Per-virtual-function state the PF keeps for each SR-IOV VF
 * (see ixgbe_adapter.vfinfo, sized by num_vfs).
 */
struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;		/* valid entries in vf_mc_hashes[] */
	u16 default_vf_vlan_id;
	u16 vlans_enabled;
	bool clear_to_send;
	bool pf_set_mac;		/* true if PF assigned the VF MAC */
	u16 pf_vlan;	/* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	u16 vlan_count;
	u8 spoofchk_enabled;
};
139
140
/* One PF-owned MAC/VLAN filter entry assignable to a VF; kept on the
 * adapter's mv_list (see ixgbe_adapter.vf_mvs / mv_list).
 */
struct vf_macvlans {
	struct list_head l;	/* linkage into the adapter's macvlan list */
	int vf;			/* VF this entry is assigned to */
	int rar_entry;		/* hardware receive-address register index */
	bool free;		/* entry available for assignment */
	bool is_macvlan;
	u8 vf_macvlan[ETH_ALEN];
};
148
149
#define IXGBE_MAX_TXD_PWR 14
150
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
151
152
/* Tx Descriptors needed, worst case */
153
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
154
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
155
156
/* wrapper around a pointer to a socket buffer,
157
* so a DMA handle can be stored along with the buffer */
158
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;	/* last descriptor of this packet */
	unsigned long time_stamp;	/* jiffies when queued (hang detection) */
	struct sk_buff *skb;
	unsigned int bytecount;		/* total bytes for BQL/stats */
	unsigned short gso_segs;	/* segment count for stats */
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);	/* DMA address for unmap at completion */
	DEFINE_DMA_UNMAP_LEN(len);	/* mapped length for unmap at completion */
	u32 tx_flags;			/* IXGBE_TX_FLAGS_* for this buffer */
};
169
170
/* Per-descriptor receive buffer bookkeeping: the page backing the
 * descriptor plus its DMA mapping and current offset within the page.
 */
struct ixgbe_rx_buffer {
	struct sk_buff *skb;
	dma_addr_t dma;			/* DMA address of the mapped page */
	struct page *page;
	unsigned int page_offset;	/* offset of the buffer within page */
};
176
177
/* Generic per-queue counters shared by Tx and Rx rings. */
struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
};
181
182
/* Tx-ring-specific counters. */
struct ixgbe_tx_queue_stats {
	u64 restart_queue;	/* times the stopped queue was restarted */
	u64 tx_busy;		/* times transmit found the ring full */
	u64 tx_done_old;	/* completion watermark for hang detection */
};
187
188
/* Rx-ring-specific counters. */
struct ixgbe_rx_queue_stats {
	u64 rsc_count;			/* receive-side-coalesced packets */
	u64 rsc_flush;
	u64 non_eop_descs;		/* descriptors without end-of-packet */
	u64 alloc_rx_page_failed;	/* page allocation failures */
	u64 alloc_rx_buff_failed;	/* buffer allocation failures */
	u64 csum_err;			/* hardware checksum errors */
};
196
197
/* Bit numbers for the atomic ixgbe_ring.state field (used with
 * test_bit/set_bit/clear_bit via the helper macros below).
 */
enum ixgbe_ring_state_t {
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
};
205
206
#define check_for_tx_hang(ring) \
207
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
208
#define set_check_for_tx_hang(ring) \
209
set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
210
#define clear_check_for_tx_hang(ring) \
211
clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
212
#define ring_is_rsc_enabled(ring) \
213
test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
214
#define set_ring_rsc_enabled(ring) \
215
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
216
#define clear_ring_rsc_enabled(ring) \
217
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
218
/* Descriptor ring state shared by the Tx and Rx fast paths; node-aligned
 * to keep each ring's hot data on its own cacheline set.
 */
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
	union {
		/* a ring is either Tx or Rx, never both */
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;		/* __IXGBE_* ring state bits */
	u8 __iomem *tail;		/* mapped tail register for this ring */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index;			/* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u16 next_to_use;
	u16 next_to_clean;

	union {
		u16 next_to_alloc;	/* Rx only */
		struct {		/* Tx only: flow director ATR sampling */
			u8 atr_sample_rate;
			u8 atr_count;
		};
	};

	u8 dcb_tc;			/* traffic class for DCB */
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;	/* protects 64-bit stats on 32-bit hosts */
	union {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
} ____cacheline_internodealigned_in_smp;
260
261
/* Indices into ixgbe_adapter.ring_feature[], one per ring feature. */
enum ixgbe_ring_f_enum {
	RING_F_NONE = 0,
	RING_F_VMDQ,		/* SR-IOV uses the same ring feature */
	RING_F_RSS,
	RING_F_FDIR,
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */

	RING_F_ARRAY_SIZE	/* must be last in enum set */
};
272
273
#define IXGBE_MAX_RSS_INDICES 16
274
#define IXGBE_MAX_VMDQ_INDICES 64
275
#define IXGBE_MAX_FDIR_INDICES 64
276
#ifdef IXGBE_FCOE
277
#define IXGBE_MAX_FCOE_INDICES 8
278
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
279
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
280
#else
281
#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
282
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
283
#endif
/* IXGBE_FCOE */
284
/* Per-feature (RSS/VMDq/FDIR/FCoE) ring accounting; see RING_F_*. */
struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
290
291
#define IXGBE_82599_VMDQ_8Q_MASK 0x78
292
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
293
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
294
295
/*
296
* FCoE requires that all Rx buffers be over 2200 bytes in length. Since
297
* this is twice the size of a half page we need to double the page order
298
* for FCoE enabled Rx queues.
299
*/
300
/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
	/* FCoE rings need 3K buffers; on <8K pages a 4K buffer is used
	 * instead so two buffers still fit in an order-1 page */
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
					    IXGBE_RXBUFFER_3K;
#endif
	return IXGBE_RXBUFFER_2K;
}
309
310
/* Page allocation order for Rx buffers on this ring: order-1 only for
 * FCoE rings on small-page systems (see ixgbe_rx_bufsz), order-0 otherwise.
 */
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
#ifdef IXGBE_FCOE
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? 1 : 0;
#endif
	return 0;
}
318
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
319
320
/* Group of rings (Tx or Rx) serviced by one q_vector, plus the
 * accumulated work stats used for interrupt throttling.
 */
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
328
329
/* iterator for handling rings in ring container */
330
#define ixgbe_for_each_ring(pos, head) \
331
for (pos = (head).ring; pos != NULL; pos = pos->next)
332
333
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
334
? 8 : 1)
335
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
336
337
/* MAX_Q_VECTORS of these are allocated,
338
* but we only use one per queue-specific vector.
339
*/
340
/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	int numa_node;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];	/* IRQ name, e.g. "<ifname>-TxRx-<n>" —
					 * NOTE(review): exact format set by
					 * the .c side; confirm there */

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
360
#ifdef CONFIG_IXGBE_HWMON
361
362
#define IXGBE_HWMON_TYPE_LOC 0
363
#define IXGBE_HWMON_TYPE_TEMP 1
364
#define IXGBE_HWMON_TYPE_CAUTION 2
365
#define IXGBE_HWMON_TYPE_MAX 3
366
367
/* One hwmon sysfs attribute bound to a thermal sensor
 * (CONFIG_IXGBE_HWMON only).
 */
struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_hw *hw;
	struct ixgbe_thermal_diode_data *sensor;
	char name[12];		/* sysfs attribute name buffer */
};
373
374
/* Collection of hwmon attributes registered for one adapter. */
struct hwmon_buff {
	struct device *device;
	struct hwmon_attr *hwmon_list;	/* array of n_hwmon entries */
	unsigned int n_hwmon;
};
379
#endif
/* CONFIG_IXGBE_HWMON */
380
381
/*
382
* microsecond values for various ITR rates shifted by 2 to fit itr register
383
* with the first 3 bits reserved 0
384
*/
385
#define IXGBE_MIN_RSC_ITR 24
386
#define IXGBE_100K_ITR 40
387
#define IXGBE_20K_ITR 200
388
#define IXGBE_10K_ITR 400
389
#define IXGBE_8K_ITR 500
390
391
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
392
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	/* mask in little-endian so the descriptor word is not byte-swapped */
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
397
398
static
inline
u16
ixgbe_desc_unused(
struct
ixgbe_ring
*ring)
399
{
400
u16
ntc = ring->
next_to_clean
;
401
u16
ntu = ring->
next_to_use
;
402
403
return
((ntc > ntu) ? 0 : ring->
count
) + ntc - ntu - 1;
404
}
405
406
#define IXGBE_RX_DESC(R, i) \
407
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
408
#define IXGBE_TX_DESC(R, i) \
409
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
410
#define IXGBE_TX_CTXTDESC(R, i) \
411
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
412
413
#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728
/* Maximum Supported Size 9.5KB */
414
#ifdef IXGBE_FCOE
415
/* Use 3K as the baby jumbo frame size for FCoE */
416
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
417
#endif
/* IXGBE_FCOE */
418
419
#define OTHER_VECTOR 1
420
#define NON_Q_VECTORS (OTHER_VECTOR)
421
422
#define MAX_MSIX_VECTORS_82599 64
423
#define MAX_Q_VECTORS_82599 64
424
#define MAX_MSIX_VECTORS_82598 18
425
#define MAX_Q_VECTORS_82598 16
426
427
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
428
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
429
430
#define MIN_MSIX_Q_VECTORS 1
431
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
432
433
/* default to trying for four seconds */
434
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
435
436
/* board specific private data structure */
437
/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	unsigned long state;		/* __IXGBE_* adapter state bits */

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_CAPABLE			(u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED			(u32)(1 << 1)
#define IXGBE_FLAG_MSIX_CAPABLE			(u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED			(u32)(1 << 3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		(u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE		(u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_ENABLED		(u32)(1 << 6)
#define IXGBE_FLAG_IN_NETPOLL			(u32)(1 << 7)
#define IXGBE_FLAG_DCA_ENABLED			(u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE			(u32)(1 << 9)
#define IXGBE_FLAG_IMIR_ENABLED			(u32)(1 << 10)
#define IXGBE_FLAG_MQ_CAPABLE			(u32)(1 << 11)
#define IXGBE_FLAG_DCB_ENABLED			(u32)(1 << 12)
#define IXGBE_FLAG_VMDQ_CAPABLE			(u32)(1 << 13)
#define IXGBE_FLAG_VMDQ_ENABLED			(u32)(1 << 14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		(u32)(1 << 15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		(u32)(1 << 16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		(u32)(1 << 17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		(u32)(1 << 18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		(u32)(1 << 19)
#define IXGBE_FLAG_FCOE_CAPABLE			(u32)(1 << 20)
#define IXGBE_FLAG_FCOE_ENABLED			(u32)(1 << 21)
#define IXGBE_FLAG_SRIOV_CAPABLE		(u32)(1 << 22)
#define IXGBE_FLAG_SRIOV_ENABLED		(u32)(1 << 23)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			(u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED			(u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		(u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		(u32)(1 << 3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		(u32)(1 << 4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		(u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED		(u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED	(u32)(1 << 10)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 11)

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr_setting;
	u16 tx_work_limit;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr_setting;

	/* TX */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

	u64 restart_queue;
	u64 lsc_int;			/* link status change interrupts */
	u32 tx_timeout_count;

	/* RX */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u64 rsc_total_count;
	u64 rsc_total_flush;
	u64 non_eop_descs;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

	/* DCB parameters */
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	u8 dcb_set_bitmap;
	u8 dcbx_cap;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	u32 test_icr;		/* ICR value captured by the ethtool self-test */
	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;

	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	struct ixgbe_hw_stats stats;

	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

	u32 link_speed;
	bool link_up;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	/* Flow Director (ATR / perfect filters) */
	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow;	/* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	u32 fdir_pballoc;
	u32 atr_sample_rate;
	spinlock_t fdir_perfect_lock;	/* protects the perfect filter table */

#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u32 wol;			/* Wake-on-LAN settings */

	u16 bd_number;			/* board number (probe order) */

	u16 eeprom_verh;
	u16 eeprom_verl;
	u16 eeprom_cap;

	u32 interrupt_event;
	u32 led_reg;

#ifdef CONFIG_IXGBE_PTP
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	unsigned long last_overflow_check;
	spinlock_t tmreg_lock;		/* protects the timecounter registers */
	struct cyclecounter cc;
	struct timecounter tc;
	int rx_hwtstamp_filter;
	u32 base_incval;
	u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */

	/* SR-IOV */
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;	/* array of num_vfs entries */
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;	/* list head for VF macvlan entries */
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	u32 vferr_refcount;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
};
604
605
/* One user-installed Flow Director perfect filter, hashed on
 * ixgbe_adapter.fdir_filter_list.
 */
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
	u16 sw_idx;		/* software filter index */
	u16 action;		/* target Rx queue / drop action */
};
611
612
/* Bit numbers for the atomic ixgbe_adapter.state field. */
enum ixgbe_state_t {
	__IXGBE_TESTING,
	__IXGBE_RESETTING,
	__IXGBE_DOWN,
	__IXGBE_SERVICE_SCHED,
	__IXGBE_IN_SFP_INIT,
};
619
620
/* Driver-private data stored in skb->cb (see the IXGBE_CB macro below). */
struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
	dma_addr_t dma;
	u16 append_cnt;			/* number of skbs appended (RSC) */
	bool page_released;
};
629
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
630
631
/* Indices into the driver's board-info table, one per supported MAC. */
enum ixgbe_boards {
	board_82598,
	board_82599,
	board_X540,
};
636
637
extern
struct
ixgbe_info
ixgbe_82598_info
;
638
extern
struct
ixgbe_info
ixgbe_82599_info
;
639
extern
struct
ixgbe_info
ixgbe_X540_info
;
640
#ifdef CONFIG_IXGBE_DCB
641
extern
const
struct
dcbnl_rtnl_ops
dcbnl_ops
;
642
#endif
643
644
extern
char
ixgbe_driver_name
[];
645
extern
const
char
ixgbe_driver_version
[];
646
#ifdef IXGBE_FCOE
647
extern
char
ixgbe_default_device_descr[];
648
#endif
/* IXGBE_FCOE */
649
650
extern
void
ixgbe_up
(
struct
ixgbe_adapter
*
adapter
);
651
extern
void
ixgbe_down
(
struct
ixgbe_adapter
*
adapter
);
652
extern
void
ixgbe_reinit_locked
(
struct
ixgbe_adapter
*
adapter
);
653
extern
void
ixgbe_reset
(
struct
ixgbe_adapter
*
adapter
);
654
extern
void
ixgbe_set_ethtool_ops
(
struct
net_device
*netdev);
655
extern
int
ixgbe_setup_rx_resources
(
struct
ixgbe_ring
*);
656
extern
int
ixgbe_setup_tx_resources
(
struct
ixgbe_ring
*);
657
extern
void
ixgbe_free_rx_resources
(
struct
ixgbe_ring
*);
658
extern
void
ixgbe_free_tx_resources
(
struct
ixgbe_ring
*);
659
extern
void
ixgbe_configure_rx_ring
(
struct
ixgbe_adapter
*,
struct
ixgbe_ring
*);
660
extern
void
ixgbe_configure_tx_ring
(
struct
ixgbe_adapter
*,
struct
ixgbe_ring
*);
661
extern
void
ixgbe_disable_rx_queue
(
struct
ixgbe_adapter
*
adapter
,
662
struct
ixgbe_ring
*);
663
extern
void
ixgbe_update_stats
(
struct
ixgbe_adapter
*
adapter
);
664
extern
int
ixgbe_init_interrupt_scheme
(
struct
ixgbe_adapter
*
adapter
);
665
extern
int
ixgbe_wol_supported
(
struct
ixgbe_adapter
*
adapter
,
u16
device_id
,
666
u16
subdevice_id);
667
extern
void
ixgbe_clear_interrupt_scheme
(
struct
ixgbe_adapter
*
adapter
);
668
extern
netdev_tx_t
ixgbe_xmit_frame_ring
(
struct
sk_buff
*,
669
struct
ixgbe_adapter
*,
670
struct
ixgbe_ring
*);
671
extern
void
ixgbe_unmap_and_free_tx_resource
(
struct
ixgbe_ring
*,
672
struct
ixgbe_tx_buffer
*);
673
extern
void
ixgbe_alloc_rx_buffers
(
struct
ixgbe_ring
*,
u16
);
674
extern
void
ixgbe_write_eitr
(
struct
ixgbe_q_vector
*);
675
extern
int
ixgbe_poll
(
struct
napi_struct
*
napi
,
int
budget
);
676
extern
int
ethtool_ioctl
(
struct
ifreq
*ifr);
677
extern
s32
ixgbe_reinit_fdir_tables_82599
(
struct
ixgbe_hw
*
hw
);
678
extern
s32
ixgbe_init_fdir_signature_82599
(
struct
ixgbe_hw
*
hw
,
u32
fdirctrl);
679
extern
s32
ixgbe_init_fdir_perfect_82599
(
struct
ixgbe_hw
*
hw
,
u32
fdirctrl);
680
extern
s32
ixgbe_fdir_add_signature_filter_82599
(
struct
ixgbe_hw
*
hw
,
681
union
ixgbe_atr_hash_dword
input
,
682
union
ixgbe_atr_hash_dword
common
,
683
u8
queue
);
684
extern
s32
ixgbe_fdir_set_input_mask_82599
(
struct
ixgbe_hw
*
hw
,
685
union
ixgbe_atr_input
*input_mask);
686
extern
s32
ixgbe_fdir_write_perfect_filter_82599
(
struct
ixgbe_hw
*
hw
,
687
union
ixgbe_atr_input
*
input
,
688
u16
soft_id,
u8
queue
);
689
extern
s32
ixgbe_fdir_erase_perfect_filter_82599
(
struct
ixgbe_hw
*
hw
,
690
union
ixgbe_atr_input
*
input
,
691
u16
soft_id);
692
extern
void
ixgbe_atr_compute_perfect_hash_82599
(
union
ixgbe_atr_input
*
input
,
693
union
ixgbe_atr_input
*
mask
);
694
extern
void
ixgbe_set_rx_mode
(
struct
net_device
*netdev);
695
#ifdef CONFIG_IXGBE_DCB
696
extern
void
ixgbe_set_rx_drop_en(
struct
ixgbe_adapter
*
adapter
);
697
extern
int
ixgbe_setup_tc(
struct
net_device
*
dev
,
u8
tc
);
698
#endif
699
extern
void
ixgbe_tx_ctxtdesc
(
struct
ixgbe_ring
*,
u32
,
u32
,
u32
,
u32
);
700
extern
void
ixgbe_do_reset
(
struct
net_device
*netdev);
701
#ifdef CONFIG_IXGBE_HWMON
702
extern
void
ixgbe_sysfs_exit
(
struct
ixgbe_adapter
*
adapter
);
703
extern
int
ixgbe_sysfs_init
(
struct
ixgbe_adapter
*
adapter
);
704
#endif
/* CONFIG_IXGBE_HWMON */
705
#ifdef IXGBE_FCOE
706
extern
void
ixgbe_configure_fcoe
(
struct
ixgbe_adapter
*
adapter
);
707
extern
int
ixgbe_fso
(
struct
ixgbe_ring
*
tx_ring
,
708
struct
ixgbe_tx_buffer
*
first
,
709
u8
*
hdr_len
);
710
extern
int
ixgbe_fcoe_ddp
(
struct
ixgbe_adapter
*
adapter
,
711
union
ixgbe_adv_rx_desc
*
rx_desc
,
712
struct
sk_buff
*
skb
);
713
extern
int
ixgbe_fcoe_ddp_get
(
struct
net_device
*netdev,
u16
xid
,
714
struct
scatterlist
*
sgl
,
unsigned
int
sgc);
715
extern
int
ixgbe_fcoe_ddp_target
(
struct
net_device
*netdev,
u16
xid
,
716
struct
scatterlist
*
sgl
,
unsigned
int
sgc);
717
extern
int
ixgbe_fcoe_ddp_put
(
struct
net_device
*netdev,
u16
xid
);
718
extern
int
ixgbe_setup_fcoe_ddp_resources
(
struct
ixgbe_adapter
*
adapter
);
719
extern
void
ixgbe_free_fcoe_ddp_resources
(
struct
ixgbe_adapter
*
adapter
);
720
extern
int
ixgbe_fcoe_enable
(
struct
net_device
*netdev);
721
extern
int
ixgbe_fcoe_disable
(
struct
net_device
*netdev);
722
#ifdef CONFIG_IXGBE_DCB
723
extern
u8
ixgbe_fcoe_getapp(
struct
ixgbe_adapter
*
adapter
);
724
extern
u8
ixgbe_fcoe_setapp(
struct
ixgbe_adapter
*
adapter
,
u8
up
);
725
#endif
/* CONFIG_IXGBE_DCB */
726
extern
int
ixgbe_fcoe_get_wwn
(
struct
net_device
*netdev,
u64
*wwn,
int
type
);
727
extern
int
ixgbe_fcoe_get_hbainfo
(
struct
net_device
*netdev,
728
struct
netdev_fcoe_hbainfo *
info
);
729
extern
u8
ixgbe_fcoe_get_tc
(
struct
ixgbe_adapter
*
adapter
);
730
#endif
/* IXGBE_FCOE */
731
#ifdef CONFIG_DEBUG_FS
732
extern
void
ixgbe_dbg_adapter_init(
struct
ixgbe_adapter
*
adapter
);
733
extern
void
ixgbe_dbg_adapter_exit(
struct
ixgbe_adapter
*
adapter
);
734
extern
void
ixgbe_dbg_init(
void
);
735
extern
void
ixgbe_dbg_exit(
void
);
736
#endif
/* CONFIG_DEBUG_FS */
737
/* txring_txq - netdev stack queue backing the given Tx ring */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
741
742
#ifdef CONFIG_IXGBE_PTP
743
extern
void
ixgbe_ptp_init
(
struct
ixgbe_adapter
*
adapter
);
744
extern
void
ixgbe_ptp_stop
(
struct
ixgbe_adapter
*
adapter
);
745
extern
void
ixgbe_ptp_overflow_check
(
struct
ixgbe_adapter
*
adapter
);
746
extern
void
ixgbe_ptp_tx_hwtstamp
(
struct
ixgbe_q_vector
*q_vector,
747
struct
sk_buff
*
skb
);
748
extern
void
ixgbe_ptp_rx_hwtstamp
(
struct
ixgbe_q_vector
*q_vector,
749
union
ixgbe_adv_rx_desc
*
rx_desc
,
750
struct
sk_buff
*
skb
);
751
extern
int
ixgbe_ptp_hwtstamp_ioctl
(
struct
ixgbe_adapter
*
adapter
,
752
struct
ifreq
*ifr,
int
cmd
);
753
extern
void
ixgbe_ptp_start_cyclecounter
(
struct
ixgbe_adapter
*
adapter
);
754
extern
void
ixgbe_ptp_check_pps_event
(
struct
ixgbe_adapter
*
adapter
,
u32
eicr);
755
#endif
/* CONFIG_IXGBE_PTP */
756
757
#endif
/* _IXGBE_H_ */
Generated on Thu Jan 10 2013 14:02:38 for Linux Kernel by
1.8.2