Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
drivers
net
ethernet
chelsio
cxgb4
cxgb4.h
Go to the documentation of this file.
1
/*
2
* This file is part of the Chelsio T4 Ethernet driver for Linux.
3
*
4
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5
*
6
* This software is available to you under a choice of one of two
7
* licenses. You may choose to be licensed under the terms of the GNU
8
* General Public License (GPL) Version 2, available from the file
9
* COPYING in the main directory of this source tree, or the
10
* OpenIB.org BSD license below:
11
*
12
* Redistribution and use in source and binary forms, with or
13
* without modification, are permitted provided that the following
14
* conditions are met:
15
*
16
* - Redistributions of source code must retain the above
17
* copyright notice, this list of conditions and the following
18
* disclaimer.
19
*
20
* - Redistributions in binary form must reproduce the above
21
* copyright notice, this list of conditions and the following
22
* disclaimer in the documentation and/or other materials
23
* provided with the distribution.
24
*
25
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32
* SOFTWARE.
33
*/
34
35
#ifndef __CXGB4_H__
36
#define __CXGB4_H__
37
38
#include <linux/bitops.h>
39
#include <
linux/cache.h
>
40
#include <
linux/interrupt.h
>
41
#include <linux/list.h>
42
#include <linux/netdevice.h>
43
#include <linux/pci.h>
44
#include <
linux/spinlock.h
>
45
#include <
linux/timer.h
>
46
#include <
linux/vmalloc.h
>
47
#include <asm/io.h>
48
#include "
cxgb4_uld.h
"
49
#include "
t4_hw.h
"
50
51
#define FW_VERSION_MAJOR 1
52
#define FW_VERSION_MINOR 1
53
#define FW_VERSION_MICRO 0
54
55
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
56
57
/* Basic adapter sizing constants and VPD string lengths. */
enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
};
63
64
/* Adapter memory-target identifiers (EDC0, EDC1, external MC). */
enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC
};
69
70
/* PCIe memory-window apertures (bytes) and base addresses. */
enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
};
78
79
/* How strongly this PCI function wants to be the device master. */
enum dev_master {
	MASTER_CANT,	/* must not become master */
	MASTER_MAY,	/* may become master */
	MASTER_MUST	/* insists on being master */
};
84
85
/* Device initialization state as reported by / negotiated with firmware. */
enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};
90
91
/* Pause-frame / flow-control capability flags (bitmask). */
enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};
96
97
/* Per-port MAC statistics counters.  Field order presumably mirrors the
 * layout expected by the stats-reading code — do not reorder. */
struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};
163
164
/* Loopback-port statistics counters (same categories as port_stats,
 * without the per-direction split). */
struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;		/* frame-size histogram buckets */
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;		/* per-buffer-group overflow drops */
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;		/* per-buffer-group truncated packets */
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};
191
192
/* TP (Transport Processor) TCP statistics. */
struct tp_tcp_stats {
	u32 tcpOutRsts;		/* RST segments sent */
	u64 tcpInSegs;		/* segments received */
	u64 tcpOutSegs;		/* segments sent */
	u64 tcpRetransSegs;	/* segments retransmitted */
};
198
199
/* TP error statistics; the [4] arrays are presumably per-channel
 * (NCHAN == 4) — confirm against the stats-reading code. */
struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};
211
212
/* TP (Transport Processor) configuration parameters. */
struct tp_params {
	unsigned int ntxchan;        /* # of Tx channels */
	unsigned int tre;            /* log2 of core clocks per TP tick */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];	/* channel to modulation queue map */
};
219
220
/* Vital Product Data read from the adapter's EEPROM.
 * Strings are NUL-terminated, hence the +1 on each length. */
struct vpd_params {
	unsigned int cclk;		/* core clock, presumably in kHz —
					 * see core_ticks_per_usec() */
	u8 ec[EC_LEN + 1];		/* engineering change level */
	u8 sn[SERNUM_LEN + 1];		/* serial number */
	u8 id[ID_LEN + 1];		/* product ID */
};
226
227
/* PCIe link parameters. */
struct pci_params {
	unsigned char speed;		/* link speed */
	unsigned char width;		/* link width (# of lanes) */
};
231
232
/* Aggregate adapter configuration: sub-block parameters, flash layout,
 * firmware versions, congestion-control tables and port topology. */
struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */
	unsigned int sf_fw_start;         /* start of FW image in flash */

	unsigned int fw_vers;		  /* firmware version */
	unsigned int tp_vers;		  /* TP microcode version */
	u8 api_vers[7];			  /* API version string */

	unsigned short mtus[NMTUS];	  /* MTU table */
	unsigned short a_wnd[NCCTRL_WIN]; /* congestion-control window params */
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;		  /* bitmap of ports owned by this fn */
	unsigned char rev;                /* chip revision */
	unsigned char offload;		  /* nonzero if offload is enabled */

	unsigned char bypass;		  /* nonzero on bypass adapters */

	unsigned int ofldq_wr_cred;	  /* offload queue WR credits */
};
258
259
/* Packet-trace filter configuration. */
struct trace_params {
	u32 data[TRACE_LEN / 4];	/* match data */
	u32 mask[TRACE_LEN / 4];	/* match mask */
	unsigned short snap_len;	/* bytes of each packet to capture */
	unsigned short min_len;		/* minimum packet length to match */
	unsigned char skip_ofst;	/* offset before matching starts */
	unsigned char skip_len;		/* bytes to skip */
	unsigned char invert;		/* invert the match sense */
	unsigned char port;		/* port to trace */
};
269
270
/* State of a port's physical link plus the user's requested settings. */
struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};
280
281
#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
282
283
/* Upper bounds on the various queue-set arrays in struct sge. */
enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
};
289
290
/* Sizes of the SGE qid -> queue lookup maps. */
enum {
	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
};
294
295
struct
adapter
;
296
struct
sge_rspq
;
297
298
/* Per-port driver state; lives in the netdev's private area
 * (see netdev2pinfo()/adap2pinfo()). */
struct port_info {
	struct adapter *adapter;	/* backpointer to owning adapter */
	u16    viid;			/* virtual interface id */
	s16    xact_addr_filt;        /* index of exact MAC address filter */
	u16    rss_size;              /* size of VI's RSS table slice */
	s8     mdio_addr;		/* PHY MDIO address */
	u8     port_type;
	u8     mod_type;		/* SFP/transceiver module type */
	u8     port_id;
	u8     tx_chan;			/* Tx channel for this port */
	u8     lport;                 /* associated offload logical port */
	u8     nqsets;                /* # of qsets */
	u8     first_qset;            /* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;			/* VI's RSS table slice */
};
315
316
struct
dentry
;
317
struct
work_struct
;
318
319
enum {                                 /* adapter flags */
	/* Bit values are not contiguous: bits 3 and 8 are unused here. */
	FULL_INIT_DONE     = (1 << 0),
	USING_MSI          = (1 << 1),
	USING_MSIX         = (1 << 2),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};
329
330
struct
rx_sw_desc
;
331
332
struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
};
347
348
/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};
355
356
/* Handler invoked for each entry of an SGE response queue; @rsp is the
 * raw response descriptor and @gl the gathered packet (if any). */
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
358
359
struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
};
380
381
struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};
389
390
struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;	    /* response queue */
	struct sge_fl fl;	    /* free-buffer list feeding the queue */
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;
395
396
struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};
402
403
struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;	    /* response queue */
	struct sge_fl fl;	    /* free-buffer list feeding the queue */
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;
408
409
/* One 64-byte hardware Tx descriptor (8 flits of 8 bytes each). */
struct tx_desc {
	__be64 flit[8];
};
412
413
struct
tx_sw_desc
;
414
415
/* Common state shared by all SGE Tx queue variants. */
struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	spinlock_t db_lock;	    /* protects doorbell state below */
	int db_disabled;	    /* nonzero while doorbells are disabled */
	unsigned short db_pidx;	    /* last pidx written to the doorbell */
};
431
432
struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
440
441
struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
449
450
struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;
457
458
/* Top-level SGE (Scatter-Gather Engine) state: all Tx/Rx queues plus
 * the qid lookup maps and the FL/Tx maintenance timers. */
struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer values */
	u8 counter_val[SGE_NCOUNTERS];	/* interrupt packet-count thresholds */
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */
	unsigned int starve_thres;
	u8 idma_state[2];
	unsigned int egr_start;	    /* first egress queue id */
	unsigned int ingr_start;    /* first ingress queue id */
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};
496
497
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
498
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
499
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
500
501
struct
l2t_data
;
502
503
/* Main per-adapter state: PCI handles, register mapping, parameters,
 * SGE queues, ports, offload (TID/L2T) state and housekeeping tasks. */
struct adapter {
	void __iomem *regs;	    /* BAR mapping of the register space */
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;	    /* mailbox used for FW commands */
	unsigned int fn;	    /* PCI function number */
	unsigned int flags;	    /* FULL_INIT_DONE, USING_MSI*, ... */

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;	    /* Wake-on-LAN settings */

	struct {
		unsigned short vec;		/* MSI-X vector */
		char desc[IFNAMSIZ + 10];	/* irq name */
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];                   /* channel -> port map */

	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;	    /* L2 address-resolution table */
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;

	struct tid_info tids;
	void **tid_release_head;    /* list of TIDs pending release */
	spinlock_t tid_release_lock;	/* protects the TID release list */
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;	    /* serializes statistics reads */
};
547
548
/*
 * t4_read_reg - read a 32-bit adapter register
 * @adap: the adapter
 * @reg_addr: byte offset of the register within the mapped BAR
 *
 * Returns the value of the 32-bit register at adap->regs + @reg_addr.
 */
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}
552
553
/*
 * t4_write_reg - write a 32-bit adapter register
 * @adap: the adapter
 * @reg_addr: byte offset of the register within the mapped BAR
 * @val: the value to write
 */
static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}
557
558
#ifndef readq
559
/*
 * Fallback 64-bit MMIO read for platforms without a native readq.
 * Combines two 32-bit reads (low word at @addr, high word at @addr + 4),
 * so the 64-bit read is not atomic.
 * NOTE(review): C leaves the order of the two readl() calls unspecified;
 * presumably harmless for these registers — confirm if order matters.
 */
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}
563
564
/*
 * Fallback 64-bit MMIO write for platforms without a native writeq.
 * Writes the low 32 bits to @addr, then the high 32 bits to @addr + 4;
 * the 64-bit write is not atomic.
 */
static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
569
#endif
570
571
/*
 * t4_read_reg64 - read a 64-bit adapter register
 * @adap: the adapter
 * @reg_addr: byte offset of the register within the mapped BAR
 *
 * Returns the 64-bit register value; uses the readq fallback above
 * on platforms without a native readq.
 */
static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}
575
576
/*
 * t4_write_reg64 - write a 64-bit adapter register
 * @adap: the adapter
 * @reg_addr: byte offset of the register within the mapped BAR
 * @val: the value to write
 */
static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}
580
587
/*
 * netdev2pinfo - return the port_info embedded in a net_device
 * @dev: the network device
 *
 * The port_info lives in the netdev's private data area.
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}
591
599
static
inline
struct
port_info
*adap2pinfo(
struct
adapter
*adap,
int
idx
)
600
{
601
return
netdev_priv(adap->
port
[idx]);
602
}
603
610
/*
 * netdev2adap - return the adapter a net_device belongs to
 * @dev: the network device
 *
 * Follows the port_info's backpointer to the owning adapter.
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}
614
615
void
t4_os_portmod_changed
(
const
struct
adapter
*adap,
int
port_id
);
616
void
t4_os_link_changed
(
struct
adapter
*adap,
int
port_id
,
int
link_stat);
617
618
void
*
t4_alloc_mem
(
size_t
size
);
619
620
void
t4_free_sge_resources
(
struct
adapter
*adap);
621
irq_handler_t
t4_intr_handler
(
struct
adapter
*adap);
622
netdev_tx_t
t4_eth_xmit
(
struct
sk_buff
*
skb
,
struct
net_device
*
dev
);
623
int
t4_ethrx_handler
(
struct
sge_rspq
*q,
const
__be64
*rsp,
624
const
struct
pkt_gl
*gl);
625
int
t4_mgmt_tx
(
struct
adapter
*adap,
struct
sk_buff
*
skb
);
626
int
t4_ofld_send
(
struct
adapter
*adap,
struct
sk_buff
*
skb
);
627
int
t4_sge_alloc_rxq
(
struct
adapter
*adap,
struct
sge_rspq
*iq,
bool
fwevtq,
628
struct
net_device
*
dev
,
int
intr_idx,
629
struct
sge_fl
*
fl
,
rspq_handler_t
hnd);
630
int
t4_sge_alloc_eth_txq
(
struct
adapter
*adap,
struct
sge_eth_txq
*txq,
631
struct
net_device
*
dev
,
struct
netdev_queue
*netdevq,
632
unsigned
int
iqid);
633
int
t4_sge_alloc_ctrl_txq
(
struct
adapter
*adap,
struct
sge_ctrl_txq
*txq,
634
struct
net_device
*
dev
,
unsigned
int
iqid,
635
unsigned
int
cmplqid);
636
int
t4_sge_alloc_ofld_txq
(
struct
adapter
*adap,
struct
sge_ofld_txq
*txq,
637
struct
net_device
*
dev
,
unsigned
int
iqid);
638
irqreturn_t
t4_sge_intr_msix
(
int
irq
,
void
*
cookie
);
639
int
t4_sge_init
(
struct
adapter
*adap);
640
void
t4_sge_start
(
struct
adapter
*adap);
641
void
t4_sge_stop
(
struct
adapter
*adap);
642
extern
int
dbfifo_int_thresh
;
643
644
#define for_each_port(adapter, iter) \
645
for (iter = 0; iter < (adapter)->params.nports; ++iter)
646
647
/*
 * is_bypass - is this a bypass adapter?
 * @adap: the adapter
 *
 * Returns the cached bypass capability recorded in the adapter params.
 */
static inline int is_bypass(struct adapter *adap)
{
	return adap->params.bypass;
}
651
652
/*
 * is_bypass_device - does a PCI device ID identify a bypass adapter?
 * @device: the PCI device ID
 *
 * Returns 1 for the known bypass device IDs (0x440b, 0x440c), 0 otherwise.
 */
static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	return (device == 0x440b || device == 0x440c) ? 1 : 0;
}
663
664
static
inline
unsigned
int
core_ticks_per_usec(
const
struct
adapter
*adap)
665
{
666
return
adap->
params
.vpd.cclk / 1000;
667
}
668
669
static
inline
unsigned
int
us_to_core_ticks(
const
struct
adapter
*adap,
670
unsigned
int
us)
671
{
672
return
(us * adap->
params
.vpd.cclk) / 1000;
673
}
674
675
static
inline
unsigned
int
core_ticks_to_us(
const
struct
adapter
*
adapter
,
676
unsigned
int
ticks
)
677
{
678
/* add Core Clock / 2 to round ticks to nearest uS */
679
return
((ticks * 1000 + adapter->
params
.vpd.cclk/2) /
680
adapter->
params
.vpd.cclk);
681
}
682
683
void
t4_set_reg_field
(
struct
adapter *adap,
unsigned
int
addr,
u32
mask
,
684
u32
val
);
685
686
int
t4_wr_mbox_meat
(
struct
adapter *adap,
int
mbox
,
const
void
*
cmd
,
int
size
,
687
void
*rpl,
bool
sleep_ok);
688
689
/*
 * t4_wr_mbox - issue a firmware mailbox command, allowing sleep
 * @adap: the adapter
 * @mbox: the mailbox to use
 * @cmd: the command to write
 * @size: size of @cmd in bytes
 * @rpl: where to store the reply, if any
 *
 * Thin wrapper around t4_wr_mbox_meat() with sleep_ok = true.
 */
static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
			     int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
}
694
695
/*
 * t4_wr_mbox_ns - issue a firmware mailbox command without sleeping
 * @adap: the adapter
 * @mbox: the mailbox to use
 * @cmd: the command to write
 * @size: size of @cmd in bytes
 * @rpl: where to store the reply, if any
 *
 * Thin wrapper around t4_wr_mbox_meat() with sleep_ok = false, for
 * contexts that must not sleep.
 */
static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
				int size, void *rpl)
{
	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
700
701
void
t4_write_indirect
(
struct
adapter *adap,
unsigned
int
addr_reg,
702
unsigned
int
data_reg,
const
u32
*vals,
703
unsigned
int
nregs,
unsigned
int
start_idx);
704
void
t4_intr_enable
(
struct
adapter *adapter);
705
void
t4_intr_disable
(
struct
adapter *adapter);
706
int
t4_slow_intr_handler
(
struct
adapter *adapter);
707
708
int
t4_wait_dev_ready
(
struct
adapter *adap);
709
int
t4_link_start
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
port
,
710
struct
link_config
*
lc
);
711
int
t4_restart_aneg
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
port
);
712
int
t4_memory_write
(
struct
adapter *adap,
int
mtype,
u32
addr,
u32
len,
713
__be32
*
buf
);
714
int
t4_seeprom_wp
(
struct
adapter *adapter,
bool
enable
);
715
int
get_vpd_params
(
struct
adapter *adapter,
struct
vpd_params
*
p
);
716
int
t4_load_fw
(
struct
adapter *adapter,
const
u8
*
fw_data
,
unsigned
int
size
);
717
unsigned
int
t4_flash_cfg_addr
(
struct
adapter *adapter);
718
int
t4_load_cfg
(
struct
adapter *adapter,
const
u8
*cfg_data,
unsigned
int
size
);
719
int
t4_check_fw_version
(
struct
adapter *adapter);
720
int
t4_prep_adapter
(
struct
adapter *adapter);
721
int
t4_port_init
(
struct
adapter *adap,
int
mbox
,
int
pf
,
int
vf
);
722
void
t4_fatal_err
(
struct
adapter *adapter);
723
int
t4_config_rss_range
(
struct
adapter *adapter,
int
mbox
,
unsigned
int
viid,
724
int
start
,
int
n
,
const
u16
*rspq,
unsigned
int
nrspq);
725
int
t4_config_glbl_rss
(
struct
adapter *adapter,
int
mbox
,
unsigned
int
mode
,
726
unsigned
int
flags
);
727
int
t4_mc_read
(
struct
adapter *adap,
u32
addr,
__be32
*
data
,
u64
*parity);
728
int
t4_edc_read
(
struct
adapter *adap,
int
idx
,
u32
addr,
__be32
*
data
,
729
u64
*parity);
730
731
void
t4_get_port_stats
(
struct
adapter *adap,
int
idx
,
struct
port_stats
*
p
);
732
void
t4_read_mtu_tbl
(
struct
adapter *adap,
u16
*mtus,
u8
*mtu_log);
733
void
t4_tp_wr_bits_indirect
(
struct
adapter *adap,
unsigned
int
addr,
734
unsigned
int
mask
,
unsigned
int
val
);
735
void
t4_tp_get_tcp_stats
(
struct
adapter *adap,
struct
tp_tcp_stats
*
v4
,
736
struct
tp_tcp_stats
*v6);
737
void
t4_load_mtus
(
struct
adapter *adap,
const
unsigned
short
*mtus,
738
const
unsigned
short
*
alpha
,
const
unsigned
short
*beta);
739
740
void
t4_wol_magic_enable
(
struct
adapter *adap,
unsigned
int
port
,
741
const
u8
*addr);
742
int
t4_wol_pat_enable
(
struct
adapter *adap,
unsigned
int
port
,
unsigned
int
map
,
743
u64
mask0,
u64
mask1,
unsigned
int
crc
,
bool
enable
);
744
745
int
t4_fw_hello
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
evt_mbox,
746
enum
dev_master
master,
enum
dev_state
*
state
);
747
int
t4_fw_bye
(
struct
adapter *adap,
unsigned
int
mbox
);
748
int
t4_early_init
(
struct
adapter *adap,
unsigned
int
mbox
);
749
int
t4_fw_reset
(
struct
adapter *adap,
unsigned
int
mbox
,
int
reset);
750
int
t4_fw_halt
(
struct
adapter *adap,
unsigned
int
mbox
,
int
force
);
751
int
t4_fw_restart
(
struct
adapter *adap,
unsigned
int
mbox
,
int
reset);
752
int
t4_fw_upgrade
(
struct
adapter *adap,
unsigned
int
mbox
,
753
const
u8
*
fw_data
,
unsigned
int
size
,
int
force
);
754
int
t4_fw_config_file
(
struct
adapter *adap,
unsigned
int
mbox
,
755
unsigned
int
mtype,
unsigned
int
maddr,
756
u32
*finiver,
u32
*finicsum,
u32
*cfcsum);
757
int
t4_fixup_host_params
(
struct
adapter *adap,
unsigned
int
page_size
,
758
unsigned
int
cache_line_size
);
759
int
t4_fw_initialize
(
struct
adapter *adap,
unsigned
int
mbox
);
760
int
t4_query_params
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
761
unsigned
int
vf
,
unsigned
int
nparams,
const
u32
*
params
,
762
u32
*
val
);
763
int
t4_set_params
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
764
unsigned
int
vf
,
unsigned
int
nparams,
const
u32
*
params
,
765
const
u32
*
val
);
766
int
t4_cfg_pfvf
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
767
unsigned
int
vf
,
unsigned
int
txq,
unsigned
int
txq_eth_ctrl,
768
unsigned
int
rxqi,
unsigned
int
rxq,
unsigned
int
tc
,
769
unsigned
int
vi,
unsigned
int
cmask,
unsigned
int
pmask,
770
unsigned
int
nexact,
unsigned
int
rcaps,
unsigned
int
wxcaps);
771
int
t4_alloc_vi
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
port
,
772
unsigned
int
pf
,
unsigned
int
vf
,
unsigned
int
nmac,
u8
*
mac
,
773
unsigned
int
*rss_size);
774
int
t4_set_rxmode
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
viid,
775
int
mtu,
int
promisc,
int
all_multi,
int
bcast,
int
vlanex,
776
bool
sleep_ok);
777
int
t4_alloc_mac_filt
(
struct
adapter *adap,
unsigned
int
mbox
,
778
unsigned
int
viid,
bool
free
,
unsigned
int
naddr,
779
const
u8
**addr,
u16
*
idx
,
u64
*
hash
,
bool
sleep_ok);
780
int
t4_change_mac
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
viid,
781
int
idx
,
const
u8
*addr,
bool
persist,
bool
add_smt);
782
int
t4_set_addr_hash
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
viid,
783
bool
ucast,
u64
vec
,
bool
sleep_ok);
784
int
t4_enable_vi
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
viid,
785
bool
rx_en,
bool
tx_en);
786
int
t4_identify_port
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
viid,
787
unsigned
int
nblinks);
788
int
t4_mdio_rd
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
phy_addr,
789
unsigned
int
mmd,
unsigned
int
reg
,
u16
*valp);
790
int
t4_mdio_wr
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
phy_addr,
791
unsigned
int
mmd,
unsigned
int
reg
,
u16
val
);
792
int
t4_iq_free
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
793
unsigned
int
vf
,
unsigned
int
iqtype,
unsigned
int
iqid,
794
unsigned
int
fl0id,
unsigned
int
fl1id);
795
int
t4_eth_eq_free
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
796
unsigned
int
vf
,
unsigned
int
eqid);
797
int
t4_ctrl_eq_free
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
798
unsigned
int
vf
,
unsigned
int
eqid);
799
int
t4_ofld_eq_free
(
struct
adapter *adap,
unsigned
int
mbox
,
unsigned
int
pf
,
800
unsigned
int
vf
,
unsigned
int
eqid);
801
int
t4_handle_fw_rpl
(
struct
adapter *adap,
const
__be64
*rpl);
802
void
t4_db_full
(
struct
adapter *adapter);
803
void
t4_db_dropped
(
struct
adapter *adapter);
804
int
t4_mem_win_read_len
(
struct
adapter *adap,
u32
addr,
__be32
*
data
,
int
len);
805
int
t4_fwaddrspace_write
(
struct
adapter *adap,
unsigned
int
mbox
,
806
u32
addr,
u32
val
);
807
#endif
/* __CXGB4_H__ */
Generated on Thu Jan 10 2013 14:00:21 for Linux Kernel by
1.8.2