/* Linux kernel 3.7.1: drivers/net/ethernet/qlogic/qlge/qlge.h */
/*
 * QLogic QLA41xx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qlge for copyright and licensing details.
 */
#ifndef _QLGE_H_
#define _QLGE_H_

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>

/*
 * General definitions...
 */
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
#define DRV_VERSION "v1.00.00.31"

#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */

#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
#define QLGE_MEZZ_SSYS_ID_068 0x0068
#define QLGE_MEZZ_SSYS_ID_180 0x0180
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)

#define NUM_TX_RING_ENTRIES 256
#define NUM_RX_RING_ENTRIES 256

#define NUM_SMALL_BUFFERS 512
#define NUM_LARGE_BUFFERS 512
#define DB_PAGE_SIZE 4096

/* Calculate the number of (4k) pages required to
 * contain a buffer queue of the given length.
 */
#define MAX_DB_PAGES_PER_BQ(x) \
        (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
        (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))

#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
        MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
        MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
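/* Worked example for the default sizes above: with NUM_SMALL_BUFFERS = 512,
 * the queue of 64-bit buffer pointers occupies 512 * sizeof(u64) = 4096
 * bytes, exactly one DB_PAGE_SIZE page, so MAX_DB_PAGES_PER_BQ(512) is 1 and
 * RX_RING_SHADOW_SPACE works out to sizeof(u64) * (1 + 1 + 1) = 24 bytes.
 */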
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048

#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
#define UDELAY_COUNT 3
#define UDELAY_DELAY 100


#define TX_DESC_PER_IOCB 8

#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
#endif
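/* Worked example (assuming a 4 KiB-page kernel where MAX_SKB_FRAGS is the
 * common value of 17): an skb can need up to 17 + 2 buffer descriptors,
 * while the IOCB itself carries only TX_DESC_PER_IOCB = 8, so the remaining
 * (17 - 8) + 2 = 11 descriptors overflow into the outbound address list,
 * which is what struct oal further down is sized to hold.
 */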

/* Word shifting for converting 64-bit
 * address to a series of 16-bit words.
 * This is used for some MPI firmware
 * mailbox commands.
 */
#define LSW(x)  ((u16)(x))
#define MSW(x)  ((u16)((u32)(x) >> 16))
#define LSD(x)  ((u32)((u64)(x)))
#define MSD(x)  ((u32)((((u64)(x)) >> 32)))
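/* Worked example: for a 64-bit address 0x1122334455667788, LSD() yields
 * 0x55667788 and MSD() yields 0x11223344; applying LSW()/MSW() to each
 * dword then gives the 16-bit words 0x7788, 0x5566, 0x3344 and 0x1122,
 * the form some MPI mailbox commands expect.
 */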

/* MPI test register definitions. This register
 * is used for determining alternate NIC function's
 * PCI->func number.
 */
enum {
        MPI_TEST_FUNC_PORT_CFG = 0x1002,
        MPI_TEST_FUNC_PRB_CTL = 0x100e,
        MPI_TEST_FUNC_PRB_EN = 0x18a20000,
        MPI_TEST_FUNC_RST_STS = 0x100a,
        MPI_TEST_FUNC_RST_FRC = 0x00000003,
        MPI_TEST_NIC_FUNC_MASK = 0x00000007,
        MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
        MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
        MPI_TEST_NIC1_FUNC_SHIFT = 1,
        MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
        MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
        MPI_TEST_NIC2_FUNC_SHIFT = 5,
        MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
        MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
        MPI_TEST_FC1_FUNCTION_SHIFT = 9,
        MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
        MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
        MPI_TEST_FC2_FUNCTION_SHIFT = 13,

        MPI_NIC_READ = 0x00000000,
        MPI_NIC_REG_BLOCK = 0x00020000,
        MPI_NIC_FUNCTION_SHIFT = 6,
};

/*
 * Processor Address Register (PROC_ADDR) bit definitions.
 */
enum {

        /* Misc. stuff */
        MAILBOX_COUNT = 16,
        MAILBOX_TIMEOUT = 5,

        PROC_ADDR_RDY = (1 << 31),
        PROC_ADDR_R = (1 << 30),
        PROC_ADDR_ERR = (1 << 29),
        PROC_ADDR_DA = (1 << 28),
        PROC_ADDR_FUNC0_MBI = 0x00001180,
        PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
        PROC_ADDR_FUNC0_CTL = 0x000011a1,
        PROC_ADDR_FUNC2_MBI = 0x00001280,
        PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
        PROC_ADDR_FUNC2_CTL = 0x000012a1,
        PROC_ADDR_MPI_RISC = 0x00000000,
        PROC_ADDR_MDE = 0x00010000,
        PROC_ADDR_REGBLOCK = 0x00020000,
        PROC_ADDR_RISC_REG = 0x00030000,
};

/*
 * System Register (SYS) bit definitions.
 */
enum {
        SYS_EFE = (1 << 0),
        SYS_FAE = (1 << 1),
        SYS_MDC = (1 << 2),
        SYS_DST = (1 << 3),
        SYS_DWC = (1 << 4),
        SYS_EVW = (1 << 5),
        SYS_OMP_DLY_MASK = 0x3f000000,
        /*
         * There are no values defined as of edit #15.
         */
        SYS_ODI = (1 << 14),
};

/*
 * Reset/Failover Register (RST_FO) bit definitions.
 */
enum {
        RST_FO_TFO = (1 << 0),
        RST_FO_RR_MASK = 0x00060000,
        RST_FO_RR_CQ_CAM = 0x00000000,
        RST_FO_RR_DROP = 0x00000002,
        RST_FO_RR_DQ = 0x00000004,
        RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
        RST_FO_FRB = (1 << 12),
        RST_FO_MOP = (1 << 13),
        RST_FO_REG = (1 << 14),
        RST_FO_FR = (1 << 15),
};

/*
 * Function Specific Control Register (FSC) bit definitions.
 */
enum {
        FSC_DBRST_MASK = 0x00070000,
        FSC_DBRST_256 = 0x00000000,
        FSC_DBRST_512 = 0x00000001,
        FSC_DBRST_768 = 0x00000002,
        FSC_DBRST_1024 = 0x00000003,
        FSC_DBL_MASK = 0x00180000,
        FSC_DBL_DBRST = 0x00000000,
        FSC_DBL_MAX_PLD = 0x00000008,
        FSC_DBL_MAX_BRST = 0x00000010,
        FSC_DBL_128_BYTES = 0x00000018,
        FSC_EC = (1 << 5),
        FSC_EPC_MASK = 0x00c00000,
        FSC_EPC_INBOUND = (1 << 6),
        FSC_EPC_OUTBOUND = (1 << 7),
        FSC_VM_PAGESIZE_MASK = 0x07000000,
        FSC_VM_PAGE_2K = 0x00000100,
        FSC_VM_PAGE_4K = 0x00000200,
        FSC_VM_PAGE_8K = 0x00000300,
        FSC_VM_PAGE_64K = 0x00000600,
        FSC_SH = (1 << 11),
        FSC_DSB = (1 << 12),
        FSC_STE = (1 << 13),
        FSC_FE = (1 << 15),
};

/*
 * Host Command Status Register (CSR) bit definitions.
 */
enum {
        CSR_ERR_STS_MASK = 0x0000003f,
        /*
         * There are no values defined as of edit #15.
         */
        CSR_RR = (1 << 8),
        CSR_HRI = (1 << 9),
        CSR_RP = (1 << 10),
        CSR_CMD_PARM_SHIFT = 22,
        CSR_CMD_NOP = 0x00000000,
        CSR_CMD_SET_RST = 0x10000000,
        CSR_CMD_CLR_RST = 0x20000000,
        CSR_CMD_SET_PAUSE = 0x30000000,
        CSR_CMD_CLR_PAUSE = 0x40000000,
        CSR_CMD_SET_H2R_INT = 0x50000000,
        CSR_CMD_CLR_H2R_INT = 0x60000000,
        CSR_CMD_PAR_EN = 0x70000000,
        CSR_CMD_SET_BAD_PAR = 0x80000000,
        CSR_CMD_CLR_BAD_PAR = 0x90000000,
        CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
};

/*
 * Configuration Register (CFG) bit definitions.
 */
enum {
        CFG_LRQ = (1 << 0),
        CFG_DRQ = (1 << 1),
        CFG_LR = (1 << 2),
        CFG_DR = (1 << 3),
        CFG_LE = (1 << 5),
        CFG_LCQ = (1 << 6),
        CFG_DCQ = (1 << 7),
        CFG_Q_SHIFT = 8,
        CFG_Q_MASK = 0x7f000000,
};

/*
 * Status Register (STS) bit definitions.
 */
enum {
        STS_FE = (1 << 0),
        STS_PI = (1 << 1),
        STS_PL0 = (1 << 2),
        STS_PL1 = (1 << 3),
        STS_PI0 = (1 << 4),
        STS_PI1 = (1 << 5),
        STS_FUNC_ID_MASK = 0x000000c0,
        STS_FUNC_ID_SHIFT = 6,
        STS_F0E = (1 << 8),
        STS_F1E = (1 << 9),
        STS_F2E = (1 << 10),
        STS_F3E = (1 << 11),
        STS_NFE = (1 << 12),
};

/*
 * Interrupt Enable Register (INTR_EN) bit definitions.
 */
enum {
        INTR_EN_INTR_MASK = 0x007f0000,
        INTR_EN_TYPE_MASK = 0x03000000,
        INTR_EN_TYPE_ENABLE = 0x00000100,
        INTR_EN_TYPE_DISABLE = 0x00000200,
        INTR_EN_TYPE_READ = 0x00000300,
        INTR_EN_IHD = (1 << 13),
        INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
        INTR_EN_EI = (1 << 14),
        INTR_EN_EN = (1 << 15),
};

/*
 * Interrupt Mask Register (INTR_MASK) bit definitions.
 */
enum {
        INTR_MASK_PI = (1 << 0),
        INTR_MASK_HL0 = (1 << 1),
        INTR_MASK_LH0 = (1 << 2),
        INTR_MASK_HL1 = (1 << 3),
        INTR_MASK_LH1 = (1 << 4),
        INTR_MASK_SE = (1 << 5),
        INTR_MASK_LSC = (1 << 6),
        INTR_MASK_MC = (1 << 7),
        INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
};

/*
 * Register (REV_ID) bit definitions.
 */
enum {
        REV_ID_MASK = 0x0000000f,
        REV_ID_NICROLL_SHIFT = 0,
        REV_ID_NICREV_SHIFT = 4,
        REV_ID_XGROLL_SHIFT = 8,
        REV_ID_XGREV_SHIFT = 12,
        REV_ID_CHIPREV_SHIFT = 28,
};

/*
 * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
 */
enum {
        FRC_ECC_ERR_VW = (1 << 12),
        FRC_ECC_ERR_VB = (1 << 13),
        FRC_ECC_ERR_NI = (1 << 14),
        FRC_ECC_ERR_NO = (1 << 15),
        FRC_ECC_PFE_SHIFT = 16,
        FRC_ECC_ERR_DO = (1 << 18),
        FRC_ECC_P14 = (1 << 19),
};

/*
 * Error Status Register (ERR_STS) bit definitions.
 */
enum {
        ERR_STS_NOF = (1 << 0),
        ERR_STS_NIF = (1 << 1),
        ERR_STS_DRP = (1 << 2),
        ERR_STS_XGP = (1 << 3),
        ERR_STS_FOU = (1 << 4),
        ERR_STS_FOC = (1 << 5),
        ERR_STS_FOF = (1 << 6),
        ERR_STS_FIU = (1 << 7),
        ERR_STS_FIC = (1 << 8),
        ERR_STS_FIF = (1 << 9),
        ERR_STS_MOF = (1 << 10),
        ERR_STS_TA = (1 << 11),
        ERR_STS_MA = (1 << 12),
        ERR_STS_MPE = (1 << 13),
        ERR_STS_SCE = (1 << 14),
        ERR_STS_STE = (1 << 15),
        ERR_STS_FOW = (1 << 16),
        ERR_STS_UE = (1 << 17),
        ERR_STS_MCH = (1 << 26),
        ERR_STS_LOC_SHIFT = 27,
};

/*
 * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
 */
enum {
        RAM_DBG_ADDR_FW = (1 << 30),
        RAM_DBG_ADDR_FR = (1 << 31),
};

/*
 * Semaphore Register (SEM) bit definitions.
 */
enum {
        /*
         * Example:
         * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
         */
        SEM_CLEAR = 0,
        SEM_SET = 1,
        SEM_FORCE = 3,
        SEM_XGMAC0_SHIFT = 0,
        SEM_XGMAC1_SHIFT = 2,
        SEM_ICB_SHIFT = 4,
        SEM_MAC_ADDR_SHIFT = 6,
        SEM_FLASH_SHIFT = 8,
        SEM_PROBE_SHIFT = 10,
        SEM_RT_IDX_SHIFT = 12,
        SEM_PROC_REG_SHIFT = 14,
        SEM_XGMAC0_MASK = 0x00030000,
        SEM_XGMAC1_MASK = 0x000c0000,
        SEM_ICB_MASK = 0x00300000,
        SEM_MAC_ADDR_MASK = 0x00c00000,
        SEM_FLASH_MASK = 0x03000000,
        SEM_PROBE_MASK = 0x0c000000,
        SEM_RT_IDX_MASK = 0x30000000,
        SEM_PROC_REG_MASK = 0xc0000000,
};
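/* The example expression in the comment above works out to
 * 0x00030000 | (1 << 0) = 0x00030001: the mask bits select the XGMAC0
 * semaphore field and SEM_SET is placed in its low-order value bits.
 */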

/*
 * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
 */
enum {
        XGMAC_ADDR_RDY = (1 << 31),
        XGMAC_ADDR_R = (1 << 30),
        XGMAC_ADDR_XME = (1 << 29),

        /* XGMAC control registers */
        PAUSE_SRC_LO = 0x00000100,
        PAUSE_SRC_HI = 0x00000104,
        GLOBAL_CFG = 0x00000108,
        GLOBAL_CFG_RESET = (1 << 0),
        GLOBAL_CFG_JUMBO = (1 << 6),
        GLOBAL_CFG_TX_STAT_EN = (1 << 10),
        GLOBAL_CFG_RX_STAT_EN = (1 << 11),
        TX_CFG = 0x0000010c,
        TX_CFG_RESET = (1 << 0),
        TX_CFG_EN = (1 << 1),
        TX_CFG_PREAM = (1 << 2),
        RX_CFG = 0x00000110,
        RX_CFG_RESET = (1 << 0),
        RX_CFG_EN = (1 << 1),
        RX_CFG_PREAM = (1 << 2),
        FLOW_CTL = 0x0000011c,
        PAUSE_OPCODE = 0x00000120,
        PAUSE_TIMER = 0x00000124,
        PAUSE_FRM_DEST_LO = 0x00000128,
        PAUSE_FRM_DEST_HI = 0x0000012c,
        MAC_TX_PARAMS = 0x00000134,
        MAC_TX_PARAMS_JUMBO = (1 << 31),
        MAC_TX_PARAMS_SIZE_SHIFT = 16,
        MAC_RX_PARAMS = 0x00000138,
        MAC_SYS_INT = 0x00000144,
        MAC_SYS_INT_MASK = 0x00000148,
        MAC_MGMT_INT = 0x0000014c,
        MAC_MGMT_IN_MASK = 0x00000150,
        EXT_ARB_MODE = 0x000001fc,

        /* XGMAC TX statistics registers */
        TX_PKTS = 0x00000200,
        TX_BYTES = 0x00000208,
        TX_MCAST_PKTS = 0x00000210,
        TX_BCAST_PKTS = 0x00000218,
        TX_UCAST_PKTS = 0x00000220,
        TX_CTL_PKTS = 0x00000228,
        TX_PAUSE_PKTS = 0x00000230,
        TX_64_PKT = 0x00000238,
        TX_65_TO_127_PKT = 0x00000240,
        TX_128_TO_255_PKT = 0x00000248,
        TX_256_511_PKT = 0x00000250,
        TX_512_TO_1023_PKT = 0x00000258,
        TX_1024_TO_1518_PKT = 0x00000260,
        TX_1519_TO_MAX_PKT = 0x00000268,
        TX_UNDERSIZE_PKT = 0x00000270,
        TX_OVERSIZE_PKT = 0x00000278,

        /* XGMAC statistics control registers */
        RX_HALF_FULL_DET = 0x000002a0,
        TX_HALF_FULL_DET = 0x000002a4,
        RX_OVERFLOW_DET = 0x000002a8,
        TX_OVERFLOW_DET = 0x000002ac,
        RX_HALF_FULL_MASK = 0x000002b0,
        TX_HALF_FULL_MASK = 0x000002b4,
        RX_OVERFLOW_MASK = 0x000002b8,
        TX_OVERFLOW_MASK = 0x000002bc,
        STAT_CNT_CTL = 0x000002c0,
        STAT_CNT_CTL_CLEAR_TX = (1 << 0),
        STAT_CNT_CTL_CLEAR_RX = (1 << 1),
        AUX_RX_HALF_FULL_DET = 0x000002d0,
        AUX_TX_HALF_FULL_DET = 0x000002d4,
        AUX_RX_OVERFLOW_DET = 0x000002d8,
        AUX_TX_OVERFLOW_DET = 0x000002dc,
        AUX_RX_HALF_FULL_MASK = 0x000002f0,
        AUX_TX_HALF_FULL_MASK = 0x000002f4,
        AUX_RX_OVERFLOW_MASK = 0x000002f8,
        AUX_TX_OVERFLOW_MASK = 0x000002fc,

        /* XGMAC RX statistics registers */
        RX_BYTES = 0x00000300,
        RX_BYTES_OK = 0x00000308,
        RX_PKTS = 0x00000310,
        RX_PKTS_OK = 0x00000318,
        RX_BCAST_PKTS = 0x00000320,
        RX_MCAST_PKTS = 0x00000328,
        RX_UCAST_PKTS = 0x00000330,
        RX_UNDERSIZE_PKTS = 0x00000338,
        RX_OVERSIZE_PKTS = 0x00000340,
        RX_JABBER_PKTS = 0x00000348,
        RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
        RX_DROP_EVENTS = 0x00000358,
        RX_FCERR_PKTS = 0x00000360,
        RX_ALIGN_ERR = 0x00000368,
        RX_SYMBOL_ERR = 0x00000370,
        RX_MAC_ERR = 0x00000378,
        RX_CTL_PKTS = 0x00000380,
        RX_PAUSE_PKTS = 0x00000388,
        RX_64_PKTS = 0x00000390,
        RX_65_TO_127_PKTS = 0x00000398,
        RX_128_255_PKTS = 0x000003a0,
        RX_256_511_PKTS = 0x000003a8,
        RX_512_TO_1023_PKTS = 0x000003b0,
        RX_1024_TO_1518_PKTS = 0x000003b8,
        RX_1519_TO_MAX_PKTS = 0x000003c0,
        RX_LEN_ERR_PKTS = 0x000003c8,

        /* XGMAC MDIO control registers */
        MDIO_TX_DATA = 0x00000400,
        MDIO_RX_DATA = 0x00000410,
        MDIO_CMD = 0x00000420,
        MDIO_PHY_ADDR = 0x00000430,
        MDIO_PORT = 0x00000440,
        MDIO_STATUS = 0x00000450,

        XGMAC_REGISTER_END = 0x00000740,
};

/*
 * Enhanced Transmission Schedule Registers (NIC_ETS, CNA_ETS) bit definitions.
 */
enum {
        ETS_QUEUE_SHIFT = 29,
        ETS_REF = (1 << 26),
        ETS_RS = (1 << 27),
        ETS_P = (1 << 28),
        ETS_FC_COS_SHIFT = 23,
};

/*
 * Flash Address Register (FLASH_ADDR) bit definitions.
 */
enum {
        FLASH_ADDR_RDY = (1 << 31),
        FLASH_ADDR_R = (1 << 30),
        FLASH_ADDR_ERR = (1 << 29),
};

/*
 * Stop CQ Processing Register (CQ_STOP) bit definitions.
 */
enum {
        CQ_STOP_QUEUE_MASK = (0x007f0000),
        CQ_STOP_TYPE_MASK = (0x03000000),
        CQ_STOP_TYPE_START = 0x00000100,
        CQ_STOP_TYPE_STOP = 0x00000200,
        CQ_STOP_TYPE_READ = 0x00000300,
        CQ_STOP_EN = (1 << 15),
};

/*
 * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
 */
enum {
        MAC_ADDR_IDX_SHIFT = 4,
        MAC_ADDR_TYPE_SHIFT = 16,
        MAC_ADDR_TYPE_COUNT = 10,
        MAC_ADDR_TYPE_MASK = 0x000f0000,
        MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
        MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
        MAC_ADDR_TYPE_VLAN = 0x00020000,
        MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
        MAC_ADDR_TYPE_FC_MAC = 0x00040000,
        MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
        MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
        MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
        MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
        MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
        MAC_ADDR_ADR = (1 << 25),
        MAC_ADDR_RS = (1 << 26),
        MAC_ADDR_E = (1 << 27),
        MAC_ADDR_MR = (1 << 30),
        MAC_ADDR_MW = (1 << 31),
        MAX_MULTICAST_ENTRIES = 32,

        /* Entry count and words per entry
         * for each address type in the filter.
         */
        MAC_ADDR_MAX_CAM_ENTRIES = 512,
        MAC_ADDR_MAX_CAM_WCOUNT = 3,
        MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
        MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
        MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
        MAC_ADDR_MAX_VLAN_WCOUNT = 1,
        MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
        MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
        MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
        MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
        MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
        MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
        MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
        MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
        MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
        MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
        MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
        MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
        MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
        MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
};

/*
 * Split Header Register (SPLT_HDR) bit definitions.
 */
enum {
        SPLT_HDR_EP = (1 << 31),
};

/*
 * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
 */
enum {
        FC_RCV_CFG_ECT = (1 << 15),
        FC_RCV_CFG_DFH = (1 << 20),
        FC_RCV_CFG_DVF = (1 << 21),
        FC_RCV_CFG_RCE = (1 << 27),
        FC_RCV_CFG_RFE = (1 << 28),
        FC_RCV_CFG_TEE = (1 << 29),
        FC_RCV_CFG_TCE = (1 << 30),
        FC_RCV_CFG_TFE = (1 << 31),
};

/*
 * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
 */
enum {
        NIC_RCV_CFG_PPE = (1 << 0),
        NIC_RCV_CFG_VLAN_MASK = 0x00060000,
        NIC_RCV_CFG_VLAN_ALL = 0x00000000,
        NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
        NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
        NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
        NIC_RCV_CFG_RV = (1 << 3),
        NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
        NIC_RCV_CFG_DFQ_SHIFT = 8,
        NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
};

/*
 * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
 */
enum {
        MGMT_RCV_CFG_ARP = (1 << 0),
        MGMT_RCV_CFG_DHC = (1 << 1),
        MGMT_RCV_CFG_DHS = (1 << 2),
        MGMT_RCV_CFG_NP = (1 << 3),
        MGMT_RCV_CFG_I6N = (1 << 4),
        MGMT_RCV_CFG_I6R = (1 << 5),
        MGMT_RCV_CFG_DH6 = (1 << 6),
        MGMT_RCV_CFG_UD1 = (1 << 7),
        MGMT_RCV_CFG_UD0 = (1 << 8),
        MGMT_RCV_CFG_BCT = (1 << 9),
        MGMT_RCV_CFG_MCT = (1 << 10),
        MGMT_RCV_CFG_DM = (1 << 11),
        MGMT_RCV_CFG_RM = (1 << 12),
        MGMT_RCV_CFG_STL = (1 << 13),
        MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
        MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
        MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
        MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
        MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
};

/*
 * Routing Index Register (RT_IDX) bit definitions.
 */
enum {
        RT_IDX_IDX_SHIFT = 8,
        RT_IDX_TYPE_MASK = 0x000f0000,
        RT_IDX_TYPE_SHIFT = 16,
        RT_IDX_TYPE_RT = 0x00000000,
        RT_IDX_TYPE_RT_INV = 0x00010000,
        RT_IDX_TYPE_NICQ = 0x00020000,
        RT_IDX_TYPE_NICQ_INV = 0x00030000,
        RT_IDX_DST_MASK = 0x00700000,
        RT_IDX_DST_RSS = 0x00000000,
        RT_IDX_DST_CAM_Q = 0x00100000,
        RT_IDX_DST_COS_Q = 0x00200000,
        RT_IDX_DST_DFLT_Q = 0x00300000,
        RT_IDX_DST_DEST_Q = 0x00400000,
        RT_IDX_RS = (1 << 26),
        RT_IDX_E = (1 << 27),
        RT_IDX_MR = (1 << 30),
        RT_IDX_MW = (1 << 31),

        /* Nic Queue format - type 2 bits */
        RT_IDX_BCAST = (1 << 0),
        RT_IDX_MCAST = (1 << 1),
        RT_IDX_MCAST_MATCH = (1 << 2),
        RT_IDX_MCAST_REG_MATCH = (1 << 3),
        RT_IDX_MCAST_HASH_MATCH = (1 << 4),
        RT_IDX_FC_MACH = (1 << 5),
        RT_IDX_ETH_FCOE = (1 << 6),
        RT_IDX_CAM_HIT = (1 << 7),
        RT_IDX_CAM_BIT0 = (1 << 8),
        RT_IDX_CAM_BIT1 = (1 << 9),
        RT_IDX_VLAN_TAG = (1 << 10),
        RT_IDX_VLAN_MATCH = (1 << 11),
        RT_IDX_VLAN_FILTER = (1 << 12),
        RT_IDX_ETH_SKIP1 = (1 << 13),
        RT_IDX_ETH_SKIP2 = (1 << 14),
        RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
        RT_IDX_802_3 = (1 << 16),
        RT_IDX_LLDP = (1 << 17),
        RT_IDX_UNUSED018 = (1 << 18),
        RT_IDX_UNUSED019 = (1 << 19),
        RT_IDX_UNUSED20 = (1 << 20),
        RT_IDX_UNUSED21 = (1 << 21),
        RT_IDX_ERR = (1 << 22),
        RT_IDX_VALID = (1 << 23),
        RT_IDX_TU_CSUM_ERR = (1 << 24),
        RT_IDX_IP_CSUM_ERR = (1 << 25),
        RT_IDX_MAC_ERR = (1 << 26),
        RT_IDX_RSS_TCP6 = (1 << 27),
        RT_IDX_RSS_TCP4 = (1 << 28),
        RT_IDX_RSS_IPV6 = (1 << 29),
        RT_IDX_RSS_IPV4 = (1 << 30),
        RT_IDX_RSS_MATCH = (1 << 31),

        /* Hierarchy for the NIC Queue Mask */
        RT_IDX_ALL_ERR_SLOT = 0,
        RT_IDX_MAC_ERR_SLOT = 0,
        RT_IDX_IP_CSUM_ERR_SLOT = 1,
        RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
        RT_IDX_BCAST_SLOT = 3,
        RT_IDX_MCAST_MATCH_SLOT = 4,
        RT_IDX_ALLMULTI_SLOT = 5,
        RT_IDX_UNUSED6_SLOT = 6,
        RT_IDX_UNUSED7_SLOT = 7,
        RT_IDX_RSS_MATCH_SLOT = 8,
        RT_IDX_RSS_IPV4_SLOT = 8,
        RT_IDX_RSS_IPV6_SLOT = 9,
        RT_IDX_RSS_TCP4_SLOT = 10,
        RT_IDX_RSS_TCP6_SLOT = 11,
        RT_IDX_CAM_HIT_SLOT = 12,
        RT_IDX_UNUSED013 = 13,
        RT_IDX_UNUSED014 = 14,
        RT_IDX_PROMISCUOUS_SLOT = 15,
        RT_IDX_MAX_RT_SLOTS = 8,
        RT_IDX_MAX_NIC_SLOTS = 16,
};

/*
 * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
 */
enum {
        XG_SERDES_ADDR_RDY = (1 << 31),
        XG_SERDES_ADDR_R = (1 << 30),

        XG_SERDES_ADDR_STS = 0x00001E06,
        XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
        XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
        XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,

        /* Serdes coredump definitions. */
        XG_SERDES_XAUI_AN_START = 0x00000000,
        XG_SERDES_XAUI_AN_END = 0x00000034,
        XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
        XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
        XG_SERDES_XFI_AN_START = 0x00001000,
        XG_SERDES_XFI_AN_END = 0x00001034,
        XG_SERDES_XFI_TRAIN_START = 0x10001050,
        XG_SERDES_XFI_TRAIN_END = 0x1000107C,
        XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
        XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
        XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
        XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
        XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
        XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
        XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
        XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
};

/*
 * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
 */
enum {
        PRB_MX_ADDR_ARE = (1 << 16),
        PRB_MX_ADDR_UP = (1 << 15),
        PRB_MX_ADDR_SWP = (1 << 14),

        /* Module select values. */
        PRB_MX_ADDR_MAX_MODS = 21,
        PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
        PRB_MX_ADDR_MOD_SEL_TBD = 0,
        PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
        PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
        PRB_MX_ADDR_MOD_SEL_FRB = 3,
        PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
        PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
        PRB_MX_ADDR_MOD_SEL_DA1 = 6,
        PRB_MX_ADDR_MOD_SEL_DA2 = 7,
        PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
        PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
        PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
        PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
        PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
        PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
        PRB_MX_ADDR_MOD_SEL_REG = 14,
        PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
        PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
        PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
        PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
        PRB_MX_ADDR_MOD_SEL_MOP = 20,
        /* Bit fields indicating which modules
         * are valid for each clock domain.
         */
        PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
        PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
        PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
        PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
        PRB_MX_ADDR_VALID_TOTAL = 34,

        /* Clock domain values. */
        PRB_MX_ADDR_CLOCK_SHIFT = 6,
        PRB_MX_ADDR_SYS_CLOCK = 0,
        PRB_MX_ADDR_PCI_CLOCK = 2,
        PRB_MX_ADDR_FC_CLOCK = 5,
        PRB_MX_ADDR_XGM_CLOCK = 6,

        PRB_MX_ADDR_MAX_MUX = 64,
};

/*
 * Control Register Set Map
 */
enum {
        PROC_ADDR = 0,          /* Use semaphore */
        PROC_DATA = 0x04,       /* Use semaphore */
        SYS = 0x08,
        RST_FO = 0x0c,
        FSC = 0x10,
        CSR = 0x14,
        LED = 0x18,
        ICB_RID = 0x1c,         /* Use semaphore */
        ICB_L = 0x20,           /* Use semaphore */
        ICB_H = 0x24,           /* Use semaphore */
        CFG = 0x28,
        BIOS_ADDR = 0x2c,
        STS = 0x30,
        INTR_EN = 0x34,
        INTR_MASK = 0x38,
        ISR1 = 0x3c,
        ISR2 = 0x40,
        ISR3 = 0x44,
        ISR4 = 0x48,
        REV_ID = 0x4c,
        FRC_ECC_ERR = 0x50,
        ERR_STS = 0x54,
        RAM_DBG_ADDR = 0x58,
        RAM_DBG_DATA = 0x5c,
        ECC_ERR_CNT = 0x60,
        SEM = 0x64,
        GPIO_1 = 0x68,          /* Use semaphore */
        GPIO_2 = 0x6c,          /* Use semaphore */
        GPIO_3 = 0x70,          /* Use semaphore */
        RSVD2 = 0x74,
        XGMAC_ADDR = 0x78,      /* Use semaphore */
        XGMAC_DATA = 0x7c,      /* Use semaphore */
        NIC_ETS = 0x80,
        CNA_ETS = 0x84,
        FLASH_ADDR = 0x88,      /* Use semaphore */
        FLASH_DATA = 0x8c,      /* Use semaphore */
        CQ_STOP = 0x90,
        PAGE_TBL_RID = 0x94,
        WQ_PAGE_TBL_LO = 0x98,
        WQ_PAGE_TBL_HI = 0x9c,
        CQ_PAGE_TBL_LO = 0xa0,
        CQ_PAGE_TBL_HI = 0xa4,
        MAC_ADDR_IDX = 0xa8,    /* Use semaphore */
        MAC_ADDR_DATA = 0xac,   /* Use semaphore */
        COS_DFLT_CQ1 = 0xb0,
        COS_DFLT_CQ2 = 0xb4,
        ETYPE_SKIP1 = 0xb8,
        ETYPE_SKIP2 = 0xbc,
        SPLT_HDR = 0xc0,
        FC_PAUSE_THRES = 0xc4,
        NIC_PAUSE_THRES = 0xc8,
        FC_ETHERTYPE = 0xcc,
        FC_RCV_CFG = 0xd0,
        NIC_RCV_CFG = 0xd4,
        FC_COS_TAGS = 0xd8,
        NIC_COS_TAGS = 0xdc,
        MGMT_RCV_CFG = 0xe0,
        RT_IDX = 0xe4,
        RT_DATA = 0xe8,
        RSVD7 = 0xec,
        XG_SERDES_ADDR = 0xf0,
        XG_SERDES_DATA = 0xf4,
        PRB_MX_ADDR = 0xf8,     /* Use semaphore */
        PRB_MX_DATA = 0xfc,     /* Use semaphore */
};

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define SMALL_BUFFER_SIZE 256
#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
#define SPLT_SETTING  FSC_DBRST_1024
#define SPLT_LEN 0
#define QLGE_SB_PAD 0
#else
#define SMALL_BUFFER_SIZE 512
#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define SPLT_SETTING  FSC_SH
#define SPLT_LEN (SPLT_HDR_EP | \
        min(SMALL_BUF_MAP_SIZE, 1023))
#define QLGE_SB_PAD 32
#endif
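/* Worked example for the #else branch above: SMALL_BUF_MAP_SIZE is
 * 512 / 2 = 256, so SPLT_LEN becomes SPLT_HDR_EP | min(256, 1023), i.e.
 * 0x80000000 | 0x100 = 0x80000100 (bit 31 set, low field 0x100).
 */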

/*
 * CAM output format.
 */
enum {
        CAM_OUT_ROUTE_FC = 0,
        CAM_OUT_ROUTE_NIC = 1,
        CAM_OUT_FUNC_SHIFT = 2,
        CAM_OUT_RV = (1 << 4),
        CAM_OUT_SH = (1 << 15),
        CAM_OUT_CQ_ID_SHIFT = 5,
};

/*
 * Mailbox definitions
 */
enum {
        /* Asynchronous Event Notifications */
        AEN_SYS_ERR = 0x00008002,
        AEN_LINK_UP = 0x00008011,
        AEN_LINK_DOWN = 0x00008012,
        AEN_IDC_CMPLT = 0x00008100,
        AEN_IDC_REQ = 0x00008101,
        AEN_IDC_EXT = 0x00008102,
        AEN_DCBX_CHG = 0x00008110,
        AEN_AEN_LOST = 0x00008120,
        AEN_AEN_SFP_IN = 0x00008130,
        AEN_AEN_SFP_OUT = 0x00008131,
        AEN_FW_INIT_DONE = 0x00008400,
        AEN_FW_INIT_FAIL = 0x00008401,

        /* Mailbox Command Opcodes. */
        MB_CMD_NOP = 0x00000000,
        MB_CMD_EX_FW = 0x00000002,
        MB_CMD_MB_TEST = 0x00000006,
        MB_CMD_CSUM_TEST = 0x00000007,  /* Verify Checksum */
        MB_CMD_ABOUT_FW = 0x00000008,
        MB_CMD_COPY_RISC_RAM = 0x0000000a,
        MB_CMD_LOAD_RISC_RAM = 0x0000000b,
        MB_CMD_DUMP_RISC_RAM = 0x0000000c,
        MB_CMD_WRITE_RAM = 0x0000000d,
        MB_CMD_INIT_RISC_RAM = 0x0000000e,
        MB_CMD_READ_RAM = 0x0000000f,
        MB_CMD_STOP_FW = 0x00000014,
        MB_CMD_MAKE_SYS_ERR = 0x0000002a,
        MB_CMD_WRITE_SFP = 0x00000030,
        MB_CMD_READ_SFP = 0x00000031,
        MB_CMD_INIT_FW = 0x00000060,
        MB_CMD_GET_IFCB = 0x00000061,
        MB_CMD_GET_FW_STATE = 0x00000069,
        MB_CMD_IDC_REQ = 0x00000100,    /* Inter-Driver Communication */
        MB_CMD_IDC_ACK = 0x00000101,    /* Inter-Driver Communication */
        MB_CMD_SET_WOL_MODE = 0x00000110,       /* Wake On Lan */
        MB_WOL_DISABLE = 0,
        MB_WOL_MAGIC_PKT = (1 << 1),
        MB_WOL_FLTR = (1 << 2),
        MB_WOL_UCAST = (1 << 3),
        MB_WOL_MCAST = (1 << 4),
        MB_WOL_BCAST = (1 << 5),
        MB_WOL_LINK_UP = (1 << 6),
        MB_WOL_LINK_DOWN = (1 << 7),
        MB_WOL_MODE_ON = (1 << 16),     /* Wake on Lan Mode on */
        MB_CMD_SET_WOL_FLTR = 0x00000111,       /* Wake On Lan Filter */
        MB_CMD_CLEAR_WOL_FLTR = 0x00000112,     /* Wake On Lan Filter */
        MB_CMD_SET_WOL_MAGIC = 0x00000113,      /* Wake On Lan Magic Packet */
        MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,    /* Wake On Lan Magic Packet */
        MB_CMD_SET_WOL_IMMED = 0x00000115,
        MB_CMD_PORT_RESET = 0x00000120,
        MB_CMD_SET_PORT_CFG = 0x00000122,
        MB_CMD_GET_PORT_CFG = 0x00000123,
        MB_CMD_GET_LINK_STS = 0x00000124,
        MB_CMD_SET_LED_CFG = 0x00000125,        /* Set LED Configuration Register */
        QL_LED_BLINK = 0x03e803e8,
        MB_CMD_GET_LED_CFG = 0x00000126,        /* Get LED Configuration Register */
        MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160,  /* Set Mgmnt Traffic Control */
        MB_SET_MPI_TFK_STOP = (1 << 0),
        MB_SET_MPI_TFK_RESUME = (1 << 1),
        MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161,  /* Get Mgmnt Traffic Control */
        MB_GET_MPI_TFK_STOPPED = (1 << 0),
        MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
        /* Sub-commands for IDC request.
         * This describes the reason for the
         * IDC request.
         */
        MB_CMD_IOP_NONE = 0x0000,
        MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
        MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
        MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
        MB_CMD_IOP_DVR_START = 0x0100,
        MB_CMD_IOP_FLASH_ACC = 0x0101,
        MB_CMD_IOP_RESTART_MPI = 0x0102,
        MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,

        /* Mailbox Command Status. */
        MB_CMD_STS_GOOD = 0x00004000,           /* Success. */
        MB_CMD_STS_INTRMDT = 0x00001000,        /* Intermediate Complete. */
        MB_CMD_STS_INVLD_CMD = 0x00004001,      /* Invalid. */
        MB_CMD_STS_XFC_ERR = 0x00004002,        /* Interface Error. */
        MB_CMD_STS_CSUM_ERR = 0x00004003,       /* Csum Error. */
        MB_CMD_STS_ERR = 0x00004005,            /* System Error. */
        MB_CMD_STS_PARAM_ERR = 0x00004006,      /* Parameter Error. */
};

struct mbox_params {
        u32 mbox_in[MAILBOX_COUNT];
        u32 mbox_out[MAILBOX_COUNT];
        int in_count;
        int out_count;
};
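/* A minimal sketch (not from this header) of how a request is typically
 * staged in this structure: the opcode goes in mbox_in[0], in_count gives
 * the number of valid inbound words, and out_count gives the number of
 * status words the caller expects back, e.g. for MB_CMD_ABOUT_FW:
 *
 *      struct mbox_params mbc = { 0 };
 *
 *      mbc.mbox_in[0] = MB_CMD_ABOUT_FW;
 *      mbc.in_count = 1;
 *      mbc.out_count = 3;
 *
 * The exact out_count per command is firmware-defined; 3 here is only an
 * illustration.
 */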

struct flash_params_8012 {
        u8 dev_id_str[4];
        __le16 size;
        __le16 csum;
        __le16 ver;
        __le16 sub_dev_id;
        u8 mac_addr[6];
        __le16 res;
};

/* 8000 device's flash is a different structure
 * at a different offset in flash.
 */
#define FUNC0_FLASH_OFFSET 0x140200
#define FUNC1_FLASH_OFFSET 0x140600

/* Flash related data structures. */
struct flash_params_8000 {
        u8 dev_id_str[4];       /* "8000" */
        __le16 ver;
        __le16 size;
        __le16 csum;
        __le16 reserved0;
        __le16 total_size;
        __le16 entry_count;
        u8 data_type0;
        u8 data_size0;
        u8 mac_addr[6];
        u8 data_type1;
        u8 data_size1;
        u8 mac_addr1[6];
        u8 data_type2;
        u8 data_size2;
        __le16 vlan_id;
        u8 data_type3;
        u8 data_size3;
        __le16 last;
        u8 reserved1[464];
        __le16 subsys_ven_id;
        __le16 subsys_dev_id;
        u8 reserved2[4];
};

union flash_params {
        struct flash_params_8012 flash_params_8012;
        struct flash_params_8000 flash_params_8000;
};

/*
 * doorbell space for the rx ring context
 */
struct rx_doorbell_context {
        u32 cnsmr_idx;          /* 0x00 */
        u32 valid;              /* 0x04 */
        u32 reserved[4];        /* 0x08-0x14 */
        u32 lbq_prod_idx;       /* 0x18 */
        u32 sbq_prod_idx;       /* 0x1c */
};

/*
 * doorbell space for the tx ring context
 */
struct tx_doorbell_context {
        u32 prod_idx;           /* 0x00 */
        u32 valid;              /* 0x04 */
        u32 reserved[4];        /* 0x08-0x14 */
        u32 lbq_prod_idx;       /* 0x18 */
        u32 sbq_prod_idx;       /* 0x1c */
};

/* DATA STRUCTURES SHARED WITH HARDWARE. */
struct tx_buf_desc {
        __le64 addr;
        __le32 len;
#define TX_DESC_LEN_MASK 0x000fffff
#define TX_DESC_C 0x40000000
#define TX_DESC_E 0x80000000
} __packed;

/*
 * IOCB Definitions...
 */

#define OPCODE_OB_MAC_IOCB 0x01
#define OPCODE_OB_MAC_TSO_IOCB 0x02
#define OPCODE_IB_MAC_IOCB 0x20
#define OPCODE_IB_MPI_IOCB 0x21
#define OPCODE_IB_AE_IOCB 0x3f

struct ob_mac_iocb_req {
        u8 opcode;
        u8 flags1;
#define OB_MAC_IOCB_REQ_OI 0x01
#define OB_MAC_IOCB_REQ_I 0x02
#define OB_MAC_IOCB_REQ_D 0x08
#define OB_MAC_IOCB_REQ_F 0x10
        u8 flags2;
        u8 flags3;
#define OB_MAC_IOCB_DFP 0x02
#define OB_MAC_IOCB_V 0x04
        __le32 reserved1[2];
        __le16 frame_len;
#define OB_MAC_IOCB_LEN_MASK 0x3ffff
        __le16 reserved2;
        u32 tid;
        u32 txq_idx;
        __le32 reserved3;
        __le16 vlan_tci;
        __le16 reserved4;
        struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;

struct ob_mac_iocb_rsp {
        u8 opcode;
        u8 flags1;
#define OB_MAC_IOCB_RSP_OI 0x01
#define OB_MAC_IOCB_RSP_I 0x02
#define OB_MAC_IOCB_RSP_E 0x08
#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
        u8 flags2;
        u8 flags3;
#define OB_MAC_IOCB_RSP_B 0x80
        u32 tid;
        u32 txq_idx;
        __le32 reserved[13];
} __packed;

struct ob_mac_tso_iocb_req {
        u8 opcode;
        u8 flags1;
#define OB_MAC_TSO_IOCB_OI 0x01
#define OB_MAC_TSO_IOCB_I 0x02
#define OB_MAC_TSO_IOCB_D 0x08
#define OB_MAC_TSO_IOCB_IP4 0x40
#define OB_MAC_TSO_IOCB_IP6 0x80
        u8 flags2;
#define OB_MAC_TSO_IOCB_LSO 0x20
#define OB_MAC_TSO_IOCB_UC 0x40
#define OB_MAC_TSO_IOCB_TC 0x80
        u8 flags3;
#define OB_MAC_TSO_IOCB_IC 0x01
#define OB_MAC_TSO_IOCB_DFP 0x02
#define OB_MAC_TSO_IOCB_V 0x04
        __le32 reserved1[2];
        __le32 frame_len;
        u32 tid;
        u32 txq_idx;
        __le16 total_hdrs_len;
        __le16 net_trans_offset;
#define OB_MAC_TRANSPORT_HDR_SHIFT 6
        __le16 vlan_tci;
        __le16 mss;
        struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __packed;

struct ob_mac_tso_iocb_rsp {
        u8 opcode;
        u8 flags1;
#define OB_MAC_TSO_IOCB_RSP_OI 0x01
#define OB_MAC_TSO_IOCB_RSP_I 0x02
#define OB_MAC_TSO_IOCB_RSP_E 0x08
#define OB_MAC_TSO_IOCB_RSP_S 0x10
#define OB_MAC_TSO_IOCB_RSP_L 0x20
#define OB_MAC_TSO_IOCB_RSP_P 0x40
        u8 flags2;
        u8 flags3;
#define OB_MAC_TSO_IOCB_RSP_B 0x8000
        u32 tid;
        u32 txq_idx;
        __le32 reserved2[13];
} __packed;

struct ib_mac_iocb_rsp {
        u8 opcode;              /* 0x20 */
        u8 flags1;
#define IB_MAC_IOCB_RSP_OI 0x01         /* Override intr delay */
#define IB_MAC_IOCB_RSP_I 0x02          /* Disable Intr Generation */
#define IB_MAC_CSUM_ERR_MASK 0x1c       /* A mask to use for csum errs */
#define IB_MAC_IOCB_RSP_TE 0x04         /* Checksum error */
#define IB_MAC_IOCB_RSP_NU 0x08         /* No checksum rcvd */
#define IB_MAC_IOCB_RSP_IE 0x10         /* IPv4 checksum error */
#define IB_MAC_IOCB_RSP_M_MASK 0x60     /* Multicast info */
#define IB_MAC_IOCB_RSP_M_NONE 0x00     /* Not mcast frame */
#define IB_MAC_IOCB_RSP_M_HASH 0x20     /* HASH mcast frame */
#define IB_MAC_IOCB_RSP_M_REG 0x40      /* Registered mcast frame */
#define IB_MAC_IOCB_RSP_M_PROM 0x60     /* Promiscuous mcast frame */
#define IB_MAC_IOCB_RSP_B 0x80          /* Broadcast frame */
        u8 flags2;
#define IB_MAC_IOCB_RSP_P 0x01          /* Promiscuous frame */
#define IB_MAC_IOCB_RSP_V 0x02          /* Vlan tag present */
#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c
#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
#define IB_MAC_IOCB_RSP_U 0x20          /* UDP packet */
#define IB_MAC_IOCB_RSP_T 0x40          /* TCP packet */
#define IB_MAC_IOCB_RSP_FO 0x80         /* Failover port */
        u8 flags3;
#define IB_MAC_IOCB_RSP_RSS_MASK 0x07   /* RSS mask */
#define IB_MAC_IOCB_RSP_M_NONE 0x00     /* No RSS match */
#define IB_MAC_IOCB_RSP_M_IPV4 0x04     /* IPv4 RSS match */
#define IB_MAC_IOCB_RSP_M_IPV6 0x02     /* IPv6 RSS match */
#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05   /* TCP with IPv4 */
#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03   /* TCP with IPv6 */
#define IB_MAC_IOCB_RSP_V4 0x08         /* IPV4 */
#define IB_MAC_IOCB_RSP_V6 0x10         /* IPV6 */
#define IB_MAC_IOCB_RSP_IH 0x20         /* Split after IP header */
#define IB_MAC_IOCB_RSP_DS 0x40         /* data is in small buffer */
#define IB_MAC_IOCB_RSP_DL 0x80         /* data is in large buffer */
        __le32 data_len;
        __le64 data_addr;
        __le32 rss;
        __le16 vlan_id;                 /* 12 bits */
#define IB_MAC_IOCB_RSP_C 0x1000        /* VLAN CFI bit */
#define IB_MAC_IOCB_RSP_COS_SHIFT 12    /* class of service value */
#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff

        __le16 reserved1;
        __le32 reserved2[6];
        u8 reserved3[3];
        u8 flags4;
#define IB_MAC_IOCB_RSP_HV 0x20
#define IB_MAC_IOCB_RSP_HS 0x40
#define IB_MAC_IOCB_RSP_HL 0x80
        __le32 hdr_len;
        __le64 hdr_addr;
} __packed;

struct ib_ae_iocb_rsp {
        u8 opcode;
        u8 flags1;
#define IB_AE_IOCB_RSP_OI 0x01
#define IB_AE_IOCB_RSP_I 0x02
        u8 event;
#define LINK_UP_EVENT 0x00
#define LINK_DOWN_EVENT 0x01
#define CAM_LOOKUP_ERR_EVENT 0x06
#define SOFT_ECC_ERROR_EVENT 0x07
#define MGMT_ERR_EVENT 0x08
#define TEN_GIG_MAC_EVENT 0x09
#define GPI0_H2L_EVENT 0x10
#define GPI0_L2H_EVENT 0x20
#define GPI1_H2L_EVENT 0x11
#define GPI1_L2H_EVENT 0x21
#define PCI_ERR_ANON_BUF_RD 0x40
        u8 q_id;
        __le32 reserved[15];
} __packed;

/*
 * These three structures are for generic
 * handling of ib and ob iocbs.
 */
struct ql_net_rsp_iocb {
        u8 opcode;
        u8 flags0;
        __le16 length;
        __le32 tid;
        __le32 reserved[14];
} __packed;

struct net_req_iocb {
        u8 opcode;
        u8 flags0;
        __le16 flags1;
        __le32 tid;
        __le32 reserved1[30];
} __packed;

/*
 * tx ring initialization control block for chip.
 * It is defined as:
 * "Work Queue Initialization Control Block"
 */
struct wqicb {
        __le16 len;
#define Q_LEN_V (1 << 4)
#define Q_LEN_CPP_CONT 0x0000
#define Q_LEN_CPP_16 0x0001
#define Q_LEN_CPP_32 0x0002
#define Q_LEN_CPP_64 0x0003
#define Q_LEN_CPP_512 0x0006
        __le16 flags;
#define Q_PRI_SHIFT 1
#define Q_FLAGS_LC 0x1000
#define Q_FLAGS_LB 0x2000
#define Q_FLAGS_LI 0x4000
#define Q_FLAGS_LO 0x8000
        __le16 cq_id_rss;
#define Q_CQ_ID_RSS_RV 0x8000
        __le16 rid;
        __le64 addr;
        __le64 cnsmr_idx_addr;
} __packed;

/*
 * rx ring initialization control block for chip.
 * It is defined as:
 * "Completion Queue Initialization Control Block"
 */
struct cqicb {
        u8 msix_vect;
        u8 reserved1;
        u8 reserved2;
        u8 flags;
#define FLAGS_LV 0x08
#define FLAGS_LS 0x10
#define FLAGS_LL 0x20
#define FLAGS_LI 0x40
#define FLAGS_LC 0x80
        __le16 len;
#define LEN_V (1 << 4)
#define LEN_CPP_CONT 0x0000
#define LEN_CPP_32 0x0001
#define LEN_CPP_64 0x0002
#define LEN_CPP_128 0x0003
        __le16 rid;
        __le64 addr;
        __le64 prod_idx_addr;
        __le16 pkt_delay;
        __le16 irq_delay;
        __le64 lbq_addr;
        __le16 lbq_buf_size;
        __le16 lbq_len;         /* entry count */
        __le64 sbq_addr;
        __le16 sbq_buf_size;
        __le16 sbq_len;         /* entry count */
} __packed;

struct ricb {
        u8 base_cq;
#define RSS_L4K 0x80
        u8 flags;
#define RSS_L6K 0x01
#define RSS_LI 0x02
#define RSS_LB 0x04
#define RSS_LM 0x08
#define RSS_RI4 0x10
#define RSS_RT4 0x20
#define RSS_RI6 0x40
#define RSS_RT6 0x80
        __le16 mask;
        u8 hash_cq_id[1024];
        __le32 ipv6_hash_key[10];
        __le32 ipv4_hash_key[4];
} __packed;

/* SOFTWARE/DRIVER DATA STRUCTURES. */

struct oal {
        struct tx_buf_desc oal[TX_DESC_PER_OAL];
};

struct map_list {
        DEFINE_DMA_UNMAP_ADDR(mapaddr);
        DEFINE_DMA_UNMAP_LEN(maplen);
};

struct tx_ring_desc {
        struct sk_buff *skb;
        struct ob_mac_iocb_req *queue_entry;
        u32 index;
        struct oal oal;
        struct map_list map[MAX_SKB_FRAGS + 2];
        int map_cnt;
        struct tx_ring_desc *next;
};

struct page_chunk {
        struct page *page;      /* master page */
        char *va;               /* virt addr for this chunk */
        u64 map;                /* mapping for master */
        unsigned int offset;    /* offset for this chunk */
        unsigned int last_flag; /* flag set for last chunk in page */
};

struct bq_desc {
        union {
                struct page_chunk pg_chunk;
                struct sk_buff *skb;
        } p;
        __le64 *addr;
        u32 index;
        DEFINE_DMA_UNMAP_ADDR(mapaddr);
        DEFINE_DMA_UNMAP_LEN(maplen);
};

#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
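/* QL_TXQ_IDX simply maps the submitting CPU onto the available tx rings;
 * the skb argument is unused by the macro. For example, assuming
 * qdev->tx_ring_count is the default MAX_TX_RINGS of 8, a transmit issued
 * on CPU 10 lands on tx ring 10 % 8 = 2.
 */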

struct tx_ring {
        /*
         * queue info.
         */
        struct wqicb wqicb;     /* structure used to inform chip of new queue */
        void *wq_base;          /* pci_alloc:virtual addr for tx */
        dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
        __le32 *cnsmr_idx_sh_reg;       /* shadow copy of consumer idx */
        dma_addr_t cnsmr_idx_sh_reg_dma;        /* dma-shadow copy of consumer */
        u32 wq_size;            /* size in bytes of queue area */
        u32 wq_len;             /* number of entries in queue */
        void __iomem *prod_idx_db_reg;  /* doorbell area index reg at offset 0x00 */
        void __iomem *valid_db_reg;     /* doorbell area valid reg at offset 0x04 */
        u16 prod_idx;           /* current value for prod idx */
        u16 cq_id;              /* completion (rx) queue for tx completions */
        u8 wq_id;               /* queue id for this entry */
        u8 reserved1[3];
        struct tx_ring_desc *q; /* descriptor list for the queue */
        spinlock_t lock;
        atomic_t tx_count;      /* counts down for every outstanding IO */
        struct delayed_work tx_work;
        struct ql_adapter *qdev;
        u64 tx_packets;
        u64 tx_bytes;
        u64 tx_errors;
};

/*
 * Type of inbound queue.
 */
enum {
        DEFAULT_Q = 2,          /* Handles slow queue and chip/MPI events. */
        TX_Q = 3,               /* Handles outbound completions. */
        RX_Q = 4,               /* Handles inbound completions. */
};

struct rx_ring {
        struct cqicb cqicb;     /* The chip's completion queue init control block. */

        /* Completion queue elements. */
        void *cq_base;
        dma_addr_t cq_base_dma;
        u32 cq_size;
        u32 cq_len;
        u16 cq_id;
        __le32 *prod_idx_sh_reg;        /* Shadowed producer register. */
        dma_addr_t prod_idx_sh_reg_dma;
        void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
        u32 cnsmr_idx;          /* current sw idx */
        struct ql_net_rsp_iocb *curr_entry;     /* next entry on queue */
        void __iomem *valid_db_reg;     /* PCI doorbell mem area + 0x04 */

        /* Large buffer queue elements. */
        u32 lbq_len;            /* entry count */
        u32 lbq_size;           /* size in bytes of queue */
        u32 lbq_buf_size;
        void *lbq_base;
        dma_addr_t lbq_base_dma;
        void *lbq_base_indirect;
        dma_addr_t lbq_base_indirect_dma;
        struct page_chunk pg_chunk;     /* current page for chunks */
        struct bq_desc *lbq;    /* array of control blocks */
        void __iomem *lbq_prod_idx_db_reg;      /* PCI doorbell mem area + 0x18 */
        u32 lbq_prod_idx;       /* current sw prod idx */
        u32 lbq_curr_idx;       /* next entry we expect */
        u32 lbq_clean_idx;      /* beginning of new descs */
        u32 lbq_free_cnt;       /* free buffer desc cnt */

        /* Small buffer queue elements. */
        u32 sbq_len;            /* entry count */
        u32 sbq_size;           /* size in bytes of queue */
        u32 sbq_buf_size;
        void *sbq_base;
        dma_addr_t sbq_base_dma;
        void *sbq_base_indirect;
        dma_addr_t sbq_base_indirect_dma;
        struct bq_desc *sbq;    /* array of control blocks */
        void __iomem *sbq_prod_idx_db_reg;      /* PCI doorbell mem area + 0x1c */
        u32 sbq_prod_idx;       /* current sw prod idx */
        u32 sbq_curr_idx;       /* next entry we expect */
        u32 sbq_clean_idx;      /* beginning of new descs */
        u32 sbq_free_cnt;       /* free buffer desc cnt */

        /* Misc. handler elements. */
        u32 type;               /* Type of queue, tx, rx. */
        u32 irq;                /* Which vector this ring is assigned. */
        u32 cpu;                /* Which CPU this should run on. */
        char name[IFNAMSIZ + 5];
        struct napi_struct napi;
        u8 reserved;
        struct ql_adapter *qdev;
        u64 rx_packets;
        u64 rx_multicast;
        u64 rx_bytes;
        u64 rx_dropped;
        u64 rx_errors;
};

/*
 * RSS Initialization Control Block
 */
struct hash_id {
        u8 value[4];
};

struct nic_stats {
        /*
         * These stats come from offset 200h to 278h
         * in the XGMAC register.
         */
        u64 tx_pkts;
        u64 tx_bytes;
        u64 tx_mcast_pkts;
        u64 tx_bcast_pkts;
        u64 tx_ucast_pkts;
        u64 tx_ctl_pkts;
        u64 tx_pause_pkts;
        u64 tx_64_pkt;
        u64 tx_65_to_127_pkt;
        u64 tx_128_to_255_pkt;
        u64 tx_256_511_pkt;
        u64 tx_512_to_1023_pkt;
        u64 tx_1024_to_1518_pkt;
        u64 tx_1519_to_max_pkt;
        u64 tx_undersize_pkt;
        u64 tx_oversize_pkt;

        /*
         * These stats come from offset 300h to 3C8h
         * in the XGMAC register.
         */
        u64 rx_bytes;
        u64 rx_bytes_ok;
        u64 rx_pkts;
        u64 rx_pkts_ok;
        u64 rx_bcast_pkts;
        u64 rx_mcast_pkts;
        u64 rx_ucast_pkts;
        u64 rx_undersize_pkts;
        u64 rx_oversize_pkts;
        u64 rx_jabber_pkts;
        u64 rx_undersize_fcerr_pkts;
        u64 rx_drop_events;
        u64 rx_fcerr_pkts;
        u64 rx_align_err;
        u64 rx_symbol_err;
        u64 rx_mac_err;
        u64 rx_ctl_pkts;
        u64 rx_pause_pkts;
        u64 rx_64_pkts;
        u64 rx_65_to_127_pkts;
        u64 rx_128_255_pkts;
        u64 rx_256_511_pkts;
        u64 rx_512_to_1023_pkts;
        u64 rx_1024_to_1518_pkts;
        u64 rx_1519_to_max_pkts;
        u64 rx_len_err_pkts;
        /* Receive Mac Err stats */
        u64 rx_code_err;
        u64 rx_oversize_err;
        u64 rx_undersize_err;
        u64 rx_preamble_err;
        u64 rx_frame_len_err;
        u64 rx_crc_err;
        u64 rx_err_count;
        /*
         * These stats come from offset 500h to 5C8h
         * in the XGMAC register.
         */
        u64 tx_cbfc_pause_frames0;
        u64 tx_cbfc_pause_frames1;
        u64 tx_cbfc_pause_frames2;
        u64 tx_cbfc_pause_frames3;
        u64 tx_cbfc_pause_frames4;
        u64 tx_cbfc_pause_frames5;
        u64 tx_cbfc_pause_frames6;
        u64 tx_cbfc_pause_frames7;
        u64 rx_cbfc_pause_frames0;
        u64 rx_cbfc_pause_frames1;
        u64 rx_cbfc_pause_frames2;
        u64 rx_cbfc_pause_frames3;
        u64 rx_cbfc_pause_frames4;
        u64 rx_cbfc_pause_frames5;
        u64 rx_cbfc_pause_frames6;
        u64 rx_cbfc_pause_frames7;
        u64 rx_nic_fifo_drop;
};

/* Firmware coredump internal register address/length pairs. */
enum {
        MPI_CORE_REGS_ADDR = 0x00030000,
        MPI_CORE_REGS_CNT = 127,
        MPI_CORE_SH_REGS_CNT = 16,
        TEST_REGS_ADDR = 0x00001000,
        TEST_REGS_CNT = 23,
        RMII_REGS_ADDR = 0x00001040,
        RMII_REGS_CNT = 64,
        FCMAC1_REGS_ADDR = 0x00001080,
        FCMAC2_REGS_ADDR = 0x000010c0,
        FCMAC_REGS_CNT = 64,
        FC1_MBX_REGS_ADDR = 0x00001100,
        FC2_MBX_REGS_ADDR = 0x00001240,
        FC_MBX_REGS_CNT = 64,
        IDE_REGS_ADDR = 0x00001140,
        IDE_REGS_CNT = 64,
        NIC1_MBX_REGS_ADDR = 0x00001180,
        NIC2_MBX_REGS_ADDR = 0x00001280,
        NIC_MBX_REGS_CNT = 64,
        SMBUS_REGS_ADDR = 0x00001200,
        SMBUS_REGS_CNT = 64,
        I2C_REGS_ADDR = 0x00001fc0,
        I2C_REGS_CNT = 64,
        MEMC_REGS_ADDR = 0x00003000,
        MEMC_REGS_CNT = 256,
        PBUS_REGS_ADDR = 0x00007c00,
        PBUS_REGS_CNT = 256,
        MDE_REGS_ADDR = 0x00010000,
        MDE_REGS_CNT = 6,
        CODE_RAM_ADDR = 0x00020000,
        CODE_RAM_CNT = 0x2000,
        MEMC_RAM_ADDR = 0x00100000,
        MEMC_RAM_CNT = 0x2000,
};

#define MPI_COREDUMP_COOKIE 0x5555aaaa
struct mpi_coredump_global_header {
        u32 cookie;
        u8 idString[16];
        u32 timeLo;
        u32 timeHi;
        u32 imageSize;
        u32 headerSize;
        u8 info[220];
};

struct mpi_coredump_segment_header {
        u32 cookie;
        u32 segNum;
        u32 segSize;
        u32 extra;
        u8 description[16];
};

/* Firmware coredump header segment numbers. */
enum {
        CORE_SEG_NUM = 1,
        TEST_LOGIC_SEG_NUM = 2,
        RMII_SEG_NUM = 3,
        FCMAC1_SEG_NUM = 4,
        FCMAC2_SEG_NUM = 5,
        FC1_MBOX_SEG_NUM = 6,
        IDE_SEG_NUM = 7,
        NIC1_MBOX_SEG_NUM = 8,
        SMBUS_SEG_NUM = 9,
        FC2_MBOX_SEG_NUM = 10,
        NIC2_MBOX_SEG_NUM = 11,
        I2C_SEG_NUM = 12,
        MEMC_SEG_NUM = 13,
        PBUS_SEG_NUM = 14,
        MDE_SEG_NUM = 15,
        NIC1_CONTROL_SEG_NUM = 16,
        NIC2_CONTROL_SEG_NUM = 17,
        NIC1_XGMAC_SEG_NUM = 18,
        NIC2_XGMAC_SEG_NUM = 19,
        WCS_RAM_SEG_NUM = 20,
        MEMC_RAM_SEG_NUM = 21,
        XAUI_AN_SEG_NUM = 22,
        XAUI_HSS_PCS_SEG_NUM = 23,
        XFI_AN_SEG_NUM = 24,
        XFI_TRAIN_SEG_NUM = 25,
        XFI_HSS_PCS_SEG_NUM = 26,
        XFI_HSS_TX_SEG_NUM = 27,
        XFI_HSS_RX_SEG_NUM = 28,
        XFI_HSS_PLL_SEG_NUM = 29,
        MISC_NIC_INFO_SEG_NUM = 30,
        INTR_STATES_SEG_NUM = 31,
        CAM_ENTRIES_SEG_NUM = 32,
        ROUTING_WORDS_SEG_NUM = 33,
        ETS_SEG_NUM = 34,
        PROBE_DUMP_SEG_NUM = 35,
        ROUTING_INDEX_SEG_NUM = 36,
        MAC_PROTOCOL_SEG_NUM = 37,
        XAUI2_AN_SEG_NUM = 38,
        XAUI2_HSS_PCS_SEG_NUM = 39,
        XFI2_AN_SEG_NUM = 40,
        XFI2_TRAIN_SEG_NUM = 41,
        XFI2_HSS_PCS_SEG_NUM = 42,
        XFI2_HSS_TX_SEG_NUM = 43,
        XFI2_HSS_RX_SEG_NUM = 44,
        XFI2_HSS_PLL_SEG_NUM = 45,
        SEM_REGS_SEG_NUM = 50

};

/* There are 64 generic NIC registers. */
#define NIC_REGS_DUMP_WORD_COUNT 64
/* XGMAC word count. */
#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
/* Word counts for the SERDES blocks. */
#define XG_SERDES_XAUI_AN_COUNT 14
#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
#define XG_SERDES_XFI_AN_COUNT 14
#define XG_SERDES_XFI_TRAIN_COUNT 12
#define XG_SERDES_XFI_HSS_PCS_COUNT 15
#define XG_SERDES_XFI_HSS_TX_COUNT 32
#define XG_SERDES_XFI_HSS_RX_COUNT 32
#define XG_SERDES_XFI_HSS_PLL_COUNT 32

/* There are 2 CNA ETS and 8 NIC ETS registers. */
#define ETS_REGS_DUMP_WORD_COUNT 10

/* Each probe mux entry stores the probe type plus 64 entries
 * that are each 64 bits in length. There are a total of
 * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
 */
#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
                        PRB_MX_ADDR_VALID_TOTAL)
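/* Worked totals: each probe entry is 1 + (64 * 2) = 129 words, so the probe
 * dump section holds 129 * 34 = 4386 32-bit words.
 */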
/* Each routing entry consists of 4 32-bit words.
 * They are route type, index, index word, and result.
 * There are 2 route blocks with 8 entries each and
 * 2 NIC blocks with 16 entries each.
 * The total number of entries is 48, with 4 words each.
 */
#define RT_IDX_DUMP_ENTRIES 48
#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
                        RT_IDX_DUMP_WORDS_PER_ENTRY)
/* There are 10 address blocks in filter, each with
 * different entry counts and different word-count-per-entry.
 */
#define MAC_ADDR_DUMP_ENTRIES \
        ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
        (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
        (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
        (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
        (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
        (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
        (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
        (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
        (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
        (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
                        MAC_ADDR_DUMP_WORDS_PER_ENTRY)
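/* With the enum values defined earlier, MAC_ADDR_DUMP_ENTRIES works out to
 * (512 * 3) + (32 * 2) + (4096 * 1) + (4096 * 1) + (4 * 2) + (8 * 2) +
 * (16 * 1) + (4 * 1) + (4 * 4) + (4 * 1) = 9856, so MAC_ADDR_DUMP_TOT_WORDS
 * is 9856 * 2 = 19712 words.
 */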
1727
/* Maximum of 4 functions whose semaphore registeres are
1728
* in the coredump.
1729
*/
1730
#define MAX_SEMAPHORE_FUNCTIONS 4
1731
/* Defines for access the MPI shadow registers. */
1732
#define RISC_124 0x0003007c
1733
#define RISC_127 0x0003007f
1734
#define SHADOW_OFFSET 0xb0000000
1735
#define SHADOW_REG_SHIFT 20
1736
1737
struct
ql_nic_misc
{
1738
u32
rx_ring_count
;
1739
u32
tx_ring_count
;
1740
u32
intr_count
;
1741
u32
function
;
1742
};
1743
1744
struct
ql_reg_dump
{
1745
1746
/* segment 0 */
1747
struct
mpi_coredump_global_header
mpi_global_header
;
1748
1749
/* segment 16 */
1750
struct
mpi_coredump_segment_header
nic_regs_seg_hdr
;
1751
u32
nic_regs
[64];
1752
1753
/* segment 30 */
1754
struct
mpi_coredump_segment_header
misc_nic_seg_hdr
;
1755
struct
ql_nic_misc
misc_nic_info
;
1756
1757
/* segment 31 */
1758
/* one interrupt state for each CQ */
1759
struct
mpi_coredump_segment_header
intr_states_seg_hdr
;
1760
u32
intr_states
[
MAX_CPUS
];
1761
1762
/* segment 32 */
1763
/* 3 cam words each for 16 unicast,
1764
* 2 cam words for each of 32 multicast.
1765
*/
1766
struct
mpi_coredump_segment_header
cam_entries_seg_hdr
;
1767
u32
cam_entries
[(16 * 3) + (32 * 3)];
1768
1769
/* segment 33 */
1770
struct
mpi_coredump_segment_header
nic_routing_words_seg_hdr
;
1771
u32
nic_routing_words
[16];
1772
1773
/* segment 34 */
1774
struct
mpi_coredump_segment_header
ets_seg_hdr
;
	u32 ets[8+2];
};

struct ql_mpi_coredump {
	/* segment 0 */
	struct mpi_coredump_global_header mpi_global_header;

	/* segment 1 */
	struct mpi_coredump_segment_header core_regs_seg_hdr;
	u32 mpi_core_regs[MPI_CORE_REGS_CNT];
	u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];

	/* segment 2 */
	struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
	u32 test_logic_regs[TEST_REGS_CNT];

	/* segment 3 */
	struct mpi_coredump_segment_header rmii_regs_seg_hdr;
	u32 rmii_regs[RMII_REGS_CNT];

	/* segment 4 */
	struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
	u32 fcmac1_regs[FCMAC_REGS_CNT];

	/* segment 5 */
	struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
	u32 fcmac2_regs[FCMAC_REGS_CNT];

	/* segment 6 */
	struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
	u32 fc1_mbx_regs[FC_MBX_REGS_CNT];

	/* segment 7 */
	struct mpi_coredump_segment_header ide_regs_seg_hdr;
	u32 ide_regs[IDE_REGS_CNT];

	/* segment 8 */
	struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
	u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];

	/* segment 9 */
	struct mpi_coredump_segment_header smbus_regs_seg_hdr;
	u32 smbus_regs[SMBUS_REGS_CNT];

	/* segment 10 */
	struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
	u32 fc2_mbx_regs[FC_MBX_REGS_CNT];

	/* segment 11 */
	struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
	u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];

	/* segment 12 */
	struct mpi_coredump_segment_header i2c_regs_seg_hdr;
	u32 i2c_regs[I2C_REGS_CNT];
	/* segment 13 */
	struct mpi_coredump_segment_header memc_regs_seg_hdr;
	u32 memc_regs[MEMC_REGS_CNT];

	/* segment 14 */
	struct mpi_coredump_segment_header pbus_regs_seg_hdr;
	u32 pbus_regs[PBUS_REGS_CNT];

	/* segment 15 */
	struct mpi_coredump_segment_header mde_regs_seg_hdr;
	u32 mde_regs[MDE_REGS_CNT];

	/* segment 16 */
	struct mpi_coredump_segment_header nic_regs_seg_hdr;
	u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];

	/* segment 17 */
	struct mpi_coredump_segment_header nic2_regs_seg_hdr;
	u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];

	/* segment 18 */
	struct mpi_coredump_segment_header xgmac1_seg_hdr;
	u32 xgmac1[XGMAC_DUMP_WORD_COUNT];

	/* segment 19 */
	struct mpi_coredump_segment_header xgmac2_seg_hdr;
	u32 xgmac2[XGMAC_DUMP_WORD_COUNT];

	/* segment 20 */
	struct mpi_coredump_segment_header code_ram_seg_hdr;
	u32 code_ram[CODE_RAM_CNT];

	/* segment 21 */
	struct mpi_coredump_segment_header memc_ram_seg_hdr;
	u32 memc_ram[MEMC_RAM_CNT];

	/* segment 22 */
	struct mpi_coredump_segment_header xaui_an_hdr;
	u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];

	/* segment 23 */
	struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
	u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];

	/* segment 24 */
	struct mpi_coredump_segment_header xfi_an_hdr;
	u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];

	/* segment 25 */
	struct mpi_coredump_segment_header xfi_train_hdr;
	u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];

	/* segment 26 */
	struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
	u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];

	/* segment 27 */
	struct mpi_coredump_segment_header xfi_hss_tx_hdr;
	u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];

	/* segment 28 */
	struct mpi_coredump_segment_header xfi_hss_rx_hdr;
	u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];

	/* segment 29 */
	struct mpi_coredump_segment_header xfi_hss_pll_hdr;
	u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];

	/* segment 30 */
	struct mpi_coredump_segment_header misc_nic_seg_hdr;
	struct ql_nic_misc misc_nic_info;

	/* segment 31 */
	/* one interrupt state for each CQ */
	struct mpi_coredump_segment_header intr_states_seg_hdr;
	u32 intr_states[MAX_RX_RINGS];

	/* segment 32 */
	/* 3 cam words each for 16 unicast,
	 * 2 cam words for each of 32 multicast.
	 */
	struct mpi_coredump_segment_header cam_entries_seg_hdr;
	u32 cam_entries[(16 * 3) + (32 * 3)];

	/* segment 33 */
	struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
	u32 nic_routing_words[16];
	/* segment 34 */
	struct mpi_coredump_segment_header ets_seg_hdr;
	u32 ets[ETS_REGS_DUMP_WORD_COUNT];

	/* segment 35 */
	struct mpi_coredump_segment_header probe_dump_seg_hdr;
	u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];

	/* segment 36 */
	struct mpi_coredump_segment_header routing_reg_seg_hdr;
	u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];

	/* segment 37 */
	struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
	u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];

	/* segment 38 */
	struct mpi_coredump_segment_header xaui2_an_hdr;
	u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];

	/* segment 39 */
	struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
	u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];

	/* segment 40 */
	struct mpi_coredump_segment_header xfi2_an_hdr;
	u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];

	/* segment 41 */
	struct mpi_coredump_segment_header xfi2_train_hdr;
	u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];

	/* segment 42 */
	struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
	u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];

	/* segment 43 */
	struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
	u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];

	/* segment 44 */
	struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
	u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];

	/* segment 45 */
	struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
	u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];

	/* segment 50 */
	/* semaphore register for all 5 functions */
	struct mpi_coredump_segment_header sem_regs_seg_hdr;
	u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
};

/*
 * intr_context structure is used during initialization
 * to hook the interrupts.  It is also used in a single
 * irq environment as a context to the ISR.
 */
struct intr_context {
	struct ql_adapter *qdev;
	u32 intr;
	u32 irq_mask;		/* Mask of which rings the vector services. */
	u32 hooked;
	u32 intr_en_mask;	/* value/mask used to enable this intr */
	u32 intr_dis_mask;	/* value/mask used to disable this intr */
	u32 intr_read_mask;	/* value/mask used to read this intr */
	char name[IFNAMSIZ * 2];
	atomic_t irq_cnt;	/* irq_cnt is used in single vector
				 * environment.  It's incremented for each
				 * irq handler that is scheduled.  When each
				 * handler finishes it decrements irq_cnt and
				 * enables interrupts if it's zero. */
	irq_handler_t handler;
};

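/*
 * Sketch of the single-vector accounting described in the irq_cnt comment
 * above (illustrative only, not the driver's actual code; the helper name
 * is hypothetical): each scheduled handler bumps irq_cnt, and the last
 * handler to finish is the one that re-enables the hardware interrupt
 * using intr_en_mask.
 */
static inline bool ql_irq_last_handler_done_example(struct intr_context *ctx)
{
	/* Returns true when this was the last outstanding handler. */
	return atomic_dec_and_test(&ctx->irq_cnt);
}
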
/* adapter flags definitions. */
enum {
	QL_ADAPTER_UP = 0,	/* Adapter has been brought up. */
	QL_LEGACY_ENABLED = 1,
	QL_MSI_ENABLED = 2,
	QL_MSIX_ENABLED = 3,
	QL_DMA64 = 4,
	QL_PROMISCUOUS = 5,
	QL_ALLMULTI = 6,
	QL_PORT_CFG = 7,
	QL_CAM_RT_SET = 8,
	QL_SELFTEST = 9,
	QL_LB_LINK_UP = 10,
	QL_FRC_COREDUMP = 11,
	QL_EEH_FATAL = 12,
	QL_ASIC_RECOVERY = 14,	/* We are in ASIC recovery. */
};

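/*
 * Usage sketch (illustrative, not code from this file): the values above
 * are bit numbers rather than masks, so adapter state is tested and
 * changed with the kernel bitops on ql_adapter.flags, e.g.:
 *
 *	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
 *		...adapter is running...
 *	set_bit(QL_PROMISCUOUS, &qdev->flags);
 */
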
/* link_status bit definitions */
enum {
	STS_LOOPBACK_MASK = 0x00000700,
	STS_LOOPBACK_PCS = 0x00000100,
	STS_LOOPBACK_HSS = 0x00000200,
	STS_LOOPBACK_EXT = 0x00000300,
	STS_PAUSE_MASK = 0x000000c0,
	STS_PAUSE_STD = 0x00000040,
	STS_PAUSE_PRI = 0x00000080,
	STS_SPEED_MASK = 0x00000038,
	STS_SPEED_100Mb = 0x00000000,
	STS_SPEED_1Gb = 0x00000008,
	STS_SPEED_10Gb = 0x00000010,
	STS_LINK_TYPE_MASK = 0x00000007,
	STS_LINK_TYPE_XFI = 0x00000001,
	STS_LINK_TYPE_XAUI = 0x00000002,
	STS_LINK_TYPE_XFI_BP = 0x00000003,
	STS_LINK_TYPE_XAUI_BP = 0x00000004,
	STS_LINK_TYPE_10GBASET = 0x00000005,
};

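/*
 * Decoding sketch (illustrative helper, not part of the original file):
 * link_status packs several fields into one word, so each field is
 * extracted by masking with its *_MASK value and comparing against the
 * specific code, e.g. for the speed field:
 */
static inline bool ql_link_is_10g_example(u32 link_status)
{
	return (link_status & STS_SPEED_MASK) == STS_SPEED_10Gb;
}
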
/* link_config bit definitions */
enum {
	CFG_JUMBO_FRAME_SIZE = 0x00010000,
	CFG_PAUSE_MASK = 0x00000060,
	CFG_PAUSE_STD = 0x00000020,
	CFG_PAUSE_PRI = 0x00000040,
	CFG_DCBX = 0x00000010,
	CFG_LOOPBACK_MASK = 0x00000007,
	CFG_LOOPBACK_PCS = 0x00000002,
	CFG_LOOPBACK_HSS = 0x00000004,
	CFG_LOOPBACK_EXT = 0x00000006,
	CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
};

struct nic_operations {
	int (*get_flash) (struct ql_adapter *);
	int (*port_initialize) (struct ql_adapter *);
};

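/*
 * Usage sketch (illustrative): hardware-specific behaviour is reached
 * through the adapter's ops pointer rather than called directly, e.g.
 *
 *	status = qdev->nic_ops->get_flash(qdev);
 *	status = qdev->nic_ops->port_initialize(qdev);
 *
 * so common code stays independent of which device variant (and
 * therefore which ops table) was bound at probe time.
 */
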
/*
 * The main Adapter structure definition.
 * This structure has all fields relevant to the hardware.
 */
struct ql_adapter {
	struct ricb ricb;
	unsigned long flags;
	u32 wol;

	struct nic_stats nic_stats;

	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* PCI Configuration information for this device */
	struct pci_dev *pdev;
	struct net_device *ndev;	/* Parent NET device */

	/* Hardware information */
	u32 chip_rev_id;
	u32 fw_rev_id;
	u32 func;		/* PCI function for this adapter */
	u32 alt_func;		/* PCI function for alternate adapter */
	u32 port;		/* Port number for this adapter */

	spinlock_t adapter_lock;
	spinlock_t hw_lock;
	spinlock_t stats_lock;

	/* PCI Bus Relative Register Addresses */
	void __iomem *reg_base;
	void __iomem *doorbell_area;
	u32 doorbell_area_size;

	u32 msg_enable;

	/* Page for Shadow Registers */
	void *rx_ring_shadow_reg_area;
	dma_addr_t rx_ring_shadow_reg_dma;
	void *tx_ring_shadow_reg_area;
	dma_addr_t tx_ring_shadow_reg_dma;

	u32 mailbox_in;
	u32 mailbox_out;
	struct mbox_params idc_mbc;
	struct mutex mpi_mutex;

	int tx_ring_size;
	int rx_ring_size;
	u32 intr_count;
	struct msix_entry *msi_x_entry;
	struct intr_context intr_context[MAX_RX_RINGS];

	int tx_ring_count;	/* One per online CPU. */
	u32 rss_ring_count;	/* One per irq vector.  */
	/*
	 * rx_ring_count =
	 *  (CPU count * outbound completion rx_ring) +
	 *  (irq_vector_cnt * inbound (RSS) completion rx_ring)
	 */
	int rx_ring_count;
	int ring_mem_size;
	void *ring_mem;

	struct rx_ring rx_ring[MAX_RX_RINGS];
	struct tx_ring tx_ring[MAX_TX_RINGS];
	unsigned int lbq_buf_order;

	int rx_csum;
	u32 default_rx_queue;

	u16 rx_coalesce_usecs;		/* cqicb->int_delay */
	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
	u16 tx_coalesce_usecs;		/* cqicb->int_delay */
	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */

	u32 xg_sem_mask;
	u32 port_link_up;
	u32 port_init;
	u32 link_status;
	struct ql_mpi_coredump *mpi_coredump;
	u32 core_is_dumped;
	u32 link_config;
	u32 led_config;
	u32 max_frame_size;

	union flash_params flash;

	struct workqueue_struct *workqueue;
	struct delayed_work asic_reset_work;
	struct delayed_work mpi_reset_work;
	struct delayed_work mpi_work;
	struct delayed_work mpi_port_cfg_work;
	struct delayed_work mpi_idc_work;
	struct delayed_work mpi_core_to_log;
	struct completion ide_completion;
	const struct nic_operations *nic_ops;
	u16 device_id;
	struct timer_list timer;
	atomic_t lb_count;
	/* Keep local copy of current mac address. */
	char current_mac_addr[6];
};

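/*
 * Worked example for the rx_ring_count comment above (illustrative
 * numbers, not configuration taken from this file): on a 4-CPU system
 * granted 4 MSI-X vectors, the driver would use 4 outbound-completion
 * rings (one per CPU) plus 4 inbound RSS rings (one per vector), giving
 * rx_ring_count = 4 + 4 = 8, well inside MAX_RX_RINGS (17 when MAX_CPUS
 * is 8).
 */
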
/*
 * Typical Register accessor for memory mapped device.
 */
static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
{
	return readl(qdev->reg_base + reg);
}

/*
 * Typical Register accessor for memory mapped device.
 */
static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
{
	writel(val, qdev->reg_base + reg);
}

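/*
 * Composition sketch (illustrative helper, not part of the original
 * driver API): ql_read32()/ql_write32() typically combine into a
 * read-modify-write of a control register; callers would normally
 * serialize such updates, e.g. under qdev->hw_lock.
 */
static inline void ql_set_reg_bits_example(struct ql_adapter *qdev,
					   int reg, u32 bits)
{
	u32 val = ql_read32(qdev, reg);

	ql_write32(qdev, reg, val | bits);
}
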
/*
 * Doorbell Registers:
 * Doorbell registers are virtual registers in the PCI memory space.
 * The space is allocated by the chip during PCI initialization.  The
 * device driver finds the doorbell address in BAR 3 in PCI config space.
 * The registers are used to control outbound and inbound queues. For
 * example, the producer index for an outbound queue.  Each queue uses
 * 1 4k chunk of memory.  The lower half of the space is for outbound
 * queues. The upper half is for inbound queues.
 */
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
	writel(val, addr);
	mmiowb();
}

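/*
 * Usage sketch (illustrative; the helper name and parameters are
 * assumptions, not definitions from this file): after new descriptors
 * are posted, a ring's producer index is published to the chip by
 * writing it to that ring's doorbell address.
 */
static inline void ql_ring_doorbell_example(void __iomem *prod_idx_db_reg,
					    u16 prod_idx)
{
	/* The write (plus mmiowb) makes the new index visible to the chip. */
	ql_write_db_reg(prod_idx, prod_idx_db_reg);
}
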
/*
 * Shadow Registers:
 * Outbound queues have a consumer index that is maintained by the chip.
 * Inbound queues have a producer index that is maintained by the chip.
 * For lower overhead, these registers are "shadowed" to host memory
 * which allows the device driver to track the queue progress without
 * PCI reads. When an entry is placed on an inbound queue, the chip will
 * update the relevant index register and then copy the value to the
 * shadow register in host memory.
 */
static inline u32 ql_read_sh_reg(__le32 *addr)
{
	u32 reg;
	reg = le32_to_cpu(*addr);
	rmb();
	return reg;
}

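/*
 * Usage sketch (illustrative; the names are assumptions, not fields
 * defined here): the chip-maintained shadow producer index is compared
 * against the driver's consumer index to decide whether a completion
 * queue has new entries, with no PCI read on the fast path.
 */
static inline bool ql_cq_has_work_example(__le32 *prod_idx_sh_reg,
					  u32 cnsmr_idx)
{
	return ql_read_sh_reg(prod_idx_sh_reg) != cnsmr_idx;
}
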
extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern const struct ethtool_ops qlge_ethtool_ops;

extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			       u32 *value);
extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
			u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
void ql_mpi_idc_work(struct work_struct *work);
void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
			  u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
		 struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
int ql_mb_get_led_cfg(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *qdev);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
void ql_gen_reg_dump(struct ql_adapter *qdev,
		     struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);

/* #define QL_ALL_DUMP */
/* #define QL_REG_DUMP */
/* #define QL_DEV_DUMP */
/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */

#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
extern void ql_dump_routing_entries(struct ql_adapter *qdev);
extern void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
#else
#define QL_DUMP_REGS(qdev)
#define QL_DUMP_ROUTE(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
#endif

#ifdef QL_STAT_DUMP
extern void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif

#ifdef QL_DEV_DUMP
extern void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif

#ifdef QL_CB_DUMP
extern void ql_dump_wqicb(struct wqicb *wqicb);
extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
extern void ql_dump_ricb(struct ricb *ricb);
extern void ql_dump_cqicb(struct cqicb *cqicb);
extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
		ql_dump_hw_cb(qdev, size, bit, q_id)
#else
#define QL_DUMP_RICB(ricb)
#define QL_DUMP_WQICB(wqicb)
#define QL_DUMP_TX_RING(tx_ring)
#define QL_DUMP_CQICB(cqicb)
#define QL_DUMP_RX_RING(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
#endif

#ifdef QL_OB_DUMP
extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
#endif

#ifdef QL_IB_DUMP
extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif

#ifdef QL_ALL_DUMP
extern void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
#endif

#endif /* _QLGE_H_ */