Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
drivers
infiniband
hw
qib
qib.h
Go to the documentation of this file.
1
#ifndef _QIB_KERNEL_H
2
#define _QIB_KERNEL_H
3
/*
4
* Copyright (c) 2012 Intel Corporation. All rights reserved.
5
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
6
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
7
*
8
* This software is available to you under a choice of one of two
9
* licenses. You may choose to be licensed under the terms of the GNU
10
* General Public License (GPL) Version 2, available from the file
11
* COPYING in the main directory of this source tree, or the
12
* OpenIB.org BSD license below:
13
*
14
* Redistribution and use in source and binary forms, with or
15
* without modification, are permitted provided that the following
16
* conditions are met:
17
*
18
* - Redistributions of source code must retain the above
19
* copyright notice, this list of conditions and the following
20
* disclaimer.
21
*
22
* - Redistributions in binary form must reproduce the above
23
* copyright notice, this list of conditions and the following
24
* disclaimer in the documentation and/or other materials
25
* provided with the distribution.
26
*
27
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34
* SOFTWARE.
35
*/
36
37
/*
38
* This header file is the base header file for qlogic_ib kernel code
39
* qib_user.h serves a similar purpose for user code.
40
*/
41
42
#include <
linux/interrupt.h
>
43
#include <linux/pci.h>
44
#include <
linux/dma-mapping.h
>
45
#include <
linux/mutex.h
>
46
#include <linux/list.h>
47
#include <
linux/scatterlist.h
>
48
#include <linux/slab.h>
49
#include <
linux/io.h
>
50
#include <linux/fs.h>
51
#include <
linux/completion.h
>
52
#include <
linux/kref.h
>
53
#include <linux/sched.h>
54
55
#include "
qib_common.h
"
56
#include "
qib_verbs.h
"
57
58
/* only s/w major version of QLogic_IB we can handle */
#define QIB_CHIP_VERS_MAJ 2U

/* don't care about this except printing */
#define QIB_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define QIB_OUI 0x001175
#define QIB_OUI_LSB 40
/*
 * per driver stats, either not device nor port-specific, or
 * summed over all of the devices and ports.
 * They are described by name via ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted qib_statnames[] in qib_fs.c must
 * change to match.
 */
struct qlogic_ib_stats {
        __u64 sps_ints;      /* number of interrupts handled */
        __u64 sps_errints;   /* number of error interrupts */
        __u64 sps_txerrs;    /* tx-related packet errors */
        __u64 sps_rcverrs;   /* non-crc rcv packet errors */
        __u64 sps_hwerrs;    /* hardware errors reported (parity, etc.) */
        __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
        __u64 sps_ctxts;     /* number of contexts currently open */
        __u64 sps_lenerrs;   /* number of kernel packets where RHF != LRH len */
        __u64 sps_buffull;   /* NOTE(review): presumably eager-buffer-full events — confirm against qib_statnames[] */
        __u64 sps_hdrfull;   /* NOTE(review): presumably hdrq-full events — confirm against qib_statnames[] */
};

/* single driver-wide instance of the stats above, defined in the driver core */
extern struct qlogic_ib_stats qib_stats;
/* PCI error-recovery callbacks registered with the PCI core */
extern const struct pci_error_handlers qib_pci_err_handler;
/* the qib PCI driver object itself */
extern struct pci_driver qib_driver;
#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define QIB_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Struct used to indicate which errors are logged in each of the
 * error-counters that are logged to EEPROM. A counter is incremented
 * _once_ (saturating at 255) for each event with any bits set in
 * the error or hwerror register masks below.
 */
#define QIB_EEP_LOG_CNT (4)
struct qib_eep_log_mask {
        u64 errs_to_log;   /* bitmask over the error register */
        u64 hwerrs_to_log; /* bitmask over the hwerror register */
};
/*
 * Below contains all data related to a single context (formerly called port).
 */
struct qib_ctxtdata {
        void **rcvegrbuf;              /* eager receive buffers (array of chunk kvaddrs) */
        dma_addr_t *rcvegrbuf_phys;    /* DMA addresses of the chunks above */
        /* rcvhdrq base, needs mmap before useful */
        void *rcvhdrq;
        /* kernel virtual address where hdrqtail is updated */
        void *rcvhdrtail_kvaddr;
        /*
         * temp buffer for expected send setup, allocated at open, instead
         * of each setup call
         */
        void *tid_pg_list;
        /*
         * Shared page for kernel to signal user processes that send buffers
         * need disarming. The process should call QIB_CMD_DISARM_BUFS
         * or QIB_CMD_ACK_EVENT with IPATH_EVENT_DISARM_BUFS set.
         */
        unsigned long *user_event_mask;
        /* when waiting for rcv or pioavail */
        wait_queue_head_t wait;
        /*
         * rcvegr bufs base, physical, must fit
         * in 44 bits so 32 bit programs mmap64 44 bit works)
         */
        dma_addr_t rcvegr_phys;
        /* mmap of hdrq, must fit in 44 bits */
        dma_addr_t rcvhdrq_phys;
        dma_addr_t rcvhdrqtailaddr_phys;

        /*
         * number of opens (including slave sub-contexts) on this instance
         * (ignoring forks, dup, etc. for now)
         */
        int cnt;
        /*
         * how much space to leave at start of eager TID entries for
         * protocol use, on each TID
         */
        /* instead of calculating it */
        /* NOTE(review): the two comments above appear stale/misplaced relative
         * to 'ctxt' (the context number) — verify against driver history */
        unsigned ctxt;
        /* non-zero if ctxt is being shared. */
        u16 subctxt_cnt;
        /* non-zero if ctxt is being shared. */
        u16 subctxt_id;
        /* number of eager TID entries. */
        u16 rcvegrcnt;
        /* index of first eager TID entry. */
        u16 rcvegr_tid_base;
        /* number of pio bufs for this ctxt (all procs, if shared) */
        u32 piocnt;
        /* first pio buffer for this ctxt */
        u32 pio_base;
        /* chip offset of PIO buffers for this ctxt */
        u32 piobufs;
        /* how many alloc_pages() chunks in rcvegrbuf_pages */
        u32 rcvegrbuf_chunks;
        /* how many egrbufs per chunk */
        u16 rcvegrbufs_perchunk;
        /* ilog2 of above */
        u16 rcvegrbufs_perchunk_shift;
        /* order for rcvegrbuf_pages */
        size_t rcvegrbuf_size;
        /* rcvhdrq size (for freeing) */
        size_t rcvhdrq_size;
        /* per-context flags for fileops/intr communication */
        unsigned long flag;
        /* next expected TID to check when looking for free */
        u32 tidcursor;
        /* WAIT_RCV that timed out, no interrupt */
        u32 rcvwait_to;
        /* WAIT_PIO that timed out, no interrupt */
        u32 piowait_to;
        /* WAIT_RCV already happened, no wait */
        u32 rcvnowait;
        /* WAIT_PIO already happened, no wait */
        u32 pionowait;
        /* total number of polled urgent packets */
        u32 urgent;
        /* saved total number of polled urgent packets for poll edge trigger */
        u32 urgent_poll;
        /* pid of process using this ctxt */
        pid_t pid;
        pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
        /* same size as task_struct .comm[], command that opened context */
        char comm[16];
        /* pkeys set by this use of this ctxt */
        u16 pkeys[4];
        /* so file ops can get at unit */
        struct qib_devdata *dd;
        /* so funcs that need physical port can get it easily */
        struct qib_pportdata *ppd;
        /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
        void *subctxt_uregbase;
        /* An array of pages for the eager receive buffers * N */
        void *subctxt_rcvegrbuf;
        /* An array of pages for the eager header queue entries * N */
        void *subctxt_rcvhdr_base;
        /* The version of the library which opened this ctxt */
        u32 userversion;
        /* Bitmask of active slaves */
        u32 active_slaves;
        /* Type of packets or conditions we want to poll for */
        u16 poll_type;
        /* receive packet sequence counter */
        u8 seq_cnt;
        u8 redirect_seq_cnt;
        /* ctxt rcvhdrq head offset */
        u32 head;
        u32 pkt_count; /* NOTE(review): presumably packets processed since last head update — confirm in qib_driver.c */
        /* lookaside fields */
        struct qib_qp *lookaside_qp;
        u32 lookaside_qpn;
        /* QPs waiting for context processing */
        struct list_head qp_wait_list;
};
/* forward declaration; defined in qib_verbs.h */
struct qib_sge_state;

/*
 * One send-DMA transfer request; embedded in the larger verbs tx
 * request below and queued on the per-port sdma_activelist.
 */
struct qib_sdma_txreq {
        int flags;                /* QIB_SDMA_TXREQ_F_* bits */
        int sg_count;
        dma_addr_t addr;
        void (*callback)(struct qib_sdma_txreq *, int); /* completion hook; second arg is a QIB_SDMA_TXREQ_S_* status */
        u16 start_idx;            /* sdma private */
        u16 next_descq_idx;       /* sdma private */
        struct list_head list;    /* sdma private */
};

/* one hardware SDMA descriptor: two little-endian qwords */
struct qib_sdma_desc {
        __le64 qw[2];
};

/* verbs-layer wrapper around an SDMA request */
struct qib_verbs_txreq {
        struct qib_sdma_txreq txreq;     /* must stay embedded; SDMA code works on this member */
        struct qib_qp *qp;
        struct qib_swqe *wqe;
        u32 dwords;
        u16 hdr_dwords;
        u16 hdr_inx;
        struct qib_pio_header *align_buf;
        struct qib_mregion *mr;
        struct qib_sge_state *ss;
};
/* flag bits for qib_sdma_txreq.flags */
#define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1
#define QIB_SDMA_TXREQ_F_HEADTOHOST 0x2
#define QIB_SDMA_TXREQ_F_INTREQ 0x4
#define QIB_SDMA_TXREQ_F_FREEBUF 0x8
#define QIB_SDMA_TXREQ_F_FREEDESC 0x10

/* status values passed to qib_sdma_txreq.callback */
#define QIB_SDMA_TXREQ_S_OK 0
#define QIB_SDMA_TXREQ_S_SENDERROR 1
#define QIB_SDMA_TXREQ_S_ABORTED 2
#define QIB_SDMA_TXREQ_S_SHUTDOWN 3
/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define QIB_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define QIB_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define QIB_IB_CFG_LWID 3 /* currently active Link-width */
#define QIB_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define QIB_IB_CFG_SPD 5 /* current Link spd */
#define QIB_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define QIB_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define QIB_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define QIB_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define QIB_IB_CFG_OP_VLS 10 /* operational VLs */
#define QIB_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define QIB_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define QIB_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define QIB_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define QIB_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define QIB_IB_CFG_PKEYS 16 /* update partition keys */
#define QIB_IB_CFG_MTU 17 /* update MTU in IBC */
#define QIB_IB_CFG_LSTATE 18 /* update linkcmd and linkinitcmd in IBC */
#define QIB_IB_CFG_VL_HIGH_LIMIT 19
#define QIB_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define QIB_IB_CFG_PORT 21 /* switch port we are connected to */
/*
 * for CFG_LSTATE: LINKCMD in upper 16 bits, LINKINITCMD in lower 16
 * IB_LINKINITCMD_POLL and SLEEP are also used as set/get values for
 * QIB_IB_CFG_LINKDEFAULT cmd
 */
#define IB_LINKCMD_DOWN (0 << 16)
#define IB_LINKCMD_ARMED (1 << 16)
#define IB_LINKCMD_ACTIVE (2 << 16)
#define IB_LINKINITCMD_NOP 0
#define IB_LINKINITCMD_POLL 1
#define IB_LINKINITCMD_SLEEP 2
#define IB_LINKINITCMD_DISABLE 3

/*
 * valid states passed to qib_set_linkstate() user call
 */
#define QIB_IB_LINKDOWN 0
#define QIB_IB_LINKARM 1
#define QIB_IB_LINKACTIVE 2
#define QIB_IB_LINKDOWN_ONLY 3
#define QIB_IB_LINKDOWN_SLEEP 4
#define QIB_IB_LINKDOWN_DISABLE 5
/*
 * These 7 values (SDR, DDR, and QDR may be ORed for auto-speed
 * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
 * with cmd QIB_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
 * are also the possible values for qib_link_speed_enabled and active
 * The values were chosen to match values used within the IB spec.
 */
#define QIB_IB_SDR 1
#define QIB_IB_DDR 2
#define QIB_IB_QDR 4

#define QIB_DEFAULT_MTU 4096

/* max number of IB ports supported per HCA */
#define QIB_MAX_IB_PORTS 2

/*
 * Possible IB config parameters for f_get/set_ib_table()
 */
#define QIB_IB_TBL_VL_HIGH_ARB 1 /* Get/set VL high priority weights */
#define QIB_IB_TBL_VL_LOW_ARB 2 /* Get/set VL low priority weights */
/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * QIB_RCVCTRL_INTRAVAIL_ENB | QIB_RCVCTRL_CTXT_ENB
 */
#define QIB_RCVCTRL_TAILUPD_ENB 0x01
#define QIB_RCVCTRL_TAILUPD_DIS 0x02
#define QIB_RCVCTRL_CTXT_ENB 0x04
#define QIB_RCVCTRL_CTXT_DIS 0x08
#define QIB_RCVCTRL_INTRAVAIL_ENB 0x10
#define QIB_RCVCTRL_INTRAVAIL_DIS 0x20
#define QIB_RCVCTRL_PKEY_ENB 0x40 /* Note, default is enabled */
#define QIB_RCVCTRL_PKEY_DIS 0x80
#define QIB_RCVCTRL_BP_ENB 0x0100
#define QIB_RCVCTRL_BP_DIS 0x0200
#define QIB_RCVCTRL_TIDFLOW_ENB 0x0400
#define QIB_RCVCTRL_TIDFLOW_DIS 0x0800
/*
 * Possible "operations" for f_sendctrl(ppd, op, var)
 * these are bits so they can be combined, e.g.
 * QIB_SENDCTRL_BUFAVAIL_ENB | QIB_SENDCTRL_ENB
 * Some operations (e.g. DISARM, ABORT) are known to
 * be "one-shot", so do not modify shadow.
 */
#define QIB_SENDCTRL_DISARM (0x1000)
#define QIB_SENDCTRL_DISARM_BUF(bufn) ((bufn) | QIB_SENDCTRL_DISARM)
/* available (0x2000) */
#define QIB_SENDCTRL_AVAIL_DIS (0x4000)
#define QIB_SENDCTRL_AVAIL_ENB (0x8000)
#define QIB_SENDCTRL_AVAIL_BLIP (0x10000)
#define QIB_SENDCTRL_SEND_DIS (0x20000)
#define QIB_SENDCTRL_SEND_ENB (0x40000)
#define QIB_SENDCTRL_FLUSH (0x80000)
#define QIB_SENDCTRL_CLEAR (0x100000)
#define QIB_SENDCTRL_DISARM_ALL (0x200000)
/*
 * These are the generic indices for requesting per-port
 * counter values via the f_portcntr function. They
 * are always returned as 64 bit values, although most
 * are 32 bit counters.
 */
/* send-related counters */
#define QIBPORTCNTR_PKTSEND 0U
#define QIBPORTCNTR_WORDSEND 1U
#define QIBPORTCNTR_PSXMITDATA 2U
#define QIBPORTCNTR_PSXMITPKTS 3U
#define QIBPORTCNTR_PSXMITWAIT 4U
#define QIBPORTCNTR_SENDSTALL 5U
/* receive-related counters */
#define QIBPORTCNTR_PKTRCV 6U
#define QIBPORTCNTR_PSRCVDATA 7U
#define QIBPORTCNTR_PSRCVPKTS 8U
#define QIBPORTCNTR_RCVEBP 9U
#define QIBPORTCNTR_RCVOVFL 10U
#define QIBPORTCNTR_WORDRCV 11U
/* IB link related error counters */
#define QIBPORTCNTR_RXLOCALPHYERR 12U
#define QIBPORTCNTR_RXVLERR 13U
#define QIBPORTCNTR_ERRICRC 14U
#define QIBPORTCNTR_ERRVCRC 15U
#define QIBPORTCNTR_ERRLPCRC 16U
#define QIBPORTCNTR_BADFORMAT 17U
#define QIBPORTCNTR_ERR_RLEN 18U
#define QIBPORTCNTR_IBSYMBOLERR 19U
#define QIBPORTCNTR_INVALIDRLEN 20U
#define QIBPORTCNTR_UNSUPVL 21U
#define QIBPORTCNTR_EXCESSBUFOVFL 22U
#define QIBPORTCNTR_ERRLINK 23U
#define QIBPORTCNTR_IBLINKDOWN 24U
#define QIBPORTCNTR_IBLINKERRRECOV 25U
#define QIBPORTCNTR_LLI 26U
/* other error counters */
#define QIBPORTCNTR_RXDROPPKT 27U
#define QIBPORTCNTR_VL15PKTDROP 28U
#define QIBPORTCNTR_ERRPKEY 29U
#define QIBPORTCNTR_KHDROVFL 30U
/* sampling counters (these are actually control registers) */
#define QIBPORTCNTR_PSINTERVAL 31U
#define QIBPORTCNTR_PSSTART 32U
#define QIBPORTCNTR_PSSTAT 33U

/* how often we check for packet activity for "power on hours" (in seconds) */
#define ACTIVITY_TIMER 5
#define MAX_NAME_SIZE 64
/* per-vector MSI-X bookkeeping: kernel entry, handler arg, name, affinity */
struct qib_msix_entry {
        struct msix_entry msix;
        void *arg;               /* cookie passed to the interrupt handler */
        char name[MAX_NAME_SIZE];
        cpumask_var_t mask;      /* CPU affinity mask for this vector */
};
/* Below is an opaque struct. Each chip (device) can maintain
 * private data needed for its operation, but not germane to the
 * rest of the driver. For convenience, we define another that
 * is chip-specific, per-port
 */
struct qib_chip_specific;
struct qib_chipport_specific;
/* states of the per-port SDMA state machine; the sNN numbering follows
 * the driver's state-diagram naming convention */
enum qib_sdma_states {
        qib_sdma_state_s00_hw_down,
        qib_sdma_state_s10_hw_start_up_wait,
        qib_sdma_state_s20_idle,
        qib_sdma_state_s30_sw_clean_up_wait,
        qib_sdma_state_s40_hw_clean_up_wait,
        qib_sdma_state_s50_hw_halt_wait,
        qib_sdma_state_s99_running,
};
/* events fed into the SDMA state machine; e7220/e7322 variants are
 * chip-specific error events (named after the 7220/7322 chips) */
enum qib_sdma_events {
        qib_sdma_event_e00_go_hw_down,
        qib_sdma_event_e10_go_hw_start,
        qib_sdma_event_e20_hw_started,
        qib_sdma_event_e30_go_running,
        qib_sdma_event_e40_sw_cleaned,
        qib_sdma_event_e50_hw_cleaned,
        qib_sdma_event_e60_hw_halted,
        qib_sdma_event_e70_go_idle,
        qib_sdma_event_e7220_err_halted,
        qib_sdma_event_e7322_err_halted,
        qib_sdma_event_e90_timer_tick,
};
/* printable names for the SDMA states/events above, indexed by enum value */
extern char *qib_sdma_state_names[];
extern char *qib_sdma_event_names[];
/* per-state action flags applied when the SDMA machine enters a state */
struct sdma_set_state_action {
        unsigned op_enable:1;
        unsigned op_intenable:1;
        unsigned op_halt:1;
        unsigned op_drain:1;
        unsigned go_s99_running_tofalse:1;
        unsigned go_s99_running_totrue:1;
};
/* full SDMA state-machine context, embedded per port */
struct qib_sdma_state {
        struct kref kref;                 /* refcount on this state object */
        struct completion comp;           /* signalled when last ref is dropped */
        enum qib_sdma_states current_state;
        struct sdma_set_state_action *set_state_action; /* table indexed by state */
        unsigned current_op;
        unsigned go_s99_running;
        unsigned first_sendbuf;
        unsigned last_sendbuf; /* really last +1 */
        /* debugging / devel */
        enum qib_sdma_states previous_state;
        unsigned previous_op;
        enum qib_sdma_events last_event;
};
/* congestion-stats sampling state: timer plus cached PortSamples counters */
struct xmit_wait {
        struct timer_list timer;
        u64 counter;
        u8 flags;
        struct cache {
                u64 psxmitdata;
                u64 psrcvdata;
                u64 psxmitpkts;
                u64 psrcvpkts;
                u64 psxmitwait;
        } counter_cache;
};
/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct qib_pportdata {
        struct qib_ibport ibport_data;

        struct qib_devdata *dd;
        struct qib_chippport_specific *cpspec; /* chip-specific per-port */
        struct kobject pport_kobj;
        struct kobject pport_cc_kobj;
        struct kobject sl2vl_kobj;
        struct kobject diagc_kobj;

        /* GUID for this interface, in network order */
        __be64 guid;

        /* QIB_POLL, etc. link-state specific flags, per port */
        u32 lflags;
        /* qib_lflags driver is waiting for */
        u32 state_wanted;
        spinlock_t lflags_lock;

        /* ref count for each pkey */
        atomic_t pkeyrefs[4];

        /*
         * this address is mapped readonly into user processes so they can
         * get status cheaply, whenever they want. One qword of status per port
         */
        u64 *statusp;

        /* SendDMA related entries */

        /* read mostly */
        struct qib_sdma_desc *sdma_descq;
        struct workqueue_struct *qib_wq;
        struct qib_sdma_state sdma_state;
        dma_addr_t sdma_descq_phys;
        volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
        dma_addr_t sdma_head_phys;
        u16 sdma_descq_cnt;

        /* read/write using lock */
        spinlock_t sdma_lock ____cacheline_aligned_in_smp;
        struct list_head sdma_activelist;
        u64 sdma_descq_added;
        u64 sdma_descq_removed;
        u16 sdma_descq_tail;
        u16 sdma_descq_head;
        u8 sdma_generation;

        struct tasklet_struct sdma_sw_clean_up_task
                ____cacheline_aligned_in_smp;

        wait_queue_head_t state_wait; /* for state_wanted */

        /* HoL blocking for SMP replies */
        unsigned hol_state;
        struct timer_list hol_timer;

        /*
         * Shadow copies of registers; size indicates read access size.
         * Most of them are readonly, but some are write-only register,
         * where we manipulate the bits in the shadow copy, and then write
         * the shadow copy to qlogic_ib.
         *
         * We deliberately make most of these 32 bits, since they have
         * restricted range. For any that we read, we want to generate 32
         * bit accesses, since Opteron will generate 2 separate 32 bit HT
         * transactions for a 64 bit read, and we want to avoid unnecessary
         * bus transactions.
         */

        /* This is the 64 bit group */
        /* last ibcstatus. opaque outside chip-specific code */
        u64 lastibcstat;

        /* these are the "32 bit" regs */

        /*
         * the following two are 32-bit bitmasks, but {test,clear,set}_bit
         * all expect bit fields to be "unsigned long"
         */
        unsigned long p_rcvctrl; /* shadow per-port rcvctrl */
        unsigned long p_sendctrl; /* shadow per-port sendctrl */

        u32 ibmtu; /* The MTU programmed for this unit */
        /*
         * Current max size IB packet (in bytes) including IB headers, that
         * we can send. Changes when ibmtu changes.
         */
        u32 ibmaxlen;
        /*
         * ibmaxlen at init time, limited by chip and by receive buffer
         * size. Not changed after init.
         */
        u32 init_ibmaxlen;
        /* LID programmed for this instance */
        u16 lid;
        /* list of pkeys programmed; 0 if not set */
        u16 pkeys[4];
        /* LID mask control */
        u8 lmc;
        u8 link_width_supported;
        u8 link_speed_supported;
        u8 link_width_enabled;
        u8 link_speed_enabled;
        u8 link_width_active;
        u8 link_speed_active;
        u8 vls_supported;
        u8 vls_operational;
        /* Rx Polarity inversion (compensate for ~tx on partner) */
        u8 rx_pol_inv;

        u8 hw_pidx; /* physical port index */
        u8 port; /* IB port number and index into dd->pports - 1 */

        u8 delay_mult; /* NOTE(review): presumably a per-speed delay multiplier — confirm in chip code */

        /* used to override LED behavior */
        u8 led_override; /* Substituted for normal value, if non-zero */
        u16 led_override_timeoff; /* delta to next timer event */
        u8 led_override_vals[2]; /* Alternates per blink-frame */
        u8 led_override_phase; /* Just counts, LSB picks from vals[] */
        atomic_t led_override_timer_active;
        /* Used to flash LEDs in override mode */
        struct timer_list led_override_timer;
        struct xmit_wait cong_stats;
        struct timer_list symerr_clear_timer;

        /* Synchronize access between driver writes and sysfs reads */
        spinlock_t cc_shadow_lock
                ____cacheline_aligned_in_smp;

        /* Shadow copy of the congestion control table */
        struct cc_table_shadow *ccti_entries_shadow;

        /* Shadow copy of the congestion control entries */
        struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;

        /* List of congestion control table entries */
        struct ib_cc_table_entry_shadow *ccti_entries;

        /* 16 congestion entries with each entry corresponding to a SL */
        struct ib_cc_congestion_entry_shadow *congestion_entries;

        /* Maximum number of congestion control entries that the agent expects
         * the manager to send.
         */
        u16 cc_supported_table_entries;

        /* Total number of congestion control table entries */
        u16 total_cct_entry;

        /* Bit map identifying service level */
        u16 cc_sl_control_map;

        /* maximum congestion control table index */
        u16 ccti_limit;

        /* CA's max number of 64 entry units in the congestion control table */
        u8 cc_max_table_entries;
};
/* Observers. Not to be taken lightly, possibly not to ship. */
/*
 * If a diag read or write is to (bottom <= offset <= top),
 * the "hook" is called, allowing, e.g. shadows to be
 * updated in sync with the driver. struct diag_observer
 * is the "visible" part.
 */
struct diag_observer;

/* callback invoked on diag accesses in the observer's register range */
typedef int (*diag_hook) (struct qib_devdata *dd,
        const struct diag_observer *op,
        u32 offs, u64 *data, u64 mask, int only_32);

struct diag_observer {
        diag_hook hook;  /* called for accesses in [bottom, top] */
        u32 bottom;      /* inclusive lower register offset */
        u32 top;         /* inclusive upper register offset */
};

extern int qib_register_observer(struct qib_devdata *dd,
        const struct diag_observer *op);

/* Only declared here, not defined. Private to diags */
struct diag_observer_list_elt;
/* device data struct now contains only "general per-device" info.
703
* fields related to a physical IB port are in a qib_pportdata struct,
704
* described above) while fields only used by a particular chip-type are in
705
* a qib_chipdata struct, whose contents are opaque to this file.
706
*/
707
struct
qib_devdata
{
708
struct
qib_ibdev
verbs_dev
;
/* must be first */
709
struct
list_head
list
;
710
/* pointers to related structs for this device */
711
/* pci access data structure */
712
struct
pci_dev
*
pcidev
;
713
struct
cdev
*
user_cdev
;
714
struct
cdev
*
diag_cdev
;
715
struct
device
*
user_device
;
716
struct
device
*
diag_device
;
717
718
/* mem-mapped pointer to base of chip regs */
719
u64
__iomem
*
kregbase
;
720
/* end of mem-mapped chip space excluding sendbuf and user regs */
721
u64
__iomem
*
kregend
;
722
/* physical address of chip for io_remap, etc. */
723
resource_size_t
physaddr
;
724
/* qib_cfgctxts pointers */
725
struct
qib_ctxtdata
**
rcd
;
/* Receive Context Data */
726
727
/* qib_pportdata, points to array of (physical) port-specific
728
* data structs, indexed by pidx (0..n-1)
729
*/
730
struct
qib_pportdata
*
pport
;
731
struct
qib_chip_specific
*
cspec
;
/* chip-specific */
732
733
/* kvirt address of 1st 2k pio buffer */
734
void
__iomem
*
pio2kbase
;
735
/* kvirt address of 1st 4k pio buffer */
736
void
__iomem
*
pio4kbase
;
737
/* mem-mapped pointer to base of PIO buffers (if using WC PAT) */
738
void
__iomem
*
piobase
;
739
/* mem-mapped pointer to base of user chip regs (if using WC PAT) */
740
u64
__iomem
*
userbase
;
741
void
__iomem
*
piovl15base
;
/* base of VL15 buffers, if not WC */
742
/*
743
* points to area where PIOavail registers will be DMA'ed.
744
* Has to be on a page of it's own, because the page will be
745
* mapped into user program space. This copy is *ONLY* ever
746
* written by DMA, not by the driver! Need a copy per device
747
* when we get to multiple devices
748
*/
749
volatile
__le64
*
pioavailregs_dma
;
/* DMA'ed by chip */
750
/* physical address where updates occur */
751
dma_addr_t
pioavailregs_phys
;
752
753
/* device-specific implementations of functions needed by
754
* common code. Contrary to previous consensus, we can't
755
* really just point to a device-specific table, because we
756
* may need to "bend", e.g. *_f_put_tid
757
*/
758
/* fallback to alternate interrupt type if possible */
759
int
(*
f_intr_fallback
)(
struct
qib_devdata
*);
760
/* hard reset chip */
761
int
(*
f_reset
)(
struct
qib_devdata
*);
762
void
(*
f_quiet_serdes
)(
struct
qib_pportdata
*);
763
int
(*
f_bringup_serdes
)(
struct
qib_pportdata
*);
764
int
(*
f_early_init
)(
struct
qib_devdata
*);
765
void
(*
f_clear_tids
)(
struct
qib_devdata
*,
struct
qib_ctxtdata
*);
766
void
(*
f_put_tid
)(
struct
qib_devdata
*,
u64
__iomem
*,
767
u32
,
unsigned
long
);
768
void
(*
f_cleanup
)(
struct
qib_devdata
*);
769
void
(*
f_setextled
)(
struct
qib_pportdata
*,
u32
);
770
/* fill out chip-specific fields */
771
int
(*
f_get_base_info
)(
struct
qib_ctxtdata
*,
struct
qib_base_info
*);
772
/* free irq */
773
void
(*
f_free_irq
)(
struct
qib_devdata
*);
774
struct
qib_message_header
*(*f_get_msgheader)
775
(
struct
qib_devdata
*,
__le32
*);
776
void
(*
f_config_ctxts
)(
struct
qib_devdata
*);
777
int
(*
f_get_ib_cfg
)(
struct
qib_pportdata
*,
int
);
778
int
(*
f_set_ib_cfg
)(
struct
qib_pportdata
*,
int
,
u32
);
779
int
(*
f_set_ib_loopback
)(
struct
qib_pportdata
*,
const
char
*);
780
int
(*
f_get_ib_table
)(
struct
qib_pportdata
*,
int
,
void
*);
781
int
(*
f_set_ib_table
)(
struct
qib_pportdata
*,
int
,
void
*);
782
u32
(*
f_iblink_state
)(
u64
);
783
u8
(*
f_ibphys_portstate
)(
u64
);
784
void
(*
f_xgxs_reset
)(
struct
qib_pportdata
*);
785
/* per chip actions needed for IB Link up/down changes */
786
int
(*
f_ib_updown
)(
struct
qib_pportdata
*,
int
,
u64
);
787
u32
__iomem
*(*f_getsendbuf)(
struct
qib_pportdata
*,
u64
,
u32
*);
788
/* Read/modify/write of GPIO pins (potentially chip-specific */
789
int
(*
f_gpio_mod
)(
struct
qib_devdata
*
dd
,
u32
out
,
u32
dir
,
790
u32
mask
);
791
/* Enable writes to config EEPROM (if supported) */
792
int
(*
f_eeprom_wen
)(
struct
qib_devdata
*
dd
,
int
wen);
793
/*
794
* modify rcvctrl shadow[s] and write to appropriate chip-regs.
795
* see above QIB_RCVCTRL_xxx_ENB/DIS for operations.
796
* (ctxt == -1) means "all contexts", only meaningful for
797
* clearing. Could remove if chip_spec shutdown properly done.
798
*/
799
void
(*
f_rcvctrl
)(
struct
qib_pportdata
*,
unsigned
int
op
,
800
int
ctxt);
801
/* Read/modify/write sendctrl appropriately for op and port. */
802
void
(*
f_sendctrl
)(
struct
qib_pportdata
*,
u32
op
);
803
void
(*
f_set_intr_state
)(
struct
qib_devdata
*,
u32
);
804
void
(*
f_set_armlaunch
)(
struct
qib_devdata
*,
u32
);
805
void
(*
f_wantpiobuf_intr
)(
struct
qib_devdata
*,
u32
);
806
int
(*
f_late_initreg
)(
struct
qib_devdata
*);
807
int
(*
f_init_sdma_regs
)(
struct
qib_pportdata
*);
808
u16
(*
f_sdma_gethead
)(
struct
qib_pportdata
*);
809
int
(*
f_sdma_busy
)(
struct
qib_pportdata
*);
810
void
(*
f_sdma_update_tail
)(
struct
qib_pportdata
*,
u16
);
811
void
(*
f_sdma_set_desc_cnt
)(
struct
qib_pportdata
*, unsigned);
812
void
(*
f_sdma_sendctrl
)(
struct
qib_pportdata
*, unsigned);
813
void
(*
f_sdma_hw_clean_up
)(
struct
qib_pportdata
*);
814
void
(*
f_sdma_hw_start_up
)(
struct
qib_pportdata
*);
815
void
(*
f_sdma_init_early
)(
struct
qib_pportdata
*);
816
void
(*
f_set_cntr_sample
)(
struct
qib_pportdata
*,
u32
,
u32
);
817
void
(*
f_update_usrhead
)(
struct
qib_ctxtdata
*,
u64
,
u32
,
u32
,
u32
);
818
u32
(*
f_hdrqempty
)(
struct
qib_ctxtdata
*);
819
u64
(*
f_portcntr
)(
struct
qib_pportdata
*,
u32
);
820
u32
(*
f_read_cntrs
)(
struct
qib_devdata
*, loff_t,
char
**,
821
u64
**);
822
u32
(*
f_read_portcntrs
)(
struct
qib_devdata
*, loff_t,
u32
,
823
char
**,
u64
**);
824
u32
(*
f_setpbc_control
)(
struct
qib_pportdata
*,
u32
,
u8
,
u8
);
825
void
(*
f_initvl15_bufs
)(
struct
qib_devdata
*);
826
void
(*
f_init_ctxt
)(
struct
qib_ctxtdata
*);
827
void
(*
f_txchk_change
)(
struct
qib_devdata
*,
u32
,
u32
,
u32
,
828
struct
qib_ctxtdata
*);
829
void
(*
f_writescratch
)(
struct
qib_devdata
*,
u32
);
830
int
(*
f_tempsense_rd
)(
struct
qib_devdata
*,
int
regnum);
831
832
char
*
boardname
;
/* human readable board info */
833
834
/* template for writing TIDs */
835
u64
tidtemplate
;
836
/* value to write to free TIDs */
837
u64
tidinvalid
;
838
839
/* number of registers used for pioavail */
840
u32
pioavregs
;
841
/* device (not port) flags, basically device capabilities */
842
u32
flags
;
843
/* last buffer for user use */
844
u32
lastctxt_piobuf
;
845
846
/* saturating counter of (non-port-specific) device interrupts */
847
u32
int_counter
;
848
849
/* pio bufs allocated per ctxt */
850
u32
pbufsctxt
;
851
/* if remainder on bufs/ctxt, ctxts < extrabuf get 1 extra */
852
u32
ctxts_extrabuf
;
853
/*
854
* number of ctxts configured as max; zero is set to number chip
855
* supports, less gives more pio bufs/ctxt, etc.
856
*/
857
u32
cfgctxts
;
858
/*
859
* number of ctxts available for PSM open
860
*/
861
u32
freectxts
;
862
863
/*
864
* hint that we should update pioavailshadow before
865
* looking for a PIO buffer
866
*/
867
u32
upd_pio_shadow
;
868
869
/* internal debugging stats */
870
u32
maxpkts_call
;
871
u32
avgpkts_call
;
872
u64
nopiobufs
;
873
874
/* PCI Vendor ID (here for NodeInfo) */
875
u16
vendorid
;
876
/* PCI Device ID (here for NodeInfo) */
877
u16
deviceid
;
878
/* for write combining settings */
879
unsigned
long
wc_cookie
;
880
unsigned
long
wc_base
;
881
unsigned
long
wc_len
;
882
883
/* shadow copy of struct page *'s for exp tid pages */
884
struct
page
**
pageshadow
;
885
/* shadow copy of dma handles for exp tid pages */
886
dma_addr_t
*
physshadow
;
887
u64
__iomem
*
egrtidbase
;
888
spinlock_t
sendctrl_lock
;
/* protect changes to sendctrl shadow */
889
/* around rcd and (user ctxts) ctxt_cnt use (intr vs free) */
890
spinlock_t
uctxt_lock
;
/* rcd and user context changes */
891
/*
892
* per unit status, see also portdata statusp
893
* mapped readonly into user processes so they can get unit and
894
* IB link status cheaply
895
*/
896
u64
*
devstatusp
;
897
char
*
freezemsg
;
/* freeze msg if hw error put chip in freeze */
898
u32
freezelen
;
/* max length of freezemsg */
899
/* timer used to prevent stats overflow, error throttling, etc. */
900
struct
timer_list
stats_timer
;
901
902
/* timer to verify interrupts work, and fallback if possible */
903
struct
timer_list
intrchk_timer
;
904
unsigned
long
ureg_align
;
/* user register alignment */
905
906
/*
907
* Protects pioavailshadow, pioavailkernel, pio_need_disarm, and
908
* pio_writing.
909
*/
910
spinlock_t
pioavail_lock
;
911
/*
912
* index of last buffer to optimize search for next
913
*/
914
u32
last_pio
;
915
/*
916
* min kernel pio buffer to optimize search
917
*/
918
u32
min_kernel_pio
;
919
/*
920
* Shadow copies of registers; size indicates read access size.
921
* Most of them are readonly, but some are write-only register,
922
* where we manipulate the bits in the shadow copy, and then write
923
* the shadow copy to qlogic_ib.
924
*
925
* We deliberately make most of these 32 bits, since they have
926
* restricted range. For any that we read, we won't to generate 32
927
* bit accesses, since Opteron will generate 2 separate 32 bit HT
928
* transactions for a 64 bit read, and we want to avoid unnecessary
929
* bus transactions.
930
*/
931
932
/* This is the 64 bit group */
933
934
unsigned
long
pioavailshadow
[6];
935
/* bitmap of send buffers available for the kernel to use with PIO. */
936
unsigned
long
pioavailkernel
[6];
937
/* bitmap of send buffers which need to be disarmed. */
938
unsigned
long
pio_need_disarm
[3];
939
/* bitmap of send buffers which are being written to. */
940
unsigned
long
pio_writing
[3];
941
/* kr_revision shadow */
942
u64
revision
;
943
/* Base GUID for device (from eeprom, network order) */
944
__be64
base_guid
;
945
946
/*
947
* kr_sendpiobufbase value (chip offset of pio buffers), and the
948
* base of the 2KB buffer s(user processes only use 2K)
949
*/
950
u64
piobufbase
;
951
u32
pio2k_bufbase
;
952
953
/* these are the "32 bit" regs */
954
955
/* number of GUIDs in the flash for this interface */
956
u32
nguid
;
957
/*
958
* the following two are 32-bit bitmasks, but {test,clear,set}_bit
959
* all expect bit fields to be "unsigned long"
960
*/
961
unsigned
long
rcvctrl
;
/* shadow per device rcvctrl */
962
unsigned
long
sendctrl
;
/* shadow per device sendctrl */
963
964
/* value we put in kr_rcvhdrcnt */
965
u32
rcvhdrcnt
;
966
/* value we put in kr_rcvhdrsize */
967
u32
rcvhdrsize
;
968
/* value we put in kr_rcvhdrentsize */
969
u32
rcvhdrentsize
;
970
/* kr_ctxtcnt value */
971
u32
ctxtcnt
;
972
/* kr_pagealign value */
973
u32
palign
;
974
/* number of "2KB" PIO buffers */
975
u32
piobcnt2k
;
976
/* size in bytes of "2KB" PIO buffers */
977
u32
piosize2k
;
978
/* max usable size in dwords of a "2KB" PIO buffer before going "4KB" */
979
u32
piosize2kmax_dwords
;
980
/* number of "4KB" PIO buffers */
981
u32
piobcnt4k
;
982
/* size in bytes of "4KB" PIO buffers */
983
u32
piosize4k
;
984
/* kr_rcvegrbase value */
985
u32
rcvegrbase
;
986
/* kr_rcvtidbase value */
987
u32
rcvtidbase
;
988
/* kr_rcvtidcnt value */
989
u32
rcvtidcnt
;
990
/* kr_userregbase */
991
u32
uregbase
;
992
/* shadow the control register contents */
993
u32
control
;
994
995
/* chip address space used by 4k pio buffers */
996
u32
align4k
;
997
/* size of each rcvegrbuffer */
998
u16
rcvegrbufsize
;
999
/* log2 of above */
1000
u16
rcvegrbufsize_shift
;
1001
/* localbus width (1, 2,4,8,16,32) from config space */
1002
u32
lbus_width
;
1003
/* localbus speed in MHz */
1004
u32
lbus_speed
;
1005
int
unit
;
/* unit # of this chip */
1006
1007
/* start of CHIP_SPEC move to chipspec, but need code changes */
1008
/* low and high portions of MSI capability/vector */
1009
u32
msi_lo
;
1010
/* saved after PCIe init for restore after reset */
1011
u32
msi_hi
;
1012
/* MSI data (vector) saved for restore */
1013
u16
msi_data
;
1014
/* so we can rewrite it after a chip reset */
1015
u32
pcibar0
;
1016
/* so we can rewrite it after a chip reset */
1017
u32
pcibar1
;
1018
u64
rhdrhead_intr_off
;
1019
1020
/*
1021
* ASCII serial number, from flash, large enough for original
1022
* all digit strings, and longer QLogic serial number format
1023
*/
1024
u8
serial
[16];
1025
/* human readable board version */
1026
u8
boardversion
[96];
1027
u8
lbus_info
[32];
/* human readable localbus info */
1028
/* chip major rev, from qib_revision */
1029
u8
majrev
;
1030
/* chip minor rev, from qib_revision */
1031
u8
minrev
;
1032
1033
/* Misc small ints */
1034
/* Number of physical ports available */
1035
u8
num_pports
;
1036
/* Lowest context number which can be used by user processes */
1037
u8
first_user_ctxt
;
1038
u8
n_krcv_queues
;
1039
u8
qpn_mask
;
1040
u8
skip_kctxt_mask
;
1041
1042
u16
rhf_offset
;
/* offset of RHF within receive header entry */
1043
1044
/*
1045
* GPIO pins for twsi-connected devices, and device code for eeprom
1046
*/
1047
u8
gpio_sda_num
;
1048
u8
gpio_scl_num
;
1049
u8
twsi_eeprom_dev
;
1050
u8
board_atten
;
1051
1052
/* Support (including locks) for EEPROM logging of errors and time */
1053
/* control access to actual counters, timer */
1054
spinlock_t
eep_st_lock
;
1055
/* control high-level access to EEPROM */
1056
struct
mutex
eep_lock
;
1057
uint64_t
traffic_wds
;
1058
/* active time is kept in seconds, but logged in hours */
1059
atomic_t
active_time
;
1060
/* Below are nominal shadow of EEPROM, new since last EEPROM update */
1061
uint8_t
eep_st_errs
[
QIB_EEP_LOG_CNT
];
1062
uint8_t
eep_st_new_errs
[
QIB_EEP_LOG_CNT
];
1063
uint16_t
eep_hrs
;
1064
/*
1065
* masks for which bits of errs, hwerrs that cause
1066
* each of the counters to increment.
1067
*/
1068
struct
qib_eep_log_mask
eep_st_masks
[
QIB_EEP_LOG_CNT
];
1069
struct
qib_diag_client *
diag_client
;
1070
spinlock_t
qib_diag_trans_lock
;
/* protect diag observer ops */
1071
struct
diag_observer_list_elt
*
diag_observer_list
;
1072
1073
u8
psxmitwait_supported
;
1074
/* cycle length of PS* counters in HW (in picoseconds) */
1075
u16
psxmitwait_check_rate
;
1076
/* high volume overflow errors defered to tasklet */
1077
struct
tasklet_struct
error_tasklet
;
1078
};
1079
1080
/* hol_state values */
1081
#define QIB_HOL_UP 0
1082
#define QIB_HOL_INIT 1
1083
1084
#define QIB_SDMA_SENDCTRL_OP_ENABLE (1U << 0)
1085
#define QIB_SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
1086
#define QIB_SDMA_SENDCTRL_OP_HALT (1U << 2)
1087
#define QIB_SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
1088
#define QIB_SDMA_SENDCTRL_OP_DRAIN (1U << 4)
1089
1090
/* operation types for f_txchk_change() */
1091
#define TXCHK_CHG_TYPE_DIS1 3
1092
#define TXCHK_CHG_TYPE_ENAB1 2
1093
#define TXCHK_CHG_TYPE_KERN 1
1094
#define TXCHK_CHG_TYPE_USER 0
1095
1096
#define QIB_CHASE_TIME msecs_to_jiffies(145)
1097
#define QIB_CHASE_DIS_TIME msecs_to_jiffies(160)
1098
1099
/* Private data for file operations */
struct qib_filedata {
	/* receive context this open file is bound to */
	struct qib_ctxtdata *rcd;
	/* subcontext number within rcd (shared-context opens) */
	unsigned subctxt;
	/* cursor into expected-TID state -- presumably a search hint; TODO confirm */
	unsigned tidcursor;
	/* per-open user SDMA request queue */
	struct qib_user_sdma_queue *pq;
	int rec_cpu_num; /* for cpu affinity; -1 if none */
};
1107
1108
extern
struct
list_head
qib_dev_list
;
1109
extern
spinlock_t
qib_devs_lock
;
1110
extern
struct
qib_devdata
*
qib_lookup
(
int
unit
);
1111
extern
u32
qib_cpulist_count
;
1112
extern
unsigned
long
*
qib_cpulist
;
1113
1114
extern
unsigned
qib_wc_pat
;
1115
extern
unsigned
qib_cc_table_size
;
1116
int
qib_init
(
struct
qib_devdata
*,
int
);
1117
int
init_chip_wc_pat
(
struct
qib_devdata
*dd,
u32
);
1118
int
qib_enable_wc
(
struct
qib_devdata
*dd);
1119
void
qib_disable_wc
(
struct
qib_devdata
*dd);
1120
int
qib_count_units
(
int
*npresentp,
int
*nupp);
1121
int
qib_count_active_units
(
void
);
1122
1123
int
qib_cdev_init
(
int
minor,
const
char
*
name
,
1124
const
struct
file_operations
*
fops
,
1125
struct
cdev
**cdevp,
struct
device
**devp);
1126
void
qib_cdev_cleanup
(
struct
cdev
**cdevp,
struct
device
**devp);
1127
int
qib_dev_init
(
void
);
1128
void
qib_dev_cleanup
(
void
);
1129
1130
int
qib_diag_add
(
struct
qib_devdata
*);
1131
void
qib_diag_remove
(
struct
qib_devdata
*);
1132
void
qib_handle_e_ibstatuschanged
(
struct
qib_pportdata
*,
u64
);
1133
void
qib_sdma_update_tail
(
struct
qib_pportdata
*,
u16
);
/* hold sdma_lock */
1134
1135
int
qib_decode_err
(
struct
qib_devdata
*dd,
char
*
buf
,
size_t
blen,
u64
err
);
1136
void
qib_bad_intrstatus
(
struct
qib_devdata
*);
1137
void
qib_handle_urcv
(
struct
qib_devdata
*,
u64
);
1138
1139
/* clean up any per-chip chip-specific stuff */
1140
void
qib_chip_cleanup
(
struct
qib_devdata
*);
1141
/* clean up any chip type-specific stuff */
1142
void
qib_chip_done
(
void
);
1143
1144
/* check to see if we have to force ordering for write combining */
1145
int
qib_unordered_wc
(
void
);
1146
void
qib_pio_copy
(
void
__iomem
*to,
const
void
*
from
,
size_t
count
);
1147
1148
void
qib_disarm_piobufs
(
struct
qib_devdata
*,
unsigned
,
unsigned
);
1149
int
qib_disarm_piobufs_ifneeded
(
struct
qib_ctxtdata
*);
1150
void
qib_disarm_piobufs_set
(
struct
qib_devdata
*,
unsigned
long
*,
unsigned
);
1151
void
qib_cancel_sends
(
struct
qib_pportdata
*);
1152
1153
int
qib_create_rcvhdrq
(
struct
qib_devdata
*,
struct
qib_ctxtdata
*);
1154
int
qib_setup_eagerbufs
(
struct
qib_ctxtdata
*);
1155
void
qib_set_ctxtcnt
(
struct
qib_devdata
*);
1156
int
qib_create_ctxts
(
struct
qib_devdata
*dd);
1157
struct
qib_ctxtdata
*
qib_create_ctxtdata
(
struct
qib_pportdata
*,
u32
);
1158
void
qib_init_pportdata
(
struct
qib_pportdata
*,
struct
qib_devdata
*,
u8
,
u8
);
1159
void
qib_free_ctxtdata
(
struct
qib_devdata
*,
struct
qib_ctxtdata
*);
1160
1161
u32
qib_kreceive
(
struct
qib_ctxtdata
*,
u32
*,
u32
*);
1162
int
qib_reset_device
(
int
);
1163
int
qib_wait_linkstate
(
struct
qib_pportdata
*,
u32
,
int
);
1164
int
qib_set_linkstate
(
struct
qib_pportdata
*,
u8
);
1165
int
qib_set_mtu
(
struct
qib_pportdata
*,
u16
);
1166
int
qib_set_lid
(
struct
qib_pportdata
*,
u32
,
u8
);
1167
void
qib_hol_down
(
struct
qib_pportdata
*);
1168
void
qib_hol_init
(
struct
qib_pportdata
*);
1169
void
qib_hol_up
(
struct
qib_pportdata
*);
1170
void
qib_hol_event
(
unsigned
long
);
1171
void
qib_disable_after_error
(
struct
qib_devdata
*);
1172
int
qib_set_uevent_bits
(
struct
qib_pportdata
*,
const
int
);
1173
1174
/* for use in system calls, where we want to know device type, etc. */
/*
 * Accessors for the struct qib_filedata hung off fp->private_data;
 * each expands to an lvalue for the named member.
 */
#define ctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->rcd)
#define subctxt_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->subctxt)
#define tidcursor_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->tidcursor)
#define user_sdma_queue_fp(fp) \
	(((struct qib_filedata *)(fp)->private_data)->pq)
1183
1184
static
inline
struct
qib_devdata
*dd_from_ppd(
struct
qib_pportdata
*ppd)
1185
{
1186
return
ppd->
dd
;
1187
}
1188
1189
/*
 * Map a verbs device (qib_ibdev) back to the qib_devdata that embeds
 * it as its verbs_dev member.
 */
static inline struct qib_devdata *dd_from_dev(struct qib_ibdev *dev)
{
	return container_of(dev, struct qib_devdata, verbs_dev);
}
1193
1194
/*
 * Translate a core ib_device into the driver's qib_devdata: first down
 * to the embedded qib_ibdev, then up to the containing device struct.
 */
static inline struct qib_devdata *dd_from_ibdev(struct ib_device *ibdev)
{
	struct qib_ibdev *qdev = to_idev(ibdev);

	return dd_from_dev(qdev);
}
1198
1199
/*
 * Map a qib_ibport back to the per-port data (qib_pportdata) that
 * embeds it as its ibport_data member.
 */
static inline struct qib_pportdata *ppd_from_ibp(struct qib_ibport *ibp)
{
	return container_of(ibp, struct qib_pportdata, ibport_data);
}
1203
1204
/*
 * Return the qib_ibport for IB port number @port (1-based) on @ibdev.
 * Emits a WARN if @port is out of range, but still performs the
 * (then out-of-bounds) index -- callers are expected to pass valid ports.
 */
static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	WARN_ON(pidx >= dd->num_pports);
	return &dd->pport[pidx].ibport_data;
}
1212
1213
/*
1214
* values for dd->flags (_device_ related flags) and
1215
*/
1216
#define QIB_HAS_LINK_LATENCY 0x1
/* supports link latency (IB 1.2) */
1217
#define QIB_INITTED 0x2
/* chip and driver up and initted */
1218
#define QIB_DOING_RESET 0x4
/* in the middle of doing chip reset */
1219
#define QIB_PRESENT 0x8
/* chip accesses can be done */
1220
#define QIB_PIO_FLUSH_WC 0x10
/* Needs Write combining flush for PIO */
1221
#define QIB_HAS_THRESH_UPDATE 0x40
1222
#define QIB_HAS_SDMA_TIMEOUT 0x80
1223
#define QIB_USE_SPCL_TRIG 0x100
/* SpecialTrigger launch enabled */
1224
#define QIB_NODMA_RTAIL 0x200
/* rcvhdrtail register DMA enabled */
1225
#define QIB_HAS_INTX 0x800
/* Supports INTx interrupts */
1226
#define QIB_HAS_SEND_DMA 0x1000
/* Supports Send DMA */
1227
#define QIB_HAS_VLSUPP 0x2000
/* Supports multiple VLs; PBC different */
1228
#define QIB_HAS_HDRSUPP 0x4000
/* Supports header suppression */
1229
#define QIB_BADINTR 0x8000
/* severe interrupt problems */
1230
#define QIB_DCA_ENABLED 0x10000
/* Direct Cache Access enabled */
1231
#define QIB_HAS_QSFP 0x20000
/* device (card instance) has QSFP */
1232
1233
/*
1234
* values for ppd->lflags (_ib_port_ related flags)
1235
*/
1236
#define QIBL_LINKV 0x1
/* IB link state valid */
1237
#define QIBL_LINKDOWN 0x8
/* IB link is down */
1238
#define QIBL_LINKINIT 0x10
/* IB link level is up */
1239
#define QIBL_LINKARMED 0x20
/* IB link is ARMED */
1240
#define QIBL_LINKACTIVE 0x40
/* IB link is ACTIVE */
1241
/* leave a gap for more IB-link state */
1242
#define QIBL_IB_AUTONEG_INPROG 0x1000
/* non-IBTA DDR/QDR neg active */
1243
#define QIBL_IB_AUTONEG_FAILED 0x2000
/* non-IBTA DDR/QDR neg failed */
1244
#define QIBL_IB_LINK_DISABLED 0x4000
/* Linkdown-disable forced,
1245
* Do not try to bring up */
1246
#define QIBL_IB_FORCE_NOTIFY 0x8000
/* force notify on next ib change */
1247
1248
/* IB dword length mask in PBC (lower 11 bits); same for all chips */
1249
#define QIB_PBC_LENGTH_MASK ((1 << 11) - 1)
1250
1251
1252
/* ctxt_flag bit offsets */
1253
/* waiting for a packet to arrive */
1254
#define QIB_CTXT_WAITING_RCV 2
1255
/* master has not finished initializing */
1256
#define QIB_CTXT_MASTER_UNINIT 4
1257
/* waiting for an urgent packet to arrive */
1258
#define QIB_CTXT_WAITING_URG 5
1259
1260
/* free up any allocated data at closes */
1261
void
qib_free_data
(
struct
qib_ctxtdata
*dd);
1262
void
qib_chg_pioavailkernel
(
struct
qib_devdata
*,
unsigned
,
unsigned
,
1263
u32
,
struct
qib_ctxtdata
*);
1264
struct
qib_devdata
*
qib_init_iba7322_funcs
(
struct
pci_dev
*,
1265
const
struct
pci_device_id
*);
1266
struct
qib_devdata
*
qib_init_iba7220_funcs
(
struct
pci_dev
*,
1267
const
struct
pci_device_id
*);
1268
struct
qib_devdata
*
qib_init_iba6120_funcs
(
struct
pci_dev
*,
1269
const
struct
pci_device_id
*);
1270
void
qib_free_devdata
(
struct
qib_devdata
*);
1271
struct
qib_devdata
*
qib_alloc_devdata
(
struct
pci_dev
*pdev,
size_t
extra
);
1272
1273
#define QIB_TWSI_NO_DEV 0xFF
1274
/* Below qib_twsi_ functions must be called with eep_lock held */
1275
int
qib_twsi_reset
(
struct
qib_devdata
*dd);
1276
int
qib_twsi_blk_rd
(
struct
qib_devdata
*dd,
int
dev
,
int
addr
,
void
*
buffer
,
1277
int
len);
1278
int
qib_twsi_blk_wr
(
struct
qib_devdata
*dd,
int
dev
,
int
addr
,
1279
const
void
*
buffer
,
int
len);
1280
void
qib_get_eeprom_info
(
struct
qib_devdata
*);
1281
int
qib_update_eeprom_log
(
struct
qib_devdata
*dd);
1282
void
qib_inc_eeprom_err
(
struct
qib_devdata
*dd,
u32
eidx,
u32
incr);
1283
void
qib_dump_lookup_output_queue
(
struct
qib_devdata
*);
1284
void
qib_force_pio_avail_update
(
struct
qib_devdata
*);
1285
void
qib_clear_symerror_on_linkup
(
unsigned
long
opaque);
1286
1287
/*
1288
* Set LED override, only the two LSBs have "public" meaning, but
1289
* any non-zero value substitutes them for the Link and LinkTrain
1290
* LED states.
1291
*/
1292
#define QIB_LED_PHYS 1
/* Physical (linktraining) GREEN LED */
1293
#define QIB_LED_LOG 2
/* Logical (link) YELLOW LED */
1294
void
qib_set_led_override
(
struct
qib_pportdata
*ppd,
unsigned
int
val
);
1295
1296
/* send dma routines */
1297
int
qib_setup_sdma
(
struct
qib_pportdata
*);
1298
void
qib_teardown_sdma
(
struct
qib_pportdata
*);
1299
void
__qib_sdma_intr
(
struct
qib_pportdata
*);
1300
void
qib_sdma_intr
(
struct
qib_pportdata
*);
1301
int
qib_sdma_verbs_send
(
struct
qib_pportdata
*,
struct
qib_sge_state
*,
1302
u32
,
struct
qib_verbs_txreq
*);
1303
/* ppd->sdma_lock should be locked before calling this. */
1304
int
qib_sdma_make_progress
(
struct
qib_pportdata
*dd);
1305
1306
static
inline
int
qib_sdma_empty(
const
struct
qib_pportdata
*ppd)
1307
{
1308
return
ppd->
sdma_descq_added
== ppd->
sdma_descq_removed
;
1309
}
1310
1311
/* must be called under qib_sdma_lock */
/*
 * Number of free slots in the SDMA descriptor queue: total count minus
 * outstanding (added - removed), minus one reserved slot -- presumably
 * so a full ring is distinguishable from an empty one; TODO confirm.
 */
static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
{
	return ppd->sdma_descq_cnt -
		(ppd->sdma_descq_added - ppd->sdma_descq_removed) - 1;
}
1317
1318
static
inline
int
__qib_sdma_running(
struct
qib_pportdata
*ppd)
1319
{
1320
return
ppd->
sdma_state
.current_state ==
qib_sdma_state_s99_running
;
1321
}
1322
int
qib_sdma_running
(
struct
qib_pportdata
*);
1323
1324
void
__qib_sdma_process_event
(
struct
qib_pportdata
*,
enum
qib_sdma_events
);
1325
void
qib_sdma_process_event
(
struct
qib_pportdata
*,
enum
qib_sdma_events
);
1326
1327
/*
1328
* number of words used for protocol header if not set by qib_userinit();
1329
*/
1330
#define QIB_DFLT_RCVHDRSIZE 9
1331
1332
/*
1333
* We need to be able to handle an IB header of at least 24 dwords.
1334
* We need the rcvhdrq large enough to handle largest IB header, but
1335
* still have room for a 2KB MTU standard IB packet.
1336
* Additionally, some processor/memory controller combinations
1337
* benefit quite strongly from having the DMA'ed data be cacheline
1338
* aligned and a cacheline multiple, so we set the size to 32 dwords
1339
* (2 64-byte primary cachelines for pretty much all processors of
1340
* interest). The alignment hurts nothing, other than using somewhat
1341
* more memory.
1342
*/
1343
#define QIB_RCVHDR_ENTSIZE 32
1344
1345
int
qib_get_user_pages
(
unsigned
long
,
size_t
,
struct
page
**);
1346
void
qib_release_user_pages
(
struct
page
**,
size_t
);
1347
int
qib_eeprom_read
(
struct
qib_devdata
*,
u8
,
void
*,
int
);
1348
int
qib_eeprom_write
(
struct
qib_devdata
*,
u8
,
const
void
*,
int
);
1349
u32
__iomem
*
qib_getsendbuf_range
(
struct
qib_devdata
*,
u32
*,
u32
,
u32
);
1350
void
qib_sendbuf_done
(
struct
qib_devdata
*,
unsigned
);
1351
1352
/*
 * Zero the receive-header-queue tail word at rcvhdrtail_kvaddr
 * (the location the chip DMAs the tail into; see qib_get_rcvhdrtail).
 */
static inline void qib_clear_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	*((u64 *) rcd->rcvhdrtail_kvaddr) = 0ULL;
}
1356
1357
/*
 * Read the chip-DMA'ed receive-header-queue tail, converting from
 * little-endian to CPU order and truncating to 32 bits.
 */
static inline u32 qib_get_rcvhdrtail(const struct qib_ctxtdata *rcd)
{
	/*
	 * volatile because it's a DMA target from the chip, routine is
	 * inlined, and don't want register caching or reordering.
	 */
	return (u32) le64_to_cpu(
		*((volatile __le64 *)rcd->rcvhdrtail_kvaddr)); /* DMA'ed */
}
1366
1367
/*
 * Return the current receive-header-queue tail for @rcd.
 *
 * With QIB_NODMA_RTAIL the chip does not DMA a tail value, so the tail
 * is inferred from the RHF sequence number at the current head entry:
 * if it matches the expected seq_cnt, a new packet has landed there and
 * the tail is one past head; otherwise the tail equals head.  Without
 * that flag the DMA'ed tail is read directly.
 */
static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
{
	const struct qib_devdata *dd = rcd->dd;
	u32 hdrqtail;

	if (dd->flags & QIB_NODMA_RTAIL) {
		__le32 *rhf_addr;
		u32 seq;

		/* RHF of the entry at the software head index */
		rhf_addr = (__le32 *) rcd->rcvhdrq +
			rcd->head + dd->rhf_offset;
		seq = qib_hdrget_seq(rhf_addr);
		hdrqtail = rcd->head;
		if (seq == rcd->seq_cnt)
			hdrqtail++;
	} else
		hdrqtail = qib_get_rcvhdrtail(rcd);

	return hdrqtail;
}
1387
1388
/*
1389
* sysfs interface.
1390
*/
1391
1392
extern
const
char
ib_qib_version
[];
1393
1394
int
qib_device_create
(
struct
qib_devdata
*);
1395
void
qib_device_remove
(
struct
qib_devdata
*);
1396
1397
int
qib_create_port_files
(
struct
ib_device
*ibdev,
u8
port_num
,
1398
struct
kobject
*kobj);
1399
int
qib_verbs_register_sysfs
(
struct
qib_devdata
*);
1400
void
qib_verbs_unregister_sysfs
(
struct
qib_devdata
*);
1401
/* Hook for sysfs read of QSFP */
1402
extern
int
qib_qsfp_dump
(
struct
qib_pportdata
*ppd,
char
*
buf
,
int
len);
1403
1404
int
__init
qib_init_qibfs
(
void
);
1405
int
__exit
qib_exit_qibfs
(
void
);
1406
1407
int
qibfs_add
(
struct
qib_devdata
*);
1408
int
qibfs_remove
(
struct
qib_devdata
*);
1409
1410
int
qib_pcie_init
(
struct
pci_dev
*,
const
struct
pci_device_id
*);
1411
int
qib_pcie_ddinit
(
struct
qib_devdata
*,
struct
pci_dev
*,
1412
const
struct
pci_device_id
*);
1413
void
qib_pcie_ddcleanup
(
struct
qib_devdata
*);
1414
int
qib_pcie_params
(
struct
qib_devdata
*,
u32
,
u32
*,
struct
qib_msix_entry
*);
1415
int
qib_reinit_intr
(
struct
qib_devdata
*);
1416
void
qib_enable_intx
(
struct
pci_dev
*);
1417
void
qib_nomsi
(
struct
qib_devdata
*);
1418
void
qib_nomsix
(
struct
qib_devdata
*);
1419
void
qib_pcie_getcmd
(
struct
qib_devdata
*,
u16
*,
u8
*,
u8
*);
1420
void
qib_pcie_reenable
(
struct
qib_devdata
*,
u16
,
u8
,
u8
);
1421
1422
/*
1423
* dma_addr wrappers - all 0's invalid for hw
1424
*/
1425
dma_addr_t
qib_map_page
(
struct
pci_dev
*,
struct
page
*,
unsigned
long
,
1426
size_t
,
int
);
1427
const
char
*
qib_get_unit_name
(
int
unit
);
1428
1429
/*
1430
* Flush write combining store buffers (if present) and perform a write
1431
* barrier.
1432
*/
1433
#if defined(CONFIG_X86_64)
1434
#define qib_flush_wc() asm volatile("sfence" : : : "memory")
1435
#else
1436
#define qib_flush_wc() wmb()
/* no reorder around wc flush */
1437
#endif
1438
1439
/* global module parameter variables */
1440
extern
unsigned
qib_ibmtu
;
1441
extern
ushort
qib_cfgctxts
;
1442
extern
ushort
qib_num_cfg_vls
;
1443
extern
ushort
qib_mini_init
;
/* If set, do few (ideally 0) writes to chip */
1444
extern
unsigned
qib_n_krcv_queues
;
1445
extern
unsigned
qib_sdma_fetch_arb
;
1446
extern
unsigned
qib_compat_ddr_negotiate
;
1447
extern
int
qib_special_trigger
;
1448
1449
extern
struct
mutex
qib_mutex
;
1450
1451
/* Number of seconds before our card status check... */
1452
#define STATUS_TIMEOUT 60
1453
1454
#define QIB_DRV_NAME "ib_qib"
1455
#define QIB_USER_MINOR_BASE 0
1456
#define QIB_TRACE_MINOR 127
1457
#define QIB_DIAGPKT_MINOR 128
1458
#define QIB_DIAG_MINOR_BASE 129
1459
#define QIB_NMINORS 255
1460
1461
#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
1462
#define PCI_VENDOR_ID_QLOGIC 0x1077
1463
#define PCI_DEVICE_ID_QLOGIC_IB_6120 0x10
1464
#define PCI_DEVICE_ID_QLOGIC_IB_7220 0x7220
1465
#define PCI_DEVICE_ID_QLOGIC_IB_7322 0x7322
1466
1467
/*
1468
* qib_early_err is used (only!) to print early errors before devdata is
1469
* allocated, or when dd->pcidev may not be valid, and at the tail end of
1470
* cleanup when devdata may have been freed, etc. qib_dev_porterr is
1471
* the same as qib_dev_err, but is used when the message really needs
1472
* the IB port# to be definitive as to what's happening..
1473
* All of these go to the trace log, and the trace log entry is done
1474
* first to avoid possible serial port delays from printk.
1475
*/
1476
#define qib_early_err(dev, fmt, ...) \
1477
do { \
1478
dev_err(dev, fmt, ##__VA_ARGS__); \
1479
} while (0)
1480
1481
#define qib_dev_err(dd, fmt, ...) \
1482
do { \
1483
dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
1484
qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
1485
} while (0)
1486
1487
#define qib_dev_porterr(dd, port, fmt, ...) \
1488
do { \
1489
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
1490
qib_get_unit_name((dd)->unit), (dd)->unit, (port), \
1491
##__VA_ARGS__); \
1492
} while (0)
1493
1494
#define qib_devinfo(pcidev, fmt, ...) \
1495
do { \
1496
dev_info(&(pcidev)->dev, fmt, ##__VA_ARGS__); \
1497
} while (0)
1498
1499
/*
 * this is used for formatting hw error messages...
 */
struct qib_hwerror_msgs {
	/* hwerr status bit(s) this entry describes */
	u64 mask;
	/* human-readable message for those bits */
	const char *msg;
	/* size associated with msg -- presumably its length; TODO confirm */
	size_t sz;
};
1507
1508
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
1509
1510
/* in qib_intr.c... */
1511
void
qib_format_hwerrors
(
u64
hwerrs,
1512
const
struct
qib_hwerror_msgs
*hwerrmsgs,
1513
size_t
nhwerrmsgs,
char
*
msg
,
size_t
lmsg);
1514
#endif
/* _QIB_KERNEL_H */
Generated on Thu Jan 10 2013 13:38:14 for Linux Kernel by
1.8.2