Linux Kernel
3.7.1
Main Page
Related Pages
Modules
Namespaces
Data Structures
Files
File List
Globals
All
Data Structures
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Macros
Groups
Pages
include
linux
mlx4
device.h
Go to the documentation of this file.
1
/*
2
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3
*
4
* This software is available to you under a choice of one of two
5
* licenses. You may choose to be licensed under the terms of the GNU
6
* General Public License (GPL) Version 2, available from the file
7
* COPYING in the main directory of this source tree, or the
8
* OpenIB.org BSD license below:
9
*
10
* Redistribution and use in source and binary forms, with or
11
* without modification, are permitted provided that the following
12
* conditions are met:
13
*
14
* - Redistributions of source code must retain the above
15
* copyright notice, this list of conditions and the following
16
* disclaimer.
17
*
18
* - Redistributions in binary form must reproduce the above
19
* copyright notice, this list of conditions and the following
20
* disclaimer in the documentation and/or other materials
21
* provided with the distribution.
22
*
23
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30
* SOFTWARE.
31
*/
32
33
#ifndef MLX4_DEVICE_H
34
#define MLX4_DEVICE_H
35
36
#include <linux/pci.h>
37
#include <
linux/completion.h
>
38
#include <
linux/radix-tree.h
>
39
#include <
linux/cpu_rmap.h
>
40
41
#include <
linux/atomic.h
>
42
43
#define MAX_MSIX_P_PORT 17
44
#define MAX_MSIX 64
45
#define MSIX_LEGACY_SZ 4
46
#define MIN_MSIX_P_PORT 5
47
48
enum
{
49
MLX4_FLAG_MSI_X
= 1 << 0,
50
MLX4_FLAG_OLD_PORT_CMDS
= 1 << 1,
51
MLX4_FLAG_MASTER
= 1 << 2,
52
MLX4_FLAG_SLAVE
= 1 << 3,
53
MLX4_FLAG_SRIOV
= 1 << 4,
54
};
55
56
enum
{
57
MLX4_PORT_CAP_IS_SM
= 1 << 1,
58
MLX4_PORT_CAP_DEV_MGMT_SUP
= 1 << 19,
59
};
60
61
enum
{
62
MLX4_MAX_PORTS
= 2,
63
MLX4_MAX_PORT_PKEYS
= 128
64
};
65
66
/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
67
* These qkeys must not be allowed for general use. This is a 64k range,
68
* and to test for violation, we use the mask (protect against future chg).
69
*/
70
#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
71
#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
72
73
enum
{
74
MLX4_BOARD_ID_LEN
= 64
75
};
76
77
enum
{
78
MLX4_MAX_NUM_PF
= 16,
79
MLX4_MAX_NUM_VF
= 64,
80
MLX4_MFUNC_MAX
= 80,
81
MLX4_MAX_EQ_NUM
= 1024,
82
MLX4_MFUNC_EQ_NUM
= 4,
83
MLX4_MFUNC_MAX_EQES
= 8,
84
MLX4_MFUNC_EQE_MASK
= (
MLX4_MFUNC_MAX_EQES
- 1)
85
};
86
87
/* Driver supports 3 different device methods to manage traffic steering:
 * - device managed - High level API for ib and eth flow steering. FW is
 *		      managing flow steering tables.
 * - B0 steering mode - Common low level API for ib and (if supported) eth.
 * - A0 steering mode - Limited low level API for eth. In case of IB,
 *		       B0 mode is in use.
 */
enum {
	MLX4_STEERING_MODE_A0,
	MLX4_STEERING_MODE_B0,
	MLX4_STEERING_MODE_DEVICE_MANAGED
};

/*
 * mlx4_steering_mode_str - human-readable name for a steering mode.
 * @steering_mode: one of the MLX4_STEERING_MODE_* values above.
 *
 * Returns a static string; for any value outside the known enum a
 * "Unrecognized steering mode" fallback is returned.  (Fixed the
 * misspelled "Unrecognize" in the fallback message.)
 */
static inline const char *mlx4_steering_mode_str(int steering_mode)
{
	switch (steering_mode) {
	case MLX4_STEERING_MODE_A0:
		return "A0 steering";

	case MLX4_STEERING_MODE_B0:
		return "B0 steering";

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return "Device managed flow steering";

	default:
		return "Unrecognized steering mode";
	}
}
116
117
enum
{
118
MLX4_DEV_CAP_FLAG_RC
= 1
LL
<< 0,
119
MLX4_DEV_CAP_FLAG_UC
= 1
LL
<< 1,
120
MLX4_DEV_CAP_FLAG_UD
= 1
LL
<< 2,
121
MLX4_DEV_CAP_FLAG_XRC
= 1
LL
<< 3,
122
MLX4_DEV_CAP_FLAG_SRQ
= 1
LL
<< 6,
123
MLX4_DEV_CAP_FLAG_IPOIB_CSUM
= 1
LL
<< 7,
124
MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR
= 1
LL
<< 8,
125
MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR
= 1
LL
<< 9,
126
MLX4_DEV_CAP_FLAG_DPDP
= 1
LL
<< 12,
127
MLX4_DEV_CAP_FLAG_BLH
= 1
LL
<< 15,
128
MLX4_DEV_CAP_FLAG_MEM_WINDOW
= 1
LL
<< 16,
129
MLX4_DEV_CAP_FLAG_APM
= 1
LL
<< 17,
130
MLX4_DEV_CAP_FLAG_ATOMIC
= 1
LL
<< 18,
131
MLX4_DEV_CAP_FLAG_RAW_MCAST
= 1
LL
<< 19,
132
MLX4_DEV_CAP_FLAG_UD_AV_PORT
= 1
LL
<< 20,
133
MLX4_DEV_CAP_FLAG_UD_MCAST
= 1
LL
<< 21,
134
MLX4_DEV_CAP_FLAG_IBOE
= 1
LL
<< 30,
135
MLX4_DEV_CAP_FLAG_UC_LOOPBACK
= 1
LL
<< 32,
136
MLX4_DEV_CAP_FLAG_FCS_KEEP
= 1
LL
<< 34,
137
MLX4_DEV_CAP_FLAG_WOL_PORT1
= 1
LL
<< 37,
138
MLX4_DEV_CAP_FLAG_WOL_PORT2
= 1
LL
<< 38,
139
MLX4_DEV_CAP_FLAG_UDP_RSS
= 1
LL
<< 40,
140
MLX4_DEV_CAP_FLAG_VEP_UC_STEER
= 1
LL
<< 41,
141
MLX4_DEV_CAP_FLAG_VEP_MC_STEER
= 1
LL
<< 42,
142
MLX4_DEV_CAP_FLAG_COUNTERS
= 1
LL
<< 48,
143
MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
= 1
LL
<< 55,
144
MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV
= 1
LL
<< 59,
145
};
146
147
enum
{
148
MLX4_DEV_CAP_FLAG2_RSS
= 1
LL
<< 0,
149
MLX4_DEV_CAP_FLAG2_RSS_TOP
= 1
LL
<< 1,
150
MLX4_DEV_CAP_FLAG2_RSS_XOR
= 1
LL
<< 2,
151
MLX4_DEV_CAP_FLAG2_FS_EN
= 1
LL
<< 3
152
};
153
154
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
155
156
enum
{
157
MLX4_BMME_FLAG_LOCAL_INV
= 1 << 6,
158
MLX4_BMME_FLAG_REMOTE_INV
= 1 << 7,
159
MLX4_BMME_FLAG_TYPE_2_WIN
= 1 << 9,
160
MLX4_BMME_FLAG_RESERVED_LKEY
= 1 << 10,
161
MLX4_BMME_FLAG_FAST_REG_WR
= 1 << 11,
162
};
163
164
enum
mlx4_event
{
165
MLX4_EVENT_TYPE_COMP
= 0x00,
166
MLX4_EVENT_TYPE_PATH_MIG
= 0x01,
167
MLX4_EVENT_TYPE_COMM_EST
= 0x02,
168
MLX4_EVENT_TYPE_SQ_DRAINED
= 0x03,
169
MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE
= 0x13,
170
MLX4_EVENT_TYPE_SRQ_LIMIT
= 0x14,
171
MLX4_EVENT_TYPE_CQ_ERROR
= 0x04,
172
MLX4_EVENT_TYPE_WQ_CATAS_ERROR
= 0x05,
173
MLX4_EVENT_TYPE_EEC_CATAS_ERROR
= 0x06,
174
MLX4_EVENT_TYPE_PATH_MIG_FAILED
= 0x07,
175
MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR
= 0x10,
176
MLX4_EVENT_TYPE_WQ_ACCESS_ERROR
= 0x11,
177
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR
= 0x12,
178
MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR
= 0x08,
179
MLX4_EVENT_TYPE_PORT_CHANGE
= 0x09,
180
MLX4_EVENT_TYPE_EQ_OVERFLOW
= 0x0f,
181
MLX4_EVENT_TYPE_ECC_DETECT
= 0x0e,
182
MLX4_EVENT_TYPE_CMD
= 0x0a,
183
MLX4_EVENT_TYPE_VEP_UPDATE
= 0x19,
184
MLX4_EVENT_TYPE_COMM_CHANNEL
= 0x18,
185
MLX4_EVENT_TYPE_FATAL_WARNING
= 0x1b,
186
MLX4_EVENT_TYPE_FLR_EVENT
= 0x1c,
187
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT
= 0x1d,
188
MLX4_EVENT_TYPE_NONE
= 0xff,
189
};
190
191
enum
{
192
MLX4_PORT_CHANGE_SUBTYPE_DOWN
= 1,
193
MLX4_PORT_CHANGE_SUBTYPE_ACTIVE
= 4
194
};
195
196
enum
{
197
MLX4_FATAL_WARNING_SUBTYPE_WARMING
= 0,
198
};
199
200
enum
slave_port_state
{
201
SLAVE_PORT_DOWN
= 0,
202
SLAVE_PENDING_UP
,
203
SLAVE_PORT_UP
,
204
};
205
206
enum
slave_port_gen_event
{
207
SLAVE_PORT_GEN_EVENT_DOWN
= 0,
208
SLAVE_PORT_GEN_EVENT_UP
,
209
SLAVE_PORT_GEN_EVENT_NONE
,
210
};
211
212
enum
slave_port_state_event
{
213
MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN
,
214
MLX4_PORT_STATE_DEV_EVENT_PORT_UP
,
215
MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID
,
216
MLX4_PORT_STATE_IB_EVENT_GID_INVALID
,
217
};
218
219
enum
{
220
MLX4_PERM_LOCAL_READ
= 1 << 10,
221
MLX4_PERM_LOCAL_WRITE
= 1 << 11,
222
MLX4_PERM_REMOTE_READ
= 1 << 12,
223
MLX4_PERM_REMOTE_WRITE
= 1 << 13,
224
MLX4_PERM_ATOMIC
= 1 << 14
225
};
226
227
enum
{
228
MLX4_OPCODE_NOP
= 0x00,
229
MLX4_OPCODE_SEND_INVAL
= 0x01,
230
MLX4_OPCODE_RDMA_WRITE
= 0x08,
231
MLX4_OPCODE_RDMA_WRITE_IMM
= 0x09,
232
MLX4_OPCODE_SEND
= 0x0a,
233
MLX4_OPCODE_SEND_IMM
= 0x0b,
234
MLX4_OPCODE_LSO
= 0x0e,
235
MLX4_OPCODE_RDMA_READ
= 0x10,
236
MLX4_OPCODE_ATOMIC_CS
= 0x11,
237
MLX4_OPCODE_ATOMIC_FA
= 0x12,
238
MLX4_OPCODE_MASKED_ATOMIC_CS
= 0x14,
239
MLX4_OPCODE_MASKED_ATOMIC_FA
= 0x15,
240
MLX4_OPCODE_BIND_MW
= 0x18,
241
MLX4_OPCODE_FMR
= 0x19,
242
MLX4_OPCODE_LOCAL_INVAL
= 0x1b,
243
MLX4_OPCODE_CONFIG_CMD
= 0x1f,
244
245
MLX4_RECV_OPCODE_RDMA_WRITE_IMM
= 0x00,
246
MLX4_RECV_OPCODE_SEND
= 0x01,
247
MLX4_RECV_OPCODE_SEND_IMM
= 0x02,
248
MLX4_RECV_OPCODE_SEND_INVAL
= 0x03,
249
250
MLX4_CQE_OPCODE_ERROR
= 0x1e,
251
MLX4_CQE_OPCODE_RESIZE
= 0x16,
252
};
253
254
enum
{
255
MLX4_STAT_RATE_OFFSET
= 5
256
};
257
258
enum
mlx4_protocol
{
259
MLX4_PROT_IB_IPV6
= 0,
260
MLX4_PROT_ETH
,
261
MLX4_PROT_IB_IPV4
,
262
MLX4_PROT_FCOE
263
};
264
265
enum
{
266
MLX4_MTT_FLAG_PRESENT
= 1
267
};
268
269
enum
mlx4_qp_region
{
270
MLX4_QP_REGION_FW
= 0,
271
MLX4_QP_REGION_ETH_ADDR
,
272
MLX4_QP_REGION_FC_ADDR
,
273
MLX4_QP_REGION_FC_EXCH
,
274
MLX4_NUM_QP_REGION
275
};
276
277
enum
mlx4_port_type
{
278
MLX4_PORT_TYPE_NONE
= 0,
279
MLX4_PORT_TYPE_IB
= 1,
280
MLX4_PORT_TYPE_ETH
= 2,
281
MLX4_PORT_TYPE_AUTO
= 3
282
};
283
284
enum
mlx4_special_vlan_idx
{
285
MLX4_NO_VLAN_IDX
= 0,
286
MLX4_VLAN_MISS_IDX
,
287
MLX4_VLAN_REGULAR
288
};
289
290
enum
mlx4_steer_type
{
291
MLX4_MC_STEER
= 0,
292
MLX4_UC_STEER
,
293
MLX4_NUM_STEERS
294
};
295
296
enum
{
297
MLX4_NUM_FEXCH
= 64 * 1024,
298
};
299
300
enum
{
301
MLX4_MAX_FAST_REG_PAGES
= 511,
302
};
303
304
enum
{
305
MLX4_DEV_PMC_SUBTYPE_GUID_INFO
= 0x14,
306
MLX4_DEV_PMC_SUBTYPE_PORT_INFO
= 0x15,
307
MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE
= 0x16,
308
};
309
310
/* Port mgmt change event handling */
311
enum
{
312
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK
= 1 << 0,
313
MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK
= 1 << 1,
314
MLX4_EQ_PORT_INFO_LID_CHANGE_MASK
= 1 << 2,
315
MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK
= 1 << 3,
316
MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK
= 1 << 4,
317
};
318
319
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
320
MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
321
322
/*
 * mlx4_fw_ver - pack a firmware version triple into a single u64.
 * Layout: major in bits 63:32, minor in bits 31:16, subminor in bits 15:0.
 */
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
	u64 ver = major << 32;

	ver |= minor << 16;
	return ver | subminor;
}
326
327
struct
mlx4_phys_caps
{
328
u32
gid_phys_table_len
[
MLX4_MAX_PORTS
+ 1];
329
u32
pkey_phys_table_len
[
MLX4_MAX_PORTS
+ 1];
330
u32
num_phys_eqs
;
331
u32
base_sqpn
;
332
u32
base_proxy_sqpn
;
333
u32
base_tunnel_sqpn
;
334
};
335
336
struct
mlx4_caps
{
337
u64
fw_ver
;
338
u32
function
;
339
int
num_ports
;
340
int
vl_cap
[
MLX4_MAX_PORTS
+ 1];
341
int
ib_mtu_cap
[
MLX4_MAX_PORTS
+ 1];
342
__be32
ib_port_def_cap
[
MLX4_MAX_PORTS
+ 1];
343
u64
def_mac
[
MLX4_MAX_PORTS
+ 1];
344
int
eth_mtu_cap
[
MLX4_MAX_PORTS
+ 1];
345
int
gid_table_len
[
MLX4_MAX_PORTS
+ 1];
346
int
pkey_table_len
[
MLX4_MAX_PORTS
+ 1];
347
int
trans_type
[
MLX4_MAX_PORTS
+ 1];
348
int
vendor_oui
[
MLX4_MAX_PORTS
+ 1];
349
int
wavelength
[
MLX4_MAX_PORTS
+ 1];
350
u64
trans_code
[
MLX4_MAX_PORTS
+ 1];
351
int
local_ca_ack_delay
;
352
int
num_uars
;
353
u32
uar_page_size
;
354
int
bf_reg_size
;
355
int
bf_regs_per_page
;
356
int
max_sq_sg
;
357
int
max_rq_sg
;
358
int
num_qps
;
359
int
max_wqes
;
360
int
max_sq_desc_sz
;
361
int
max_rq_desc_sz
;
362
int
max_qp_init_rdma
;
363
int
max_qp_dest_rdma
;
364
u32
*
qp0_proxy
;
365
u32
*
qp1_proxy
;
366
u32
*
qp0_tunnel
;
367
u32
*
qp1_tunnel
;
368
int
num_srqs
;
369
int
max_srq_wqes
;
370
int
max_srq_sge
;
371
int
reserved_srqs
;
372
int
num_cqs
;
373
int
max_cqes
;
374
int
reserved_cqs
;
375
int
num_eqs
;
376
int
reserved_eqs
;
377
int
num_comp_vectors
;
378
int
comp_pool
;
379
int
num_mpts
;
380
int
max_fmr_maps
;
381
int
num_mtts
;
382
int
fmr_reserved_mtts
;
383
int
reserved_mtts
;
384
int
reserved_mrws
;
385
int
reserved_uars
;
386
int
num_mgms
;
387
int
num_amgms
;
388
int
reserved_mcgs
;
389
int
num_qp_per_mgm
;
390
int
steering_mode
;
391
int
fs_log_max_ucast_qp_range_size
;
392
int
num_pds
;
393
int
reserved_pds
;
394
int
max_xrcds
;
395
int
reserved_xrcds
;
396
int
mtt_entry_sz
;
397
u32
max_msg_sz
;
398
u32
page_size_cap
;
399
u64
flags
;
400
u64
flags2
;
401
u32
bmme_flags
;
402
u32
reserved_lkey
;
403
u16
stat_rate_support
;
404
u8
port_width_cap
[
MLX4_MAX_PORTS
+ 1];
405
int
max_gso_sz
;
406
int
max_rss_tbl_sz
;
407
int
reserved_qps_cnt
[
MLX4_NUM_QP_REGION
];
408
int
reserved_qps
;
409
int
reserved_qps_base
[
MLX4_NUM_QP_REGION
];
410
int
log_num_macs
;
411
int
log_num_vlans
;
412
int
log_num_prios
;
413
enum
mlx4_port_type
port_type
[
MLX4_MAX_PORTS
+ 1];
414
u8
supported_type
[
MLX4_MAX_PORTS
+ 1];
415
u8
suggested_type
[
MLX4_MAX_PORTS
+ 1];
416
u8
default_sense
[
MLX4_MAX_PORTS
+ 1];
417
u32
port_mask
[
MLX4_MAX_PORTS
+ 1];
418
enum
mlx4_port_type
possible_type
[
MLX4_MAX_PORTS
+ 1];
419
u32
max_counters
;
420
u8
port_ib_mtu
[
MLX4_MAX_PORTS
+ 1];
421
u16
sqp_demux
;
422
};
423
424
struct
mlx4_buf_list
{
425
void
*
buf
;
426
dma_addr_t
map
;
427
};
428
429
struct
mlx4_buf
{
430
struct
mlx4_buf_list
direct
;
431
struct
mlx4_buf_list
*
page_list
;
432
int
nbufs
;
433
int
npages
;
434
int
page_shift
;
435
};
436
437
struct
mlx4_mtt
{
438
u32
offset
;
439
int
order
;
440
int
page_shift
;
441
};
442
443
enum
{
444
MLX4_DB_PER_PAGE
=
PAGE_SIZE
/ 4
445
};
446
447
struct
mlx4_db_pgdir
{
448
struct
list_head
list
;
449
DECLARE_BITMAP
(order0,
MLX4_DB_PER_PAGE
);
450
DECLARE_BITMAP
(order1,
MLX4_DB_PER_PAGE
/ 2);
451
unsigned
long
*
bits
[2];
452
__be32
*
db_page
;
453
dma_addr_t
db_dma
;
454
};
455
456
struct
mlx4_ib_user_db_page
;
457
458
struct
mlx4_db
{
459
__be32
*
db
;
460
union
{
461
struct
mlx4_db_pgdir
*
pgdir
;
462
struct
mlx4_ib_user_db_page
*
user_page
;
463
}
u
;
464
dma_addr_t
dma
;
465
int
index
;
466
int
order
;
467
};
468
469
struct
mlx4_hwq_resources
{
470
struct
mlx4_db
db
;
471
struct
mlx4_mtt
mtt
;
472
struct
mlx4_buf
buf
;
473
};
474
475
struct
mlx4_mr
{
476
struct
mlx4_mtt
mtt
;
477
u64
iova
;
478
u64
size
;
479
u32
key
;
480
u32
pd
;
481
u32
access
;
482
int
enabled
;
483
};
484
485
struct
mlx4_fmr
{
486
struct
mlx4_mr
mr
;
487
struct
mlx4_mpt_entry
*
mpt
;
488
__be64
*
mtts
;
489
dma_addr_t
dma_handle
;
490
int
max_pages
;
491
int
max_maps
;
492
int
maps
;
493
u8
page_shift
;
494
};
495
496
struct
mlx4_uar
{
497
unsigned
long
pfn
;
498
int
index
;
499
struct
list_head
bf_list
;
500
unsigned
free_bf_bmap
;
501
void
__iomem
*
map
;
502
void
__iomem
*
bf_map
;
503
};
504
505
struct
mlx4_bf
{
506
unsigned
long
offset
;
507
int
buf_size
;
508
struct
mlx4_uar
*
uar
;
509
void
__iomem
*
reg
;
510
};
511
512
struct
mlx4_cq
{
513
void
(*
comp
) (
struct
mlx4_cq
*);
514
void
(*
event
) (
struct
mlx4_cq
*,
enum
mlx4_event
);
515
516
struct
mlx4_uar
*
uar
;
517
518
u32
cons_index
;
519
520
__be32
*
set_ci_db
;
521
__be32
*
arm_db
;
522
int
arm_sn
;
523
524
int
cqn
;
525
unsigned
vector
;
526
527
atomic_t
refcount
;
528
struct
completion
free
;
529
};
530
531
struct
mlx4_qp
{
532
void
(*
event
) (
struct
mlx4_qp
*,
enum
mlx4_event
);
533
534
int
qpn
;
535
536
atomic_t
refcount
;
537
struct
completion
free
;
538
};
539
540
struct
mlx4_srq
{
541
void
(*
event
) (
struct
mlx4_srq
*,
enum
mlx4_event
);
542
543
int
srqn
;
544
int
max
;
545
int
max_gs
;
546
int
wqe_shift
;
547
548
atomic_t
refcount
;
549
struct
completion
free
;
550
};
551
552
struct
mlx4_av
{
553
__be32
port_pd
;
554
u8
reserved1
;
555
u8
g_slid
;
556
__be16
dlid
;
557
u8
reserved2
;
558
u8
gid_index
;
559
u8
stat_rate
;
560
u8
hop_limit
;
561
__be32
sl_tclass_flowlabel
;
562
u8
dgid
[16];
563
};
564
565
struct
mlx4_eth_av
{
566
__be32
port_pd
;
567
u8
reserved1
;
568
u8
smac_idx
;
569
u16
reserved2
;
570
u8
reserved3
;
571
u8
gid_index
;
572
u8
stat_rate
;
573
u8
hop_limit
;
574
__be32
sl_tclass_flowlabel
;
575
u8
dgid
[16];
576
u32
reserved4
[2];
577
__be16
vlan
;
578
u8
mac
[6];
579
};
580
581
union
mlx4_ext_av
{
582
struct
mlx4_av
ib
;
583
struct
mlx4_eth_av
eth
;
584
};
585
586
struct
mlx4_counter
{
587
u8
reserved1
[3];
588
u8
counter_mode
;
589
__be32
num_ifc
;
590
u32
reserved2
[2];
591
__be64
rx_frames
;
592
__be64
rx_bytes
;
593
__be64
tx_frames
;
594
__be64
tx_bytes
;
595
};
596
597
struct
mlx4_dev
{
598
struct
pci_dev
*
pdev
;
599
unsigned
long
flags
;
600
unsigned
long
num_slaves
;
601
struct
mlx4_caps
caps
;
602
struct
mlx4_phys_caps
phys_caps
;
603
struct
radix_tree_root
qp_table_tree
;
604
u8
rev_id
;
605
char
board_id
[
MLX4_BOARD_ID_LEN
];
606
int
num_vfs
;
607
u64
regid_promisc_array
[
MLX4_MAX_PORTS
+ 1];
608
u64
regid_allmulti_array
[
MLX4_MAX_PORTS
+ 1];
609
};
610
611
struct
mlx4_eqe
{
612
u8
reserved1
;
613
u8
type
;
614
u8
reserved2
;
615
u8
subtype
;
616
union
{
617
u32
raw
[6];
618
struct
{
619
__be32
cqn
;
620
}
__packed
comp
;
621
struct
{
622
u16
reserved1
;
623
__be16
token
;
624
u32
reserved2
;
625
u8
reserved3
[3];
626
u8
status
;
627
__be64
out_param
;
628
}
__packed
cmd
;
629
struct
{
630
__be32
qpn
;
631
}
__packed
qp
;
632
struct
{
633
__be32
srqn
;
634
}
__packed
srq
;
635
struct
{
636
__be32
cqn
;
637
u32
reserved1
;
638
u8
reserved2
[3];
639
u8
syndrome
;
640
}
__packed
cq_err
;
641
struct
{
642
u32
reserved1
[2];
643
__be32
port
;
644
}
__packed
port_change
;
645
struct
{
646
#define COMM_CHANNEL_BIT_ARRAY_SIZE 4
647
u32
reserved
;
648
u32
bit_vec
[
COMM_CHANNEL_BIT_ARRAY_SIZE
];
649
}
__packed
comm_channel_arm
;
650
struct
{
651
u8
port
;
652
u8
reserved
[3];
653
__be64
mac
;
654
}
__packed
mac_update
;
655
struct
{
656
__be32
slave_id
;
657
}
__packed
flr_event
;
658
struct
{
659
__be16
current_temperature
;
660
__be16
warning_threshold
;
661
}
__packed
warming
;
662
struct
{
663
u8
reserved
[3];
664
u8
port
;
665
union
{
666
struct
{
667
__be16
mstr_sm_lid
;
668
__be16
port_lid
;
669
__be32
changed_attr
;
670
u8
reserved[3];
671
u8
mstr_sm_sl
;
672
__be64
gid_prefix
;
673
}
__packed
port_info
;
674
struct
{
675
__be32
block_ptr
;
676
__be32
tbl_entries_mask
;
677
}
__packed
tbl_change_info
;
678
}
params
;
679
}
__packed
port_mgmt_change
;
680
}
event
;
681
u8
slave_id
;
682
u8
reserved3
[2];
683
u8
owner
;
684
}
__packed
;
685
686
struct
mlx4_init_port_param
{
687
int
set_guid0
;
688
int
set_node_guid
;
689
int
set_si_guid
;
690
u16
mtu
;
691
int
port_width_cap
;
692
u16
vl_cap
;
693
u16
max_gid
;
694
u16
max_pkey
;
695
u64
guid0
;
696
u64
node_guid
;
697
u64
si_guid
;
698
};
699
700
#define mlx4_foreach_port(port, dev, type) \
701
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
702
if ((type) == (dev)->caps.port_mask[(port)])
703
704
#define mlx4_foreach_non_ib_transport_port(port, dev) \
705
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
706
if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
707
708
#define mlx4_foreach_ib_transport_port(port, dev) \
709
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
710
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
711
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
712
713
#define MLX4_INVALID_SLAVE_ID 0xFF
714
715
void
handle_port_mgmt_change_event
(
struct
work_struct
*
work
);
716
717
/*
 * mlx4_master_func_num - return the function number recorded in the
 * device capabilities (dev->caps.function).
 * NOTE(review): the name suggests this is the PF/master function id in
 * multi-function (SR-IOV) mode - confirm against mlx4_core init code.
 */
static inline int mlx4_master_func_num(struct mlx4_dev *dev)
{
	return dev->caps.function;
}
721
722
/*
 * mlx4_is_master - nonzero when MLX4_FLAG_MASTER is set in dev->flags.
 * Returns the raw masked bit (any nonzero value means "master"),
 * not a normalized 0/1.
 */
static inline int mlx4_is_master(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_MASTER;
}
726
727
/*
 * mlx4_is_qp_reserved - test whether @qpn lies in the reserved QP range.
 * The range covers base_sqpn plus 8 special QPs, and - only on the
 * master (!!mlx4_is_master() evaluates to 0 or 1) - an additional
 * 16 * MLX4_MFUNC_MAX proxy/tunnel QPs.
 */
static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
{
	return (qpn < dev->phys_caps.base_sqpn + 8 +
		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
}
732
733
/*
 * mlx4_is_guest_proxy - test whether @qpn is one of @slave's proxy QPs.
 * Each slave owns a window of 8 QPs starting at
 * base_proxy_sqpn + slave * 8; returns 1 when @qpn falls inside that
 * window, 0 otherwise.
 */
static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
{
	/* first proxy QP belonging to this slave */
	int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;

	if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
		return 1;

	return 0;
}
742
743
/*
 * mlx4_is_mfunc - nonzero when the device runs in multi-function mode,
 * i.e. either MLX4_FLAG_SLAVE or MLX4_FLAG_MASTER is set in dev->flags.
 * Returns the raw masked bits, not a normalized 0/1.
 */
static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
{
	return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
}
747
748
/*
 * mlx4_is_slave - nonzero when MLX4_FLAG_SLAVE is set in dev->flags.
 * Returns the raw masked bit, not a normalized 0/1.
 */
static inline int mlx4_is_slave(struct mlx4_dev *dev)
{
	return dev->flags & MLX4_FLAG_SLAVE;
}
752
753
int
mlx4_buf_alloc
(
struct
mlx4_dev
*dev,
int
size
,
int
max_direct,
754
struct
mlx4_buf
*
buf
);
755
void
mlx4_buf_free
(
struct
mlx4_dev
*dev,
int
size
,
struct
mlx4_buf
*
buf
);
756
/*
 * mlx4_buf_offset - virtual address of byte @offset inside @buf.
 *
 * A buffer is stored either as one direct chunk (always used on 64-bit
 * builds, or whenever nbufs == 1) or as a list of page-sized chunks.
 * In the paged case the chunk is selected with offset >> PAGE_SHIFT and
 * the low bits (offset & (PAGE_SIZE - 1)) index into that chunk.
 * Assumes @offset is within the buffer - no bounds check is done.
 */
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return buf->direct.buf + offset;
	else
		return buf->page_list[offset >> PAGE_SHIFT].buf +
			(offset & (PAGE_SIZE - 1));
}
764
765
int
mlx4_pd_alloc
(
struct
mlx4_dev
*dev,
u32
*pdn);
766
void
mlx4_pd_free
(
struct
mlx4_dev
*dev,
u32
pdn);
767
int
mlx4_xrcd_alloc
(
struct
mlx4_dev
*dev,
u32
*xrcdn);
768
void
mlx4_xrcd_free
(
struct
mlx4_dev
*dev,
u32
xrcdn);
769
770
int
mlx4_uar_alloc
(
struct
mlx4_dev
*dev,
struct
mlx4_uar
*uar);
771
void
mlx4_uar_free
(
struct
mlx4_dev
*dev,
struct
mlx4_uar
*uar);
772
int
mlx4_bf_alloc
(
struct
mlx4_dev
*dev,
struct
mlx4_bf
*
bf
);
773
void
mlx4_bf_free
(
struct
mlx4_dev
*dev,
struct
mlx4_bf
*
bf
);
774
775
int
mlx4_mtt_init
(
struct
mlx4_dev
*dev,
int
npages,
int
page_shift,
776
struct
mlx4_mtt
*mtt);
777
void
mlx4_mtt_cleanup
(
struct
mlx4_dev
*dev,
struct
mlx4_mtt
*mtt);
778
u64
mlx4_mtt_addr
(
struct
mlx4_dev
*dev,
struct
mlx4_mtt
*mtt);
779
780
int
mlx4_mr_alloc
(
struct
mlx4_dev
*dev,
u32
pd,
u64
iova
,
u64
size
,
u32
access
,
781
int
npages,
int
page_shift,
struct
mlx4_mr
*mr);
782
void
mlx4_mr_free
(
struct
mlx4_dev
*dev,
struct
mlx4_mr
*mr);
783
int
mlx4_mr_enable
(
struct
mlx4_dev
*dev,
struct
mlx4_mr
*mr);
784
int
mlx4_write_mtt
(
struct
mlx4_dev
*dev,
struct
mlx4_mtt
*mtt,
785
int
start_index,
int
npages,
u64
*page_list);
786
int
mlx4_buf_write_mtt
(
struct
mlx4_dev
*dev,
struct
mlx4_mtt
*mtt,
787
struct
mlx4_buf
*buf);
788
789
int
mlx4_db_alloc
(
struct
mlx4_dev
*dev,
struct
mlx4_db
*db,
int
order
);
790
void
mlx4_db_free
(
struct
mlx4_dev
*dev,
struct
mlx4_db
*db);
791
792
int
mlx4_alloc_hwq_res
(
struct
mlx4_dev
*dev,
struct
mlx4_hwq_resources
*wqres,
793
int
size
,
int
max_direct);
794
void
mlx4_free_hwq_res
(
struct
mlx4_dev
*mdev,
struct
mlx4_hwq_resources
*wqres,
795
int
size
);
796
797
int
mlx4_cq_alloc
(
struct
mlx4_dev
*dev,
int
nent,
struct
mlx4_mtt
*mtt,
798
struct
mlx4_uar
*uar,
u64
db_rec,
struct
mlx4_cq
*cq,
799
unsigned
vector
,
int
collapsed);
800
void
mlx4_cq_free
(
struct
mlx4_dev
*dev,
struct
mlx4_cq
*cq);
801
802
int
mlx4_qp_reserve_range
(
struct
mlx4_dev
*dev,
int
cnt
,
int
align
,
int
*base);
803
void
mlx4_qp_release_range
(
struct
mlx4_dev
*dev,
int
base_qpn,
int
cnt
);
804
805
int
mlx4_qp_alloc
(
struct
mlx4_dev
*dev,
int
qpn,
struct
mlx4_qp
*qp);
806
void
mlx4_qp_free
(
struct
mlx4_dev
*dev,
struct
mlx4_qp
*qp);
807
808
int
mlx4_srq_alloc
(
struct
mlx4_dev
*dev,
u32
pdn,
u32
cqn
,
u16
xrcdn,
809
struct
mlx4_mtt
*mtt,
u64
db_rec,
struct
mlx4_srq
*srq);
810
void
mlx4_srq_free
(
struct
mlx4_dev
*dev,
struct
mlx4_srq
*srq);
811
int
mlx4_srq_arm
(
struct
mlx4_dev
*dev,
struct
mlx4_srq
*srq,
int
limit_watermark);
812
int
mlx4_srq_query
(
struct
mlx4_dev
*dev,
struct
mlx4_srq
*srq,
int
*limit_watermark);
813
814
int
mlx4_INIT_PORT
(
struct
mlx4_dev
*dev,
int
port
);
815
int
mlx4_CLOSE_PORT
(
struct
mlx4_dev
*dev,
int
port
);
816
817
int
mlx4_unicast_attach
(
struct
mlx4_dev
*dev,
struct
mlx4_qp
*qp,
u8
gid
[16],
818
int
block_mcast_loopback,
enum
mlx4_protocol
prot);
819
int
mlx4_unicast_detach
(
struct
mlx4_dev
*dev,
struct
mlx4_qp
*qp,
u8
gid
[16],
820
enum
mlx4_protocol
prot);
821
int
mlx4_multicast_attach
(
struct
mlx4_dev
*dev,
struct
mlx4_qp
*qp,
u8
gid
[16],
822
u8
port
,
int
block_mcast_loopback,
823
enum
mlx4_protocol
protocol
,
u64
*reg_id);
824
int
mlx4_multicast_detach
(
struct
mlx4_dev
*dev,
struct
mlx4_qp
*qp,
u8
gid
[16],
825
enum
mlx4_protocol
protocol
,
u64
reg_id);
826
827
enum
{
828
MLX4_DOMAIN_UVERBS
= 0x1000,
829
MLX4_DOMAIN_ETHTOOL
= 0x2000,
830
MLX4_DOMAIN_RFS
= 0x3000,
831
MLX4_DOMAIN_NIC
= 0x5000,
832
};
833
834
enum
mlx4_net_trans_rule_id
{
835
MLX4_NET_TRANS_RULE_ID_ETH
= 0,
836
MLX4_NET_TRANS_RULE_ID_IB
,
837
MLX4_NET_TRANS_RULE_ID_IPV6
,
838
MLX4_NET_TRANS_RULE_ID_IPV4
,
839
MLX4_NET_TRANS_RULE_ID_TCP
,
840
MLX4_NET_TRANS_RULE_ID_UDP
,
841
MLX4_NET_TRANS_RULE_NUM
,
/* should be last */
842
};
843
844
extern
const
u16
__sw_id_hw
[];
845
846
/*
 * map_hw_to_sw_id - translate a hardware header id to its software rule id.
 * @header_id: hardware id to look up in the extern __sw_id_hw[] table.
 *
 * Linear scan over the MLX4_NET_TRANS_RULE_NUM known ids; the table
 * index doubles as the software id (enum mlx4_net_trans_rule_id).
 * Returns the index on a match, -EINVAL when @header_id is unknown.
 */
static inline int map_hw_to_sw_id(u16 header_id)
{

	int i;
	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
		if (header_id == __sw_id_hw[i])
			return i;
	}
	return -EINVAL;
}
856
857
enum
mlx4_net_trans_promisc_mode
{
858
MLX4_FS_PROMISC_NONE
= 0,
859
MLX4_FS_PROMISC_UPLINK
,
860
/* For future use. Not implemented yet */
861
MLX4_FS_PROMISC_FUNCTION_PORT
,
862
MLX4_FS_PROMISC_ALL_MULTI
,
863
};
864
865
struct
mlx4_spec_eth
{
866
u8
dst_mac
[6];
867
u8
dst_mac_msk
[6];
868
u8
src_mac
[6];
869
u8
src_mac_msk
[6];
870
u8
ether_type_enable
;
871
__be16
ether_type
;
872
__be16
vlan_id_msk
;
873
__be16
vlan_id
;
874
};
875
876
struct
mlx4_spec_tcp_udp
{
877
__be16
dst_port
;
878
__be16
dst_port_msk
;
879
__be16
src_port
;
880
__be16
src_port_msk
;
881
};
882
883
struct
mlx4_spec_ipv4
{
884
__be32
dst_ip
;
885
__be32
dst_ip_msk
;
886
__be32
src_ip
;
887
__be32
src_ip_msk
;
888
};
889
890
struct
mlx4_spec_ib
{
891
__be32
r_qpn
;
892
__be32
qpn_msk
;
893
u8
dst_gid
[16];
894
u8
dst_gid_msk
[16];
895
};
896
897
struct
mlx4_spec_list
{
898
struct
list_head
list
;
899
enum
mlx4_net_trans_rule_id
id
;
900
union
{
901
struct
mlx4_spec_eth
eth
;
902
struct
mlx4_spec_ib
ib
;
903
struct
mlx4_spec_ipv4
ipv4
;
904
struct
mlx4_spec_tcp_udp
tcp_udp
;
905
};
906
};
907
908
enum
mlx4_net_trans_hw_rule_queue
{
909
MLX4_NET_TRANS_Q_FIFO
,
910
MLX4_NET_TRANS_Q_LIFO
,
911
};
912
913
struct
mlx4_net_trans_rule
{
914
struct
list_head
list
;
915
enum
mlx4_net_trans_hw_rule_queue
queue_mode
;
916
bool
exclusive
;
917
bool
allow_loopback
;
918
enum
mlx4_net_trans_promisc_mode
promisc_mode
;
919
u8
port
;
920
u16
priority
;
921
u32
qpn
;
922
};
923
924
int
mlx4_flow_steer_promisc_add
(
struct
mlx4_dev
*dev,
u8
port
,
u32
qpn,
925
enum
mlx4_net_trans_promisc_mode
mode
);
926
int
mlx4_flow_steer_promisc_remove
(
struct
mlx4_dev
*dev,
u8
port
,
927
enum
mlx4_net_trans_promisc_mode
mode
);
928
int
mlx4_multicast_promisc_add
(
struct
mlx4_dev
*dev,
u32
qpn,
u8
port
);
929
int
mlx4_multicast_promisc_remove
(
struct
mlx4_dev
*dev,
u32
qpn,
u8
port
);
930
int
mlx4_unicast_promisc_add
(
struct
mlx4_dev
*dev,
u32
qpn,
u8
port
);
931
int
mlx4_unicast_promisc_remove
(
struct
mlx4_dev
*dev,
u32
qpn,
u8
port
);
932
int
mlx4_SET_MCAST_FLTR
(
struct
mlx4_dev
*dev,
u8
port
,
u64
mac
,
u64
clear
,
u8
mode
);
933
934
int
mlx4_register_mac
(
struct
mlx4_dev
*dev,
u8
port
,
u64
mac
);
935
void
mlx4_unregister_mac
(
struct
mlx4_dev
*dev,
u8
port
,
u64
mac
);
936
int
mlx4_replace_mac
(
struct
mlx4_dev
*dev,
u8
port
,
int
qpn,
u64
new_mac);
937
int
mlx4_get_eth_qp
(
struct
mlx4_dev
*dev,
u8
port
,
u64
mac
,
int
*qpn);
938
void
mlx4_put_eth_qp
(
struct
mlx4_dev
*dev,
u8
port
,
u64
mac
,
int
qpn);
939
void
mlx4_set_stats_bitmap
(
struct
mlx4_dev
*dev,
u64
*stats_bitmap);
940
int
mlx4_SET_PORT_general
(
struct
mlx4_dev
*dev,
u8
port
,
int
mtu,
941
u8
pptx,
u8
pfctx,
u8
pprx,
u8
pfcrx);
942
int
mlx4_SET_PORT_qpn_calc
(
struct
mlx4_dev
*dev,
u8
port
,
u32
base_qpn,
943
u8
promisc);
944
int
mlx4_SET_PORT_PRIO2TC
(
struct
mlx4_dev
*dev,
u8
port
,
u8
*prio2tc);
945
int
mlx4_SET_PORT_SCHEDULER
(
struct
mlx4_dev
*dev,
u8
port
,
u8
*tc_tx_bw,
946
u8
*
pg
,
u16
*ratelimit);
947
int
mlx4_find_cached_vlan
(
struct
mlx4_dev
*dev,
u8
port
,
u16
vid
,
int
*
idx
);
948
int
mlx4_register_vlan
(
struct
mlx4_dev
*dev,
u8
port
,
u16
vlan
,
int
*
index
);
949
void
mlx4_unregister_vlan
(
struct
mlx4_dev
*dev,
u8
port
,
int
index
);
950
951
int
mlx4_map_phys_fmr
(
struct
mlx4_dev
*dev,
struct
mlx4_fmr
*
fmr
,
u64
*page_list,
952
int
npages,
u64
iova
,
u32
*
lkey
,
u32
*
rkey
);
953
int
mlx4_fmr_alloc
(
struct
mlx4_dev
*dev,
u32
pd,
u32
access
,
int
max_pages,
954
int
max_maps,
u8
page_shift,
struct
mlx4_fmr
*
fmr
);
955
int
mlx4_fmr_enable
(
struct
mlx4_dev
*dev,
struct
mlx4_fmr
*
fmr
);
956
void
mlx4_fmr_unmap
(
struct
mlx4_dev
*dev,
struct
mlx4_fmr
*
fmr
,
957
u32
*
lkey
,
u32
*
rkey
);
958
int
mlx4_fmr_free
(
struct
mlx4_dev
*dev,
struct
mlx4_fmr
*
fmr
);
959
int
mlx4_SYNC_TPT
(
struct
mlx4_dev
*dev);
960
int
mlx4_test_interrupts
(
struct
mlx4_dev
*dev);
961
int
mlx4_assign_eq
(
struct
mlx4_dev
*dev,
char
*
name
,
struct
cpu_rmap
*rmap,
962
int
*
vector
);
963
void
mlx4_release_eq
(
struct
mlx4_dev
*dev,
int
vec);
964
965
int
mlx4_wol_read
(
struct
mlx4_dev
*dev,
u64
*
config
,
int
port
);
966
int
mlx4_wol_write
(
struct
mlx4_dev
*dev,
u64
config
,
int
port
);
967
968
int
mlx4_counter_alloc
(
struct
mlx4_dev
*dev,
u32
*
idx
);
969
void
mlx4_counter_free
(
struct
mlx4_dev
*dev,
u32
idx
);
970
971
int
mlx4_flow_attach
(
struct
mlx4_dev
*dev,
972
struct
mlx4_net_trans_rule
*
rule
,
u64
*reg_id);
973
int
mlx4_flow_detach
(
struct
mlx4_dev
*dev,
u64
reg_id);
974
975
void
mlx4_sync_pkey_table
(
struct
mlx4_dev
*dev,
int
slave,
int
port
,
976
int
i
,
int
val
);
977
978
int
mlx4_get_parav_qkey
(
struct
mlx4_dev
*dev,
u32
qpn,
u32
*
qkey
);
979
980
int
mlx4_is_slave_active
(
struct
mlx4_dev
*dev,
int
slave);
981
int
mlx4_gen_pkey_eqe
(
struct
mlx4_dev
*dev,
int
slave,
u8
port
);
982
int
mlx4_gen_guid_change_eqe
(
struct
mlx4_dev
*dev,
int
slave,
u8
port
);
983
int
mlx4_gen_slaves_port_mgt_ev
(
struct
mlx4_dev
*dev,
u8
port
,
int
attr
);
984
int
mlx4_gen_port_state_change_eqe
(
struct
mlx4_dev
*dev,
int
slave,
u8
port
,
u8
port_subtype_change);
985
enum
slave_port_state
mlx4_get_slave_port_state
(
struct
mlx4_dev
*dev,
int
slave,
u8
port
);
986
int
set_and_calc_slave_port_state
(
struct
mlx4_dev
*dev,
int
slave,
u8
port
,
int
event
,
enum
slave_port_gen_event
*gen_event);
987
988
void
mlx4_put_slave_node_guid
(
struct
mlx4_dev
*dev,
int
slave,
__be64
guid);
989
__be64
mlx4_get_slave_node_guid
(
struct
mlx4_dev
*dev,
int
slave);
990
991
#endif
/* MLX4_DEVICE_H */
Generated on Thu Jan 10 2013 12:49:25 for Linux Kernel by
1.8.2