Linux Kernel 3.7.1
drivers/infiniband/hw/ehca/ehca_hca.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  HCA query functions
 *
 *  Authors: Heiko J Schick <[email protected]>
 *           Christoph Raisch <[email protected]>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 *  OpenIB BSD License
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 *  IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/gfp.h>

#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
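/*
 * Many fields of struct ib_device_attr are signed ints, while the firmware
 * control block reports unsigned values; clamp them to INT_MAX so they do
 * not wrap into negative numbers.
 */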
static unsigned int limit_uint(unsigned int value)
{
	return min_t(unsigned int, value, INT_MAX);
}
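/*
 * ehca_query_device() - query_device verb for the ehca ib_device: read the
 * HCA control block from firmware and translate it into struct
 * ib_device_attr, including the HCA_CAP_* to IB_DEVICE_* capability mapping.
 */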
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
{
	int i, ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_hca *rblock;

	static const u32 cap_mapping[] = {
		IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
		IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
		IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
		IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
		IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
		IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
		IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
		IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
		IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
		IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
		IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
	};

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query device properties");
		ret = -EINVAL;
		goto query_device1;
	}

	memset(props, 0, sizeof(struct ib_device_attr));
	props->page_size_cap   = shca->hca_cap_mr_pgsize;
	props->fw_ver          = rblock->hw_ver;
	props->max_mr_size     = rblock->max_mr_size;
	props->vendor_id       = rblock->vendor_id >> 8;
	props->vendor_part_id  = rblock->vendor_part_id >> 16;
	props->hw_ver          = rblock->hw_ver;
	props->max_qp          = limit_uint(rblock->max_qp);
	props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
	props->max_sge         = limit_uint(rblock->max_sge);
	props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
	props->max_cq          = limit_uint(rblock->max_cq);
	props->max_cqe         = limit_uint(rblock->max_cqe);
	props->max_mr          = limit_uint(rblock->max_mr);
	props->max_mw          = limit_uint(rblock->max_mw);
	props->max_pd          = limit_uint(rblock->max_pd);
	props->max_ah          = limit_uint(rblock->max_ah);
	props->max_ee          = limit_uint(rblock->max_rd_ee_context);
	props->max_rdd         = limit_uint(rblock->max_rd_domain);
	props->max_fmr         = limit_uint(rblock->max_mr);
	props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
	props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
	props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
	props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
	props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);

	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		props->max_srq         = limit_uint(props->max_qp);
		props->max_srq_wr      = limit_uint(props->max_qp_wr);
		props->max_srq_sge     = 3;
	}

	props->max_pkeys = 16;
	/* Some FW versions say 0 here; insert sensible value in that case */
	props->local_ca_ack_delay = rblock->local_ca_ack_delay ?
		min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
	props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
	props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
	props->max_mcast_grp   = limit_uint(rblock->max_mcast_grp);
	props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
	props->max_total_mcast_qp_attach
		= limit_uint(rblock->max_total_mcast_qp_attach);

	/* translate device capabilities */
	props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
	for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
		if (rblock->hca_cap_indicators & cap_mapping[i + 1])
			props->device_cap_flags |= cap_mapping[i];

query_device1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
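/* Translate the firmware MTU encoding (0x1..0x5) into enum ib_mtu. */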
static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
{
	switch (fw_mtu) {
	case 0x1:
		return IB_MTU_256;
	case 0x2:
		return IB_MTU_512;
	case 0x3:
		return IB_MTU_1024;
	case 0x4:
		return IB_MTU_2048;
	case 0x5:
		return IB_MTU_4096;
	default:
		ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
			 fw_mtu);
		return 0;
	}
}
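/* Translate the firmware VL capability encoding into the number of VLs. */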
static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
{
	switch (vl_cap) {
	case 0x1:
		return 1;
	case 0x2:
		return 2;
	case 0x3:
		return 4;
	case 0x4:
		return 8;
	case 0x5:
		return 15;
	default:
		ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
			 vl_cap);
		return 0;
	}
}
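/*
 * ehca_query_port() - query_port verb: fill struct ib_port_attr from the
 * firmware port control block; older firmware that does not report physical
 * port info gets sensible defaults.
 */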
int ehca_query_port(struct ib_device *ibdev,
		    u8 port, struct ib_port_attr *props)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_port1;
	}

	memset(props, 0, sizeof(struct ib_port_attr));

	props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
	props->port_cap_flags  = rblock->capability_mask;
	props->gid_tbl_len     = rblock->gid_tbl_len;
	if (rblock->max_msg_sz)
		props->max_msg_sz    = rblock->max_msg_sz;
	else
		props->max_msg_sz    = 0x1 << 31;
	props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
	props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
	props->pkey_tbl_len    = rblock->pkey_tbl_len;
	props->lid             = rblock->lid;
	props->sm_lid          = rblock->sm_lid;
	props->lmc             = rblock->lmc;
	props->sm_sl           = rblock->sm_sl;
	props->subnet_timeout  = rblock->subnet_timeout;
	props->init_type_reply = rblock->init_type_reply;
	props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);

	if (rblock->state && rblock->phys_width) {
		props->phys_state      = rblock->phys_pstate;
		props->state           = rblock->phys_state;
		props->active_width    = rblock->phys_width;
		props->active_speed    = rblock->phys_speed;
	} else {
		/* old firmware releases don't report physical
		 * port info, so use default values
		 */
		props->phys_state      = 5;
		props->state           = rblock->state;
		props->active_width    = IB_WIDTH_12X;
		props->active_speed    = IB_SPEED_SDR;
	}

query_port1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
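/*
 * ehca_query_sma_attr() - snapshot of LID, LMC, SM LID/SL and the P_Key
 * table for the subnet management agent; the firmware control block is
 * allocated with GFP_ATOMIC, so the allocation will not sleep.
 */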
int ehca_query_sma_attr(struct ehca_shca *shca,
			u8 port, struct ehca_sma_attr *attr)
{
	int ret = 0;
	u64 h_ret;
	struct hipz_query_port *rblock;

	rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_sma_attr1;
	}

	memset(attr, 0, sizeof(struct ehca_sma_attr));

	attr->lid    = rblock->lid;
	attr->lmc    = rblock->lmc;
	attr->sm_sl  = rblock->sm_sl;
	attr->sm_lid = rblock->sm_lid;

	attr->pkey_tbl_len = rblock->pkey_tbl_len;
	memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));

query_sma_attr1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
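/* query_pkey verb: return one entry of the port's P_Key table. */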
int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca;
	struct hipz_query_port *rblock;

	shca = container_of(ibdev, struct ehca_shca, ib_device);
	if (index > 16) {
		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
		return -EINVAL;
	}

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_pkey1;
	}

	memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));

query_pkey1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
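/*
 * query_gid verb: assemble the GID from the port's GID prefix (upper eight
 * bytes) and the indexed GUID entry (lower eight bytes).
 */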
int ehca_query_gid(struct ib_device *ibdev, u8 port,
		   int index, union ib_gid *gid)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
					      ib_device);
	struct hipz_query_port *rblock;

	if (index < 0 || index > 255) {
		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
		return -EINVAL;
	}

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		return -ENOMEM;
	}

	h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto query_gid1;
	}

	memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
	memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));

query_gid1:
	ehca_free_fw_ctrlblock(rblock);

	return ret;
}
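/* Port capability bits that ehca_modify_port() is allowed to change. */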
static const u32 allowed_port_caps = (
	IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
	IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
	IB_PORT_VENDOR_CLASS_SUP);
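/*
 * modify_port verb: apply the set/clr capability masks on top of the
 * current capability_mask and pass the result to firmware, serialized by
 * shca->modify_mutex.
 */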
int ehca_modify_port(struct ib_device *ibdev,
		     u8 port, int port_modify_mask,
		     struct ib_port_modify *props)
{
	int ret = 0;
	struct ehca_shca *shca;
	struct hipz_query_port *rblock;
	u32 cap;
	u64 hret;

	shca = container_of(ibdev, struct ehca_shca, ib_device);
	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
	    & ~allowed_port_caps) {
		ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
			 "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
			 props->clr_port_cap_mask, allowed_port_caps);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&shca->modify_mutex))
		return -ERESTARTSYS;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		ret = -ENOMEM;
		goto modify_port1;
	}

	hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
	if (hret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto modify_port2;
	}

	cap = (rblock->capability_mask | props->set_port_cap_mask)
		& ~props->clr_port_cap_mask;

	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
				  cap, props->init_type, port_modify_mask);
	if (hret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
			 hret);
		ret = -EINVAL;
	}

modify_port2:
	ehca_free_fw_ctrlblock(rblock);

modify_port1:
	mutex_unlock(&shca->modify_mutex);

	return ret;
}