37 #include <linux/slab.h>
38 #include <linux/module.h>
55 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
57 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
62 static u32 ib_mad_client_id = 0;
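/* ib_mad_client_id supplies agent.hi_tid, the unique high transaction ID given to each newly registered MAD agent (see line 331 below). */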
106 ib_get_mad_port(struct ib_device *device, int port_num)
112 entry = __ib_get_mad_port(device, port_num);
113 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
125 static int get_spl_qp_index(enum ib_qp_type qp_type)
151 static int is_vendor_oui(char *oui)
153 if (oui[0] || oui[1] || oui[2])
158 static int is_vendor_method_in_use(
166 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
169 if (method_in_use(&method, mad_reg_req))
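/* ib_register_mad_agent(): validate the requested QP type, management class, and (for vendor classes) the OUI before allocating the agent below. */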
213 qpn = get_spl_qp_index(qp_type);
240 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
245 if (!is_vendor_oui(mad_reg_req->oui))
274 port_priv = ib_get_mad_port(device, port_num);
282 if (!port_priv->qp_info[qpn].qp) {
288 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
289 if (!mad_agent_priv) {
296 if (IS_ERR(mad_agent_priv->agent.mr)) {
311 mad_agent_priv->reg_req = reg_req;
312 mad_agent_priv->agent.rmpp_version = rmpp_version;
313 mad_agent_priv->agent.device = device;
314 mad_agent_priv->agent.recv_handler = recv_handler;
315 mad_agent_priv->agent.send_handler = send_handler;
320 INIT_LIST_HEAD(&mad_agent_priv->send_list);
321 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
322 INIT_LIST_HEAD(&mad_agent_priv->done_list);
323 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
328 init_completion(&mad_agent_priv->comp);
331 mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
338 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
339 if (!is_vendor_class(mgmt_class)) {
340 class = port_priv->version[mad_reg_req->
341 mgmt_class_version].class;
345 if (method_in_use(&method,
350 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
354 vendor = port_priv->version[mad_reg_req->
355 mgmt_class_version].vendor;
357 vclass = vendor_class_index(mgmt_class);
360 if (is_vendor_method_in_use(
366 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
376 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
378 return &mad_agent_priv->agent;
381 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
386 kfree(mad_agent_priv);
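/* Snoop agents mirror MAD traffic on a QP; mad_snoop_flags selects whether sends, receives, or both are snooped. */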
392 static inline int is_snooping_sends(int mad_snoop_flags)
394 return (mad_snoop_flags &
401 static inline int is_snooping_recvs(int mad_snoop_flags)
403 return (mad_snoop_flags &
424 sizeof mad_snoop_priv *
427 if (!new_snoop_table) {
438 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
456 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
457 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
461 qpn = get_spl_qp_index(qp_type);
466 port_priv = ib_get_mad_port(device, port_num);
472 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
473 if (!mad_snoop_priv) {
480 mad_snoop_priv->agent.device = device;
481 mad_snoop_priv->agent.recv_handler = recv_handler;
482 mad_snoop_priv->agent.snoop_handler = snoop_handler;
487 init_completion(&mad_snoop_priv->comp);
488 mad_snoop_priv->snoop_index = register_snoop_agent(
497 return &mad_snoop_priv->agent;
500 kfree(mad_snoop_priv);
529 cancel_mads(mad_agent_priv);
530 port_priv = mad_agent_priv->qp_info->port_priv;
534 remove_mad_reg_req(mad_agent_priv);
536 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
541 deref_mad_agent(mad_agent_priv);
546 kfree(mad_agent_priv);
554 qp_info = mad_snoop_priv->qp_info;
558 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
560 deref_snoop_agent(mad_snoop_priv);
563 kfree(mad_snoop_priv);
579 unregister_mad_agent(mad_agent_priv);
584 unregister_mad_snoop(mad_snoop_priv);
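/* snoop_send()/snoop_recv(): the snoop lock is dropped around each handler callback, and deref_snoop_agent() releases the reference taken under the lock. */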
600 spin_unlock_irqrestore(&mad_queue->lock, flags);
615 if (!mad_snoop_priv ||
620 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
621 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
622 send_buf, mad_send_wc);
623 deref_snoop_agent(mad_snoop_priv);
626 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
640 if (!mad_snoop_priv ||
645 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
646 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
648 deref_snoop_agent(mad_snoop_priv);
651 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
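/* handle_outgoing_dr_smp(): a directed-route SMP addressed to the local port is processed locally; build_smp_wc() synthesizes the work completion handed to the receive side. */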
654 static void build_smp_wc(struct ib_qp *qp,
658 memset(wc, 0, sizeof *wc);
694 port_num = send_wr->wr.ud.port_num;
696 port_num = mad_agent_priv->agent.port_num;
734 build_smp_wc(mad_agent_priv->agent.qp,
736 send_wr->wr.ud.pkey_index,
737 send_wr->wr.ud.port_num, &mad_wc);
747 mad_agent_priv->agent.recv_handler) {
763 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
764 mad_agent_priv->agent.port_num);
767 recv_mad_agent = find_mad_agent(port_priv,
770 if (!port_priv || !recv_mad_agent) {
794 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
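/* RMPP segmentation: seg_size is the payload that fits in one MAD after the header, and get_pad_size() returns the padding needed to fill the last segment. */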
806 seg_size = sizeof(struct ib_mad) - hdr_len;
807 if (data_len && seg_size) {
808 pad = seg_size - data_len % seg_size;
809 return pad == seg_size ? 0 : pad;
837 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
838 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
841 "alloc failed for len %zd, gfp %#x\n",
842 sizeof (*seg) + seg_size, gfp_mask);
843 free_send_rmpp_list(send_wr);
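/* ib_create_send_mad(): the ib_mad_send_wr_private bookkeeping and the MAD itself are carved out of one kzalloc() of sizeof *mad_send_wr + size; the two sg_list entries cover header and payload. */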
868 int hdr_len, int data_len,
878 pad = get_pad_size(hdr_len, data_len);
879 message_size = hdr_len + data_len + pad;
882 (rmpp_active || message_size > sizeof(struct ib_mad))) ||
883 (!rmpp_active && message_size > sizeof(struct ib_mad)))
886 size = rmpp_active ? hdr_len : sizeof(struct ib_mad);
887 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
891 mad_send_wr = buf + size;
900 mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
901 mad_send_wr->sg_list[1].length = sizeof(struct ib_mad) - hdr_len;
902 mad_send_wr->sg_list[1].lkey = mad_agent->mr->lkey;
904 mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
906 mad_send_wr->send_wr.num_sge = 2;
911 mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
914 ret = alloc_send_rmpp_list(mad_send_wr, gfp_mask);
921 mad_send_wr->send_buf.mad_agent = mad_agent;
963 list = &mad_send_wr->cur_seg->list;
965 if (mad_send_wr->cur_seg->num < seg_num) {
967 if (mad_send_wr->cur_seg->num == seg_num)
969 } else if (mad_send_wr->cur_seg->num > seg_num) {
971 if (mad_send_wr->cur_seg->num == seg_num)
974 return mad_send_wr->cur_seg->data;
980 if (mad_send_wr->send_buf.seg_count)
998 free_send_rmpp_list(mad_send_wr);
1000 deref_mad_agent(mad_agent_priv);
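/* ib_send_mad(): DMA-map the header and payload, then post the work request under the send queue lock; on failure both mappings are torn down again. */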
1011 unsigned long flags;
1019 mad_agent = mad_send_wr->send_buf.mad_agent;
1021 sge[0].addr = ib_dma_map_single(mad_agent->device,
1027 sge[1].addr = ib_dma_map_single(mad_agent->device,
1028 ib_get_payload(mad_send_wr),
1035 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
1047 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1049 ib_dma_unmap_single(mad_agent->device,
1052 ib_dma_unmap_single(mad_agent->device,
1069 unsigned long flags;
1073 for (; send_buf; send_buf = next_send_buf) {
1080 if (!send_buf->mad_agent->send_handler ||
1088 if (mad_agent_priv->agent.rmpp_version) {
1099 next_send_buf = send_buf->next;
1100 mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
1104 ret = handle_outgoing_dr_smp(mad_agent_priv,
1127 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1129 if (mad_agent_priv->agent.rmpp_version) {
1139 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1147 *bad_send_buf = send_buf;
1163 INIT_LIST_HEAD(&free_list);
1164 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
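/* Method table management: each management class (or vendor class/OUI) maps MAD methods to the owning agent; the helpers below allocate, populate, and prune those tables. */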
1204 if ((*method)->agent[i]) {
1215 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1218 "ib_mad_mgmt_method_table\n");
1233 if (method->agent[i])
1246 if (class->method_table[i])
1268 if (!memcmp(vendor_class->oui[i], oui, 3))
1292 if (method->agent[i] == agent) {
1298 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1307 port_priv = agent_priv->qp_info->port_priv;
1314 "ib_mad_mgmt_class_table\n");
1320 method = &(*class)->method_table[mgmt_class];
1321 if ((ret = allocate_method_table(method)))
1324 method = &(*class)->method_table[mgmt_class];
1327 if ((ret = allocate_method_table(method)))
1333 if (method_in_use(method, mad_reg_req))
1338 (*method)->agent[i] = agent_priv;
1344 remove_methods_mad_agent(*method, agent_priv);
1346 if (!check_method_table(*method)) {
1372 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1373 port_priv = agent_priv->qp_info->port_priv;
1374 vendor_table = &port_priv->version[
1376 if (!*vendor_table) {
1378 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1381 "ib_mad_mgmt_vendor_class_table\n");
1387 if (!(*vendor_table)->vendor_class[vclass]) {
1389 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1390 if (!vendor_class) {
1392 "ib_mad_mgmt_vendor_class\n");
1396 (*vendor_table)->vendor_class[vclass] = vendor_class;
1400 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1401 mad_reg_req->oui, 3)) {
1402 method = &(*vendor_table)->vendor_class[
1403 vclass]->method_table[i];
1410 if (!is_vendor_oui((*vendor_table)->vendor_class[
1412 method = &(*vendor_table)->vendor_class[
1413 vclass]->method_table[i];
1416 if ((ret = allocate_method_table(method)))
1418 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1419 mad_reg_req->oui, 3);
1428 if (method_in_use(method, mad_reg_req))
1433 (*method)->agent[i] = agent_priv;
1439 remove_methods_mad_agent(*method, agent_priv);
1441 if (!check_method_table(*method)) {
1449 (*vendor_table)->vendor_class[vclass] = NULL;
1450 kfree(vendor_class);
1454 *vendor_table = NULL;
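/* remove_mad_reg_req(): unwind a registration, freeing any class, vendor, or method table that becomes empty. */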
1479 port_priv = agent_priv->qp_info->port_priv;
1480 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1482 agent_priv->reg_req->mgmt_class_version].class;
1489 remove_methods_mad_agent(method, agent_priv);
1491 if (!check_method_table(method)) {
1496 if (!check_class_table(class)) {
1501 mgmt_class_version].class = NULL;
1507 if (!is_vendor_class(mgmt_class))
1511 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1513 agent_priv->reg_req->mgmt_class_version].vendor;
1520 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1526 remove_methods_mad_agent(method, agent_priv);
1531 if (!check_method_table(method)) {
1537 if (!check_vendor_class(vendor_class)) {
1539 kfree(vendor_class);
1542 if (!check_vendor_table(vendor)) {
1546 mgmt_class_version].
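/* find_mad_agent(): responses are routed by the hi_tid embedded in the TID; other MADs go through the class or vendor method tables. */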
1563 unsigned long flags;
1576 if (entry->agent.hi_tid == hi_tid) {
1595 if (!is_vendor_class(mad->mad_hdr.mgmt_class)) {
1600 if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >=
1601 IB_MGMT_MAX_METHODS)
1603 method = class->method_table[convert_mgmt_class(
1610 mad->mad_hdr.class_version].vendor;
1613 vendor_class = vendor->vendor_class[vendor_class_index(
1619 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1631 if (mad_agent->agent.recv_handler)
1641 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1646 static int validate_mad(struct ib_mad *mad, u32 qp_num)
1653 "version %d\n", mad->mad_hdr.base_version);
1678 return !mad_agent_priv->agent.rmpp_version ||
1679 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1687 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1688 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1696 u8 send_resp, rcv_resp;
1699 u8 port_num = mad_agent_priv->agent.port_num;
1705 if (send_resp == rcv_resp)
1718 if (!send_resp && rcv_resp) {
1723 return (!lmc || !((attr.src_path_bits ^
1724 rwc->wc->dlid_path_bits) &
1728 attr.grh.sgid_index, &sgid))
1736 return attr.dlid == rwc->wc->slid;
1742 static inline int is_direct(u8 class)
1758 rcv_has_same_class(wr, wc) &&
1763 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1764 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1773 if (is_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1776 rcv_has_same_class(wr, wc) &&
1781 (is_direct(wc->recv_buf.mad->mad_hdr.mgmt_class) ||
1782 rcv_has_same_gid(mad_agent_priv, wr, wc)))
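/* ib_mad_complete_recv(): RMPP reassembly first (if the agent enabled it), then the receive is either matched to a waiting send and both are completed, or handed straight to recv_handler. */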
1802 unsigned long flags;
1804 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1806 if (mad_agent_priv->agent.rmpp_version) {
1810 deref_mad_agent(mad_agent_priv);
1820 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1822 deref_mad_agent(mad_agent_priv);
1826 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1830 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1839 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
1841 deref_mad_agent(mad_agent_priv);
1850 memcpy(response, recv, sizeof *response);
1852 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1853 response->header.recv_wc.recv_buf.grh = &response->grh;
1855 response->mad.mad.mad_hdr.status =
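/* ib_mad_recv_done_handler(): unmap the buffer, validate the MAD, let the device's process_mad() consume or answer it, then dispatch to a matching agent or, failing that, generate an unmatched response. */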
1878 dequeue_mad(mad_list);
1883 ib_dma_unmap_single(port_priv->device,
1893 recv->header.recv_wc.recv_buf.mad = &recv->mad.mad;
1894 recv->header.recv_wc.recv_buf.grh = &recv->grh;
1900 if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
1906 "for response buffer\n");
1915 if (recv->mad.mad.mad_hdr.mgmt_class ==
1920 port_priv->device->node_type,
1922 port_priv->device->phys_port_cnt) ==
1932 port_priv->device->node_type,
1940 memcpy(response, recv, sizeof(*response));
1942 response->header.recv_wc.recv_buf.mad = &response->mad.mad;
1943 response->header.recv_wc.recv_buf.grh = &response->grh;
1949 qp_info->qp->qp_num);
1957 if (port_priv->device->process_mad) {
1958 ret = port_priv->device->process_mad(port_priv->device, 0,
1962 &response->mad.mad);
1971 qp_info->qp->qp_num);
1977 mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
1979 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
1985 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
1986 generate_unmatched_resp(recv, response)) {
1988 port_priv->device, port_num, qp_info->qp->qp_num);
1994 ib_mad_post_receive_mads(qp_info, response);
1998 ib_mad_post_receive_mads(qp_info, recv);
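/* wait_for_response()/adjust_timeout(): sends awaiting a response sit on wait_list in timeout order, so the delayed work only needs to look at the head entry. */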
2004 unsigned long delay;
2006 if (list_empty(&mad_agent_priv->wait_list)) {
2017 if ((long)delay <= 0)
2030 unsigned long delay;
2050 list_add(&mad_send_wr->agent_list, list_item);
2062 wait_for_response(mad_send_wr);
2072 unsigned long flags;
2077 if (mad_agent_priv->agent.rmpp_version) {
2093 wait_for_response(mad_send_wr);
2100 adjust_timeout(mad_agent_priv);
2101 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2108 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2112 deref_mad_agent(mad_agent_priv);
2115 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2127 unsigned long flags;
2134 qp_info = send_queue->qp_info;
2137 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2140 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2143 queued_send_wr = NULL;
2154 list_move_tail(&mad_list->list, &send_queue->list);
2156 spin_unlock_irqrestore(&send_queue->lock, flags);
2162 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2166 if (queued_send_wr) {
2167 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
2171 mad_send_wr = queued_send_wr;
2182 unsigned long flags;
2189 mad_send_wr->retry = 1;
2191 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2219 if (mad_send_wr->retry) {
2223 mad_send_wr->retry = 0;
2224 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr,
2227 ib_mad_send_done_handler(port_priv, wc);
2229 ib_mad_send_done_handler(port_priv, wc);
2243 "ib_modify_qp to RTS : %d\n", ret);
2245 mark_sends_for_retry(qp_info);
2247 ib_mad_send_done_handler(port_priv, wc);
2262 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) {
2266 ib_mad_send_done_handler(port_priv, &wc);
2269 ib_mad_recv_done_handler(port_priv, &wc);
2276 mad_error_handler(port_priv, &wc);
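/* cancel_mads(): splice every outstanding send onto a local cancel_list and complete each one through send_handler with a flush/cancel status. */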
2282 unsigned long flags;
2287 INIT_LIST_HEAD(&cancel_list);
2291 &mad_agent_priv->send_list, agent_list) {
2299 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2300 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2307 &cancel_list, agent_list) {
2310 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2324 if (&mad_send_wr->send_buf == send_buf)
2330 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
2331 &mad_send_wr->send_buf == send_buf)
2342 unsigned long flags;
2348 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2350 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2366 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2378 static void local_completions(struct work_struct *work)
2383 unsigned long flags;
2392 while (!list_empty(&mad_agent_priv->local_list)) {
2393 local = list_entry(mad_agent_priv->local_list.next,
2397 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2401 if (!recv_mad_agent) {
2404 goto local_send_completion;
2411 build_smp_wc(recv_mad_agent->agent.qp,
2414 0, recv_mad_agent->agent.port_num, &wc);
2417 local->mad_priv->header.recv_wc.mad_len =
2419 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2420 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2421 &local->mad_priv->header.recv_wc.rmpp_list);
2423 local->mad_priv->header.recv_wc.recv_buf.mad =
2426 snoop_recv(recv_mad_agent->qp_info,
2429 recv_mad_agent->agent.recv_handler(
2430 &recv_mad_agent->agent,
2434 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2437 local_send_completion:
2442 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2443 snoop_send(mad_agent_priv->qp_info,
2446 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2455 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
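/* timeout_sends(): delayed work that expires entries on wait_list once their timeout passes, retrying the send where possible and otherwise completing it with a timeout status. */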
2494 static void timeout_sends(struct work_struct *work)
2506 while (!list_empty(&mad_agent_priv->wait_list)) {
2513 if ((long)delay <= 0)
2523 !retry_send(mad_send_wr))
2526 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2533 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2539 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2542 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg)
2545 unsigned long flags;
2550 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
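/* ib_mad_post_receive_mads(): allocate and DMA-map receive buffers and post them until the receive queue reaches max_active (or the caller-supplied buffer is consumed). */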
2556 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2559 unsigned long flags;
2567 sg_list.length = sizeof *mad_priv - sizeof mad_priv->header;
2571 recv_wr.next = NULL;
2573 recv_wr.num_sge = 1;
2594 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
2595 mad_priv->header.mad_list.mad_queue = recv_queue;
2601 spin_unlock_irqrestore(&recv_queue->lock, flags);
2602 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2606 recv_queue->count--;
2607 spin_unlock_irqrestore(&recv_queue->lock, flags);
2608 ib_dma_unmap_single(qp_info->port_priv->device,
2609 mad_priv->header.mapping,
2634 while (!list_empty(&qp_info->recv_queue.list)) {
2647 ib_dma_unmap_single(qp_info->port_priv->device,
2689 "INIT: %d\n", i, ret);
2697 "RTR: %d\n", i, ret);
2706 "RTS: %d\n", i, ret);
2714 "notification: %d\n", ret);
2719 if (!port_priv->qp_info[i].qp)
2722 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
2739 event->event, qp_info->qp->qp_num);
2746 mad_queue->count = 0;
2748 INIT_LIST_HEAD(&mad_queue->list);
2755 init_mad_queue(qp_info, &qp_info->send_queue);
2756 init_mad_queue(qp_info, &qp_info->recv_queue);
2770 memset(&qp_init_attr, 0, sizeof qp_init_attr);
2771 qp_init_attr.send_cq = qp_info->port_priv->cq;
2772 qp_init_attr.recv_cq = qp_info->port_priv->cq;
2774 qp_init_attr.cap.max_send_wr = mad_sendq_size;
2775 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
2778 qp_init_attr.qp_type = qp_type;
2779 qp_init_attr.port_num = qp_info->port_priv->port_num;
2780 qp_init_attr.qp_context = qp_info;
2781 qp_init_attr.event_handler = qp_event_handler;
2783 if (IS_ERR(qp_info->qp)) {
2785 get_spl_qp_index(qp_type));
2786 ret = PTR_ERR(qp_info->qp);
2790 qp_info->send_queue.max_active = mad_sendq_size;
2791 qp_info->recv_queue.max_active = mad_recvq_size;
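/* ib_mad_port_open(): per-port setup of the CQ, PD, MR, the two special QPs (QP0/QP1), and the "ib_mad%d" workqueue that runs completions. */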
2811 static int ib_mad_port_open(struct ib_device *device,
2816 unsigned long flags;
2817 char name[sizeof "ib_mad123"];
2821 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
2831 init_mad_qp(port_priv, &port_priv->qp_info[0]);
2832 init_mad_qp(port_priv, &port_priv->qp_info[1]);
2834 cq_size = mad_sendq_size + mad_recvq_size;
2840 ib_mad_thread_completion_handler,
2841 NULL, port_priv, cq_size, 0);
2842 if (IS_ERR(port_priv->cq)) {
2844 ret = PTR_ERR(port_priv->cq);
2849 if (IS_ERR(port_priv->pd)) {
2851 ret = PTR_ERR(port_priv->pd);
2856 if (IS_ERR(port_priv->mr)) {
2858 ret = PTR_ERR(port_priv->mr);
2871 snprintf(name, sizeof name, "ib_mad%d", port_num);
2873 if (!port_priv->wq) {
2881 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2883 ret = ib_mad_port_start(port_priv);
2894 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2898 destroy_mad_qp(&port_priv->qp_info[1]);
2900 destroy_mad_qp(&port_priv->qp_info[0]);
2907 cleanup_recv_queue(&port_priv->qp_info[1]);
2908 cleanup_recv_queue(&port_priv->qp_info[0]);
2920 static int ib_mad_port_close(struct ib_device *device, int port_num)
2923 unsigned long flags;
2926 port_priv = __ib_get_mad_port(device, port_num);
2927 if (port_priv == NULL) {
2928 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2933 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
2936 destroy_mad_qp(&port_priv->qp_info[1]);
2937 destroy_mad_qp(&port_priv->qp_info[0]);
2941 cleanup_recv_queue(&port_priv->qp_info[1]);
2942 cleanup_recv_queue(&port_priv->qp_info[0]);
2950 static void ib_mad_init_device(struct ib_device *device)
2965 for (i = start; i <= end; i++) {
2966 if (ib_mad_port_open(device, i)) {
2981 if (ib_mad_port_close(device, i))
2988 while (i >= start) {
2993 if (ib_mad_port_close(device, i))
3000 static void ib_mad_remove_device(struct ib_device *device)
3014 for (i = 0; i < num_ports; i++, cur_port++) {
3018 device->name, cur_port);
3019 if (ib_mad_port_close(device, cur_port))
3021 device->name, cur_port);
3027 .add = ib_mad_init_device,
3028 .remove = ib_mad_remove_device
3031 static int __init ib_mad_init_module(void)
3046 if (!ib_mad_cache) {
3052 INIT_LIST_HEAD(&ib_mad_port_list);
3068 static void __exit ib_mad_cleanup_module(void)