#include <linux/device.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/kdev_t.h>
.remove = cm_remove_one
struct rb_root listen_service_table;
u64 listen_service_id;
struct rb_root remote_sidr_table;
struct idr local_id_table;
[sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \

static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
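/*
 * Per-port sysfs counters: one attribute per CM message type, grouped
 * under the four names in counter_group_names above.  The send and
 * receive handlers below bump these with atomic_long operations and
 * cm_show_counter() reports them through sysfs.
 */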
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
mad_agent = cm_id_priv->av.port->mad_agent;
                        cm_id_priv->av.pkey_index,
static int cm_alloc_response_msg(struct cm_port *port,

static void *cm_copy_private_data(const void *private_data,
if (!private_data || !private_data_len)

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
port = cm_dev->port[p-1];
spin_unlock_irqrestore(&cm.lock, flags);

static void cm_free_id(__be32 local_id)
spin_lock_irq(&cm.lock);
idr_remove(&cm.local_id_table,
           (__force int) (local_id ^ cm.random_id_operand));
spin_unlock_irq(&cm.lock);

cm_id_priv = idr_find(&cm.local_id_table,
                      (__force int) (local_id ^ cm.random_id_operand));
if (cm_id_priv->id.remote_id == remote_id)

spin_lock_irq(&cm.lock);
cm_id_priv = cm_get_id(local_id, remote_id);
spin_unlock_irq(&cm.lock);
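/*
 * Local IDs are XORed with a boot-time random operand before they are
 * used as idr keys, so the communication IDs that appear on the wire
 * are not simply sequential idr indices.
 */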
((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                             ((unsigned long *) mask)[i];
if (!src_data || !dst_data)
cm_mask_copy(src, src_data->data, dst_data->mask);
cm_mask_copy(dst, dst_data->data, src_data->mask);

static int cm_compare_private_data(u8 *private_data,
cm_mask_copy(src, private_data, dst_data->mask);
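/*
 * Private-data matching is mask-based: both the listener's data and the
 * request's data are ANDed with the relevant masks before comparison,
 * so only the bits a listener declared interest in participate.
 */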
struct rb_node **link = &cm.listen_service_table.rb_node;
__be64 service_mask = cm_id_priv->id.service_mask;
if ((cur_cm_id_priv->id.service_mask & service_id) ==
    (service_mask & cur_cm_id_priv->id.service_id) &&
    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
        return cur_cm_id_priv;
if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
else if (data_cmp < 0)
struct rb_node *node = cm.listen_service_table.rb_node;
data_cmp = cm_compare_private_data(private_data,
if ((cm_id_priv->id.service_mask & service_id) ==
    cm_id_priv->id.service_id &&
    (cm_id_priv->id.device == device) && !data_cmp)
else if (device > cm_id_priv->id.device)
else if (be64_lt(service_id, cm_id_priv->id.service_id))
else if (be64_gt(service_id, cm_id_priv->id.service_id))
else if (data_cmp < 0)
struct rb_node **link = &cm.remote_id_table.rb_node;
__be32 remote_id = timewait_info->work.remote_id;
if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
        return cur_timewait_info;

struct rb_node *node = cm.remote_id_table.rb_node;
if (be32_lt(remote_id, timewait_info->work.remote_id))
else if (be32_gt(remote_id, timewait_info->work.remote_id))
        return timewait_info;
struct rb_node **link = &cm.remote_qp_table.rb_node;
if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
        return cur_timewait_info;

struct rb_node **link = &cm.remote_sidr_table.rb_node;
union ib_gid *port_gid = &cm_id_priv->av.dgid;
__be32 remote_id = cm_id_priv->id.remote_id;
if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
        return cur_cm_id_priv;
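/*
 * The CM keeps four rb-trees: listens keyed by (device, service_id,
 * private data), timewait entries keyed by remote comm ID and by remote
 * QPN (each paired with remote_ca_guid), and SIDR requests keyed by
 * (remote_id, port GID).  The cm_insert_* helpers return NULL on
 * success and the clashing entry when a duplicate already exists.
 */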
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
cm_id_priv->id.device = device;
cm_id_priv->id.cm_handler = cm_handler;
cm_id_priv->id.remote_cm_qpn = 1;
ret = cm_alloc_id(cm_id_priv);
init_completion(&cm_id_priv->comp);
return &cm_id_priv->id;
static void cm_free_work(struct cm_work *work)

static inline int cm_convert_to_ms(int iba_time)
return 1 << max(iba_time - 8, 0);
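/*
 * IBA expresses timeouts as an exponent t meaning 4.096us * 2^t.
 * Since 4.096us * 2^8 is roughly 1ms, shifting by (t - 8) gives an
 * approximate millisecond value; e.g. t = 16 yields 1 << 8 = 256ms.
 */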
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
int ack_timeout = packet_life_time + 1;

if (ack_timeout >= ca_ack_delay)
        ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
else
        ack_timeout = ca_ack_delay +
                      (packet_life_time >= (ca_ack_delay - 1));

return min(31, ack_timeout);
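/*
 * The ack timeout must cover ca_ack_delay plus twice the packet
 * lifetime, but all three values are base-2 exponents: the +1 doubles
 * packet_life_time, and the comparisons add one more step only when
 * the two terms are close enough that their sum crosses into the next
 * power of two.  The result is clamped to the 5-bit field maximum, 31.
 */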
timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
timewait_info->work.local_id = local_id;
return timewait_info;

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
spin_unlock_irqrestore(&cm.lock, flags);
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
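/*
 * An ID sits in timewait for an interval derived from av.timeout before
 * its identifiers can be reused; cm_timewait_handler() below finishes
 * the transition once the delayed work fires.
 */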
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
spin_unlock_irqrestore(&cm.lock, flags);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id->state) {
spin_unlock_irq(&cm_id_priv->lock);
spin_lock_irq(&cm.lock);
spin_unlock_irq(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
               &cm_id_priv->id.device->node_guid,
               sizeof cm_id_priv->id.device->node_guid,
cm_reset_to_idle(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_destroy_id(cm_id, 0);
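/*
 * Teardown is state-driven: a listen is simply unlinked, a connection
 * still in progress is rejected or reset, and an established one is
 * routed through timewait.  Any work items still queued on the ID are
 * drained and freed before the ID itself goes away.
 */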
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
service_id &= service_mask;
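/*
 * A zero service_mask means "match everything", so it is widened to
 * all ones before the service ID is masked and inserted into the
 * listen tree.
 */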
cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                   GFP_KERNEL);
cm_mask_copy(cm_id_priv->compare_data->data,
             compare_data->data, compare_data->mask);
cur_cm_id_priv = cm_insert_listen(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
if (cur_cm_id_priv) {
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
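/*
 * Transaction IDs are namespaced per MAD agent: the agent's hi_tid
 * fills the upper 32 bits, leaving the lower 32 for the CM's local ID,
 * so replies can be routed back to the owning cm_id.
 */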
cm_req_set_remote_resp_timeout(req_msg,
cm_req_set_qp_type(req_msg, param->qp_type);
cm_req_set_local_resp_timeout(req_msg,
cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
cm_req_set_retry_count(req_msg, param->retry_count);
cm_req_set_srq(req_msg, param->srq);
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
cm_req_set_primary_sl(req_msg, pri_path->sl);
cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
cm_req_set_alt_flow_label(req_msg,
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
cm_req_set_alt_sl(req_msg, alt_path->sl);
cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
unsigned long flags;
ret = cm_validate_req_param(param);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->id.local_id);
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
error2: cm_free_msg(cm_id_priv->msg);
static int cm_issue_rej(struct cm_port *port,
                        void *ari, u8 ari_length)
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
cm_rej_set_msg_rejected(rej_msg, msg_rejected);
if (ari && ari_length) {
        cm_rej_set_reject_info_len(rej_msg, ari_length);
((local_ca_guid == remote_ca_guid) &&
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
memset(primary_path, 0, sizeof *primary_path);
primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
primary_path->pkey = req_msg->pkey;
primary_path->sl = cm_req_get_primary_sl(req_msg);
primary_path->mtu = cm_req_get_path_mtu(req_msg);
primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        cm_req_get_primary_local_ack_timeout(req_msg);

memset(alt_path, 0, sizeof *alt_path);
alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
alt_path->sl = cm_req_get_alt_sl(req_msg);
alt_path->mtu = cm_req_get_path_mtu(req_msg);
alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
        cm_req_get_alt_local_ack_timeout(req_msg);
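/*
 * The passive side reconstructs path records for both the primary and
 * alternate paths directly from REQ fields instead of issuing its own
 * SA path query.
 */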
static void cm_format_req_event(struct cm_work *work,
param = &work->cm_event.param.req_rcvd;
param->port = cm_id_priv->av.port->port_num;
param->qp_type = cm_req_get_qp_type(req_msg);
        cm_req_get_remote_resp_timeout(req_msg);
        cm_req_get_local_resp_timeout(req_msg);
param->retry_count = cm_req_get_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
static void cm_process_work(struct cm_id_private *cm_id_priv,
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
cm_deref_id(cm_id_priv);
cm_destroy_id(&cm_id_priv->id, ret);
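/*
 * Events are delivered to the consumer's cm_handler without the ID
 * lock held; a nonzero return asks the CM to destroy the ID on the
 * handler's behalf.
 */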
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          const void *private_data, u8 private_data_len)
cm_mra_set_msg_mraed(mra_msg, msg_mraed);
cm_mra_set_service_timeout(mra_msg, service_timeout);
if (private_data && private_data_len)

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          const void *private_data,
                          u8 private_data_len)
switch(cm_id_priv->id.state) {
if (ari && ari_length) {
        cm_rej_set_reject_info_len(rej_msg, ari_length);
if (private_data && private_data_len)
static void cm_dup_req_handler(struct cm_work *work,
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
spin_unlock_irq(&cm_id_priv->lock);
unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
        cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                   timewait_info->work.remote_id);
        spin_unlock_irq(&cm.lock);
        if (cur_cm_id_priv) {
                cm_dup_req_handler(work, cur_cm_id_priv);
                cm_deref_id(cur_cm_id_priv);
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
        spin_unlock_irq(&cm.lock);
listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
if (!listen_cm_id_priv) {
        spin_unlock_irq(&cm.lock);
spin_unlock_irq(&cm.lock);
return listen_cm_id_priv;
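/*
 * Duplicate detection for a new REQ happens in two steps: the remote
 * comm ID and then the remote QPN are inserted into the timewait
 * trees.  Only a REQ that is new on both axes gets matched against the
 * listen table; a clash is handed to cm_dup_req_handler() instead.
 */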
if (!cm_req_get_primary_subnet_local(req_msg)) {
        cm_req_set_primary_sl(req_msg, wc->sl);
if (!cm_req_get_alt_subnet_local(req_msg)) {
        cm_req_set_alt_sl(req_msg, wc->sl);
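/*
 * For a REQ that crossed a router (the subnet_local bit is clear) the
 * SL carried in the message is not meaningful on this subnet, so it is
 * replaced with the SL the request actually arrived on.
 */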
static int cm_req_handler(struct cm_work *work)
return PTR_ERR(cm_id);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->id.local_id);
cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
                work->port->port_num, 0, &work->path[0].sgid);
                &work->path[0].sgid, sizeof work->path[0].sgid,
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
                &work->path[0].sgid,
                sizeof work->path[0].sgid, NULL, 0);
cm_id_priv->tid = req_msg->hdr.tid;
        cm_req_get_local_resp_timeout(req_msg));
cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(listen_cm_id_priv);
cm_deref_id(listen_cm_id_priv);
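/*
 * Note the perspective flip when accepting a REQ: the requester's
 * "local" QPN and starting PSN become this side's remote_qpn and
 * sq_psn, and the new cm_id inherits its handler and context from the
 * matched listen.
 */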
static void cm_format_rep(struct cm_rep_msg *rep_msg,
cm_rep_set_target_ack_delay(rep_msg,
                            cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_srq(rep_msg, param->srq);
cm_rep_set_srq(rep_msg, 1);

unsigned long flags;
ret = cm_alloc_msg(cm_id_priv, &msg);
cm_format_rep(rep_msg, cm_id_priv, param);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
                          const void *private_data,
                          u8 private_data_len)
if (private_data && private_data_len)

                   const void *private_data,
                   u8 private_data_len)
unsigned long flags;
data = cm_copy_private_data(private_data, private_data_len);
return PTR_ERR(data);
ret = cm_alloc_msg(cm_id_priv, &msg);
              private_data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
param = &work->cm_event.param.rep_rcvd;
param->srq = cm_rep_get_srq(rep_msg);

static void cm_dup_rep_handler(struct cm_work *work)
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
static int cm_rep_handler(struct cm_work *work)
cm_dup_rep_handler(work);
cm_format_rep_event(work, cm_id_priv->qp_type);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
spin_unlock_irq(&cm_id_priv->lock);
spin_lock(&cm.lock);
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
         &cm.remote_id_table);
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock(&cm.lock);
cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
cm_id_priv->av.timeout =
        cm_ack_timeout(cm_rep_get_target_ack_delay(rep_msg),
                       cm_id_priv->av.timeout - 1);
cm_id_priv->alt_av.timeout =
        cm_ack_timeout(cm_rep_get_target_ack_delay(rep_msg),
                       cm_id_priv->alt_av.timeout - 1);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
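/*
 * Once the REP is in, both address vectors' timeouts are recomputed
 * with the responder's advertised target ack delay; the -1 recovers
 * the raw packet lifetime from the stored timeout before it is
 * recombined by cm_ack_timeout().
 */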
static int cm_establish_handler(struct cm_work *work)
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);

static int cm_rtu_handler(struct cm_work *work)
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
                           const void *private_data,
                           u8 private_data_len)
cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
if (private_data && private_data_len)

                    const void *private_data,
                    u8 private_data_len)
unsigned long flags;
ret = cm_alloc_msg(cm_id_priv, &msg);
        cm_enter_timewait(cm_id_priv);
              private_data, private_data_len);
        cm_enter_timewait(cm_id_priv);
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
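/*
 * If the DREQ cannot be allocated or posted, the ID still moves to
 * timewait: the local side disconnects even when the wire message
 * fails.
 */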
static void cm_format_drep(struct cm_drep_msg *drep_msg,
                           const void *private_data,
                           u8 private_data_len)
if (private_data && private_data_len)

                    const void *private_data,
                    u8 private_data_len)
unsigned long flags;
data = cm_copy_private_data(private_data, private_data_len);
return PTR_ERR(data);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_set_private_data(cm_id_priv, data, private_data_len);
cm_enter_timewait(cm_id_priv);
ret = cm_alloc_msg(cm_id_priv, &msg);
              private_data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static int cm_issue_drep(struct cm_port *port,
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);

static int cm_dreq_handler(struct cm_work *work)
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
switch (cm_id_priv->id.state) {
spin_unlock_irq(&cm_id_priv->lock);
cm_id_priv->tid = dreq_msg->hdr.tid;
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref:  cm_deref_id(cm_id_priv);
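/*
 * A DREQ whose remote QPN does not match the QPN this connection
 * negotiated is dropped.  cm_issue_drep() above builds a DREP from
 * only the received MAD, which appears intended to let the CM answer
 * a DREQ for a connection it no longer has state for.
 */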
static int cm_drep_handler(struct cm_work *work)
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
                   const void *private_data,
                   u8 private_data_len)
unsigned long flags;
switch (cm_id->state) {
        ret = cm_alloc_msg(cm_id_priv, &msg);
                      cm_id_priv, reason, ari, ari_length,
                      private_data, private_data_len);
        cm_reset_to_idle(cm_id_priv);
        ret = cm_alloc_msg(cm_id_priv, &msg);
                      cm_id_priv, reason, ari, ari_length,
                      private_data, private_data_len);
        cm_enter_timewait(cm_id_priv);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
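/*
 * Rejecting a connection that never got past the REQ stage resets the
 * ID to idle, while rejecting after a REP has been seen goes through
 * timewait, mirroring the receive-side state rules.
 */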
static void cm_format_rej_event(struct cm_work *work)
param = &work->cm_event.param.rej_rcvd;
param->ari = rej_msg->ari;
param->ari_length = cm_rej_get_reject_info_len(rej_msg);

spin_lock_irq(&cm.lock);
timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
if (!timewait_info) {
        spin_unlock_irq(&cm.lock);
         (timewait_info->work.local_id ^
          cm.random_id_operand));
if (cm_id_priv->id.remote_id == remote_id)
spin_unlock_irq(&cm.lock);
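/*
 * A REJ for a stale connection identifies it by the remote CA GUID
 * carried in the ARI, which is looked up in the timewait tree; the
 * local ID recovered there is de-randomized with random_id_operand
 * before the idr lookup.
 */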
static int cm_rej_handler(struct cm_work *work)
cm_id_priv = cm_acquire_rejected_id(rej_msg);
cm_format_rej_event(work);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
        cm_enter_timewait(cm_id_priv);
        cm_reset_to_idle(cm_id_priv);
        cm_enter_timewait(cm_id_priv);
        cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
                   const void *private_data,
                   u8 private_data_len)
unsigned long flags;
data = cm_copy_private_data(private_data, private_data_len);
return PTR_ERR(data);
switch(cm_id_priv->id.state) {
        cm_state = cm_id->state;
ret = cm_alloc_msg(cm_id_priv, &msg);
              msg_response, service_timeout,
              private_data, private_data_len);
cm_id->state = cm_state;
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
switch (cm_mra_get_msg_mraed(mra_msg)) {

static int cm_mra_handler(struct cm_work *work)
cm_id_priv = cm_acquire_mraed_id(mra_msg);
work->cm_event.param.mra_rcvd.service_timeout =
        cm_mra_get_service_timeout(mra_msg);
timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
          cm_convert_to_ms(cm_id_priv->av.timeout);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
                  cm_id_priv->msg, timeout))
                  cm_id_priv->msg, timeout))
                  cm_id_priv->msg, timeout)) {
        atomic_long_inc(&work->port->
cm_id_priv->msg->context[1] = (void *) (unsigned long)
                              cm_id_priv->id.state;
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
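/*
 * An MRA stretches the MAD retry deadline to the peer's advertised
 * service timeout plus the local path timeout.  The pre-MRA state is
 * stashed in msg->context[1]; cm_process_send_error() later compares
 * that snapshot against the current state to discard stale completions.
 */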
static void cm_format_lap(struct cm_lap_msg *lap_msg,
                          const void *private_data,
                          u8 private_data_len)
cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
cm_lap_set_sl(lap_msg, alternate_path->sl);
cm_lap_set_subnet_local(lap_msg, 1);
cm_lap_set_local_ack_timeout(lap_msg,
        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
if (private_data && private_data_len)
                   const void *private_data,
                   u8 private_data_len)
unsigned long flags;
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
cm_id_priv->alt_av.timeout =
        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                       cm_id_priv->alt_av.timeout - 1);
ret = cm_alloc_msg(cm_id_priv, &msg);
              alternate_path, private_data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
memset(path, 0, sizeof *path);
path->flow_label = cm_lap_get_flow_label(lap_msg);
path->sl = cm_lap_get_sl(lap_msg);
path->rate = cm_lap_get_packet_rate(lap_msg);
static int cm_lap_handler(struct cm_work *work)
param = &work->cm_event.param.lap_rcvd;
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.lap_state) {
spin_unlock_irq(&cm_id_priv->lock);
cm_id_priv->tid = lap_msg->hdr.tid;
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref:  cm_deref_id(cm_id_priv);
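/*
 * LAP/APR implement alternate-path loading for automatic path
 * migration: a LAP proposes a new alternate path while the connection
 * stays established, and the exchange is tracked in lap_state rather
 * than the main connection state.
 */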
static void cm_format_apr(struct cm_apr_msg *apr_msg,
                          const void *private_data,
                          u8 private_data_len)
if (info && info_length) {
if (private_data && private_data_len)

                   const void *private_data,
                   u8 private_data_len)
unsigned long flags;
ret = cm_alloc_msg(cm_id_priv, &msg);
cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
              info, info_length, private_data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static int cm_apr_handler(struct cm_work *work)
work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
static int cm_timewait_handler(struct cm_work *work)
spin_lock_irq(&cm.lock);
spin_unlock_irq(&cm.lock);
cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
                           timewait_info->work.remote_id);
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
cm_deref_id(cm_id_priv);
sidr_req_msg->pkey = param->path->pkey;

unsigned long flags;
ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
ret = cm_alloc_msg(cm_id_priv, &msg);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static void cm_format_sidr_req_event(struct cm_work *work,
param = &work->cm_event.param.sidr_req_rcvd;
param->port = work->port->port_num;

static int cm_sidr_req_handler(struct cm_work *work)
return PTR_ERR(cm_id);
cm_id_priv->av.dgid.global.interface_id = 0;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
spin_lock_irq(&cm.lock);
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
        spin_unlock_irq(&cm.lock);
cur_cm_id_priv = cm_find_listen(cm_id->device,
if (!cur_cm_id_priv) {
        spin_unlock_irq(&cm.lock);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
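/*
 * SIDR (service ID resolution) reuses the listen machinery: an
 * incoming SIDR REQ is de-duplicated through the remote_sidr tree and
 * then matched to a listener, whose handler and context the new ID
 * inherits just as for a connection REQ.
 */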
unsigned long flags;
ret = cm_alloc_msg(cm_id_priv, &msg);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
spin_unlock_irqrestore(&cm.lock, flags);
error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static void cm_format_sidr_rep_event(struct cm_work *work)
param = &work->cm_event.param.sidr_rep_rcvd;

static int cm_sidr_rep_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
spin_lock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_format_sidr_rep_event(work);
cm_process_work(cm_id_priv, work);
cm_deref_id(cm_id_priv);
memset(&cm_event, 0, sizeof cm_event);
spin_lock_irq(&cm_id_priv->lock);
if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
        cm_reset_to_idle(cm_id_priv);
        cm_reset_to_idle(cm_id_priv);
        cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
cm_event.param.send_status = wc_status;
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
spin_unlock_irq(&cm_id_priv->lock);
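/*
 * A send error is surfaced to the consumer only if the failed MAD is
 * still the one this ID is waiting on and the state has not moved
 * since it was posted; anything else is a stale completion and is
 * dropped quietly.
 */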
static void cm_send_handler(struct ib_mad_agent *mad_agent,
atomic_long_add(1 + msg->retries,
switch (mad_send_wc->status) {
cm_process_send_error(msg, mad_send_wc->status);
static void cm_work_handler(struct work_struct *_work)
ret = cm_req_handler(work);
ret = cm_mra_handler(work);
ret = cm_rej_handler(work);
ret = cm_rep_handler(work);
ret = cm_rtu_handler(work);
ret = cm_establish_handler(work);
ret = cm_dreq_handler(work);
ret = cm_drep_handler(work);
ret = cm_sidr_req_handler(work);
ret = cm_sidr_rep_handler(work);
ret = cm_lap_handler(work);
ret = cm_apr_handler(work);
ret = cm_timewait_handler(work);
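/*
 * Every received CM MAD is queued and then dispatched here from
 * workqueue context, one handler per message type.
 */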
static int cm_establish(struct ib_cm_id *cm_id)
unsigned long flags;
switch (cm_id->state)
spin_unlock_irqrestore(&cm_id_priv->lock, flags);

static int cm_migrate(struct ib_cm_id *cm_id)
unsigned long flags;
cm_id_priv->av = cm_id_priv->alt_av;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = cm_establish(cm_id);
ret = cm_migrate(cm_id);
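/*
 * ib_cm_notify() feeds QP events back into the CM: COMM_EST when the
 * QP saw traffic before the RTU arrived, and PATH_MIG, after which the
 * alternate address vector simply becomes the primary one.
 */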
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
unsigned long flags;
switch (cm_id_priv->id.state) {
qp_attr->port_num = cm_id_priv->av.port->port_num;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
unsigned long flags;
switch (cm_id_priv->id.state) {
qp_attr->ah_attr = cm_id_priv->av.ah_attr;
if (cm_id_priv->alt_av.ah_attr.dlid) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
unsigned long flags;
switch (cm_id_priv->id.state) {
switch (cm_id_priv->qp_type) {
qp_attr->timeout = cm_id_priv->av.timeout;
if (cm_id_priv->alt_av.ah_attr.dlid) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
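/*
 * ib_cm_init_qp_attr() derives the attributes for the consumer's
 * INIT -> RTR -> RTS transitions from the negotiated connection state,
 * so callers never compute PSNs or timeouts themselves; a populated
 * alternate AV additionally sets up path migration.
 */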
static void cm_get_ack_delay(struct cm_device *cm_dev)

static const struct sysfs_ops cm_counter_ops = {
        .show = cm_show_counter

static struct kobj_type cm_counter_obj_type = {
        .sysfs_ops = &cm_counter_ops,
        .default_attrs = cm_counter_default_attrs

static void cm_release_port_obj(struct kobject *obj)

static struct kobj_type cm_port_obj_type = {
        .release = cm_release_port_obj

.name = "infiniband_cm",
.devnode = cm_devnode,
static int cm_create_port_fs(struct cm_port *port)
                &port->cm_dev->device->kobj,
                &cm_counter_obj_type,
                "%s", counter_group_names[i]);

static void cm_remove_port_fs(struct cm_port *port)
unsigned long flags;
cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
cm_get_ack_delay(cm_dev);
        "%s", ib_device->name);
if (IS_ERR(cm_dev->device)) {
ret = cm_create_port_fs(port);
cm_remove_port_fs(port);
port = cm_dev->port[i-1];
cm_remove_port_fs(port);
static void cm_remove_one(struct ib_device *ib_device)
unsigned long flags;
port = cm_dev->port[i-1];
cm_remove_port_fs(port);
static int __init ib_cm_init(void)
memset(&cm, 0, sizeof cm);
INIT_LIST_HEAD(&cm.device_list);
cm.listen_service_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
INIT_LIST_HEAD(&cm.timewait_list);

static void __exit ib_cm_cleanup(void)
spin_lock_irq(&cm.lock);
spin_unlock_irq(&cm.lock);
kfree(timewait_info);
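/*
 * Module cleanup cancels any pending timewait work under cm.lock and
 * frees the leftover timewait entries once the client is unregistered
 * and no new work can be queued.
 */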