#include <linux/in6.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/module.h>

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

	.remove = cma_remove_one

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2
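/*
 * The fragments below appear to come from the id state helpers
 * (cma_comp(), cma_comp_exch() and cma_exch(), presumably): each reads or
 * swaps id_priv->state while holding id_priv->lock with IRQs saved.
 */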
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);

	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
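/*
 * SDP hello-header helpers: the major protocol version sits in the top
 * nibble of sdp_version (hence the ">> 4"), and sdp_get_ip_ver()/
 * sdp_set_ip_ver() access the IP-version field of struct sdp_hh.
 */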
static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
static inline void cma_deref_dev(struct cma_device *cma_dev)

static inline void release_mc(struct kref *kref)

	cma_deref_dev(id_priv->cma_dev);
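/*
 * Presumably from cma_set_qkey(): the QKEY is chosen by port space, and
 * for IPoIB it is looked up from the multicast member record of the MGID
 * derived from the bound address.
 */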
	switch (id_priv->id.ps) {
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
					     id_priv->id.port_num, &rec.mgid,

	for (i = 0; i < props.gid_tbl_len; ++i) {
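/*
 * Device-association fragments (cma_acquire_dev(), presumably): every port
 * of each cma_device is scanned with find_gid_port() for either the
 * IBoE-style GID or the plain IB GID, and the first match is attached via
 * cma_attach_to_dev().
 */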
	union ib_gid gid, iboe_gid;

	iboe_addr_get_sgid(dev_addr, &iboe_gid);
	       rdma_addr_gid_offset(dev_addr), sizeof gid);
	for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			ret = find_gid_port(cma_dev->device, &iboe_gid, port);
			ret = find_gid_port(cma_dev->device, &gid, port);
				id_priv->id.port_num = port;

	cma_attach_to_dev(id_priv, cma_dev);
	if (id_priv->state != state) {

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);

	id_priv->id.event_handler = event_handler;
	id_priv->id.qp_type = qp_type;
	init_completion(&id_priv->comp);
	INIT_LIST_HEAD(&id_priv->mc_list);
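/*
 * QP setup and transition fragments: cma_init_ud_qp()/cma_init_conn_qp()
 * initialize a newly created QP, while the cma_modify_qp_rtr()/_rts()/_err()
 * paths below fill an ib_qp_attr (pkey, qkey, etc.) and presumably move the
 * QP through INIT/RTR/RTS or into the error state.
 */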
	int qp_attr_mask, ret;

	int qp_attr_mask, ret;

		ret = cma_init_ud_qp(id_priv, qp);
		ret = cma_init_conn_qp(id_priv, qp);

	int qp_attr_mask, ret;
	if (!id_priv->id.qp) {

	int qp_attr_mask, ret;
	if (!id_priv->id.qp) {

	if (!id_priv->id.qp) {

				 struct ib_qp_attr *qp_attr, int *qp_attr_mask)

	pkey = ib_addr_get_pkey(dev_addr);
	ret = cma_set_qkey(id_priv);
	ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
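/*
 * Address classification helpers: cma_zero_addr(), cma_loopback_addr(),
 * cma_any_addr(), cma_addr_cmp() and cma_any_port() handle both AF_INET
 * and AF_INET6 sockaddrs.
 */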
		return ipv4_is_zeronet(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;

static inline int cma_loopback_addr(struct sockaddr *addr)
		return ipv4_is_loopback(
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
		return ipv6_addr_loopback(
			&((struct sockaddr_in6 *) addr)->sin6_addr);

static inline int cma_any_addr(struct sockaddr *addr)
	return cma_zero_addr(addr) || cma_loopback_addr(addr);

		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);

static inline int cma_any_port(struct sockaddr *addr)
	return !cma_port(addr);
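/*
 * Presumably cma_get_net_info()/cma_save_net_info(): the IP version, port
 * and src/dst addresses are pulled out of either the SDP hello header or
 * the cma_hdr carried in the CM private data, then copied into the
 * rdma_addr of the new id.
 */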
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
		*ip_ver = sdp_get_ip_ver(hdr);
		*port = ((struct sdp_hh *) hdr)->port;
		*src = &((struct sdp_hh *) hdr)->src_addr;
		*dst = &((struct sdp_hh *) hdr)->dst_addr;

		*ip_ver = cma_get_ip_ver(hdr);
		*port = ((struct cma_hdr *) hdr)->port;
		*src = &((struct cma_hdr *) hdr)->src_addr;
		*dst = &((struct cma_hdr *) hdr)->dst_addr;

	if (*ip_ver != 4 && *ip_ver != 6)

static void cma_save_net_info(struct rdma_addr *addr,
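/*
 * Teardown fragments (cma_cancel_operation(), cma_release_port(),
 * cma_leave_mc_groups() and rdma_destroy_id(), presumably): listens, route
 * queries and multicast joins are cancelled, the bound port and device
 * references are dropped, and the path records are freed.
 */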
		list_del_init(&dev_id_priv->list);

	cma_cancel_route(id_priv);
		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
			cma_cancel_listens(id_priv);

	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {

	while (!list_empty(&id_priv->mc_list)) {
			kref_put(&mc->mcref, release_mc);

	cma_cancel_operation(id_priv, state);

	cma_leave_mc_groups(id_priv);
	cma_release_dev(id_priv);
	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	cma_deref_id(id_priv->id.context);
	kfree(id_priv->id.route.path_rec);
	ret = cma_modify_qp_rtr(id_priv, NULL);
	ret = cma_modify_qp_rts(id_priv, NULL);
	cma_modify_qp_err(id_priv);

	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)

	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
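/*
 * IB CM event handler fragments: REP data is verified (cma_verify_rep())
 * and turned into rdma_cm_event notifications delivered through
 * id.event_handler(); a non-zero return from the handler presumably causes
 * the id to be destroyed.
 */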
	switch (ib_event->event) {
		event.status = cma_verify_rep(id_priv, ib_event->private_data);
		event.status = cma_rep_recv(id_priv);
		cma_modify_qp_err(id_priv);
		event.param.conn.private_data = ib_event->private_data;
	ret = id_priv->id.event_handler(&id_priv->id, &event);

			     &ip_ver, &port, &src, &dst))
	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);
	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
					 &rt->addr.dev_addr);
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

			     &ip_ver, &port, &src, &dst))
	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);
	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
					&id->route.addr.dev_addr);
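/*
 * Connection-request fragments (cma_set_req_event_data() and
 * cma_req_handler(), presumably): a new conn_id is created from the listen
 * id (UD/SIDR requests via cma_new_udp_id(), connected ones via
 * cma_new_conn_id()), a device is acquired for it, and the request event
 * is passed up to the listener's handler.
 */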
	event->param.conn.private_data = private_data + offset;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;

	if (!cma_check_req_qp_type(&listen_id->id, ib_event))

	offset = cma_user_data_offset(listen_id->id.ps);
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);

	ret = cma_acquire_dev(conn_id);

	ret = conn_id->id.event_handler(&conn_id->id, &event);

	cma_deref_id(conn_id);
	cma_deref_id(conn_id);
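/*
 * Presumably cma_set_compare_data(): builds the ib_cm private-data
 * compare/mask so a listen only matches REQs whose CMA or SDP header
 * carries the expected IP version (and, for non-wildcard binds, the
 * expected address).
 */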
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			if (!cma_any_addr(addr)) {

			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			if (!cma_any_addr(addr)) {
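/*
 * iWARP CM handler fragments: the iw_event status, ird/ord values and
 * private data are copied into the rdma_cm_event before it is reported.
 */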
	switch (iw_event->event) {
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		switch (iw_event->status) {
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;

		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)

				   listen_id->id.context,
	if (IS_ERR(new_cm_id)) {

	ret = cma_acquire_dev(conn_id);

	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	ret = conn_id->id.event_handler(&conn_id->id, &event);

	cma_deref_id(conn_id);
	cma_deref_id(conn_id);

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)

					 iw_conn_req_handler,

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);

			    id_priv->id.qp_type);
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
	cma_attach_to_dev(dev_id_priv, cma_dev);
		       "listening on device %s\n", ret, cma_dev->device->name);

	cma_listen_on_dev(id_priv, cma_dev);
	id_priv->tos = (u8) tos;

	route = &work->id->id.route;

	memset(&path_rec, 0, sizeof path_rec);
	path_rec.service_id = cma_get_service_id(id_priv->id.ps,

					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       work, &id_priv->query);
static void cma_work_handler(struct work_struct *_work)

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {

	cma_deref_id(id_priv);

static void cma_ndev_work_handler(struct work_struct *_work)

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {

	cma_deref_id(id_priv);
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)

	ret = cma_query_ib_route(id_priv, timeout_ms, work);

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
	id->route.num_paths = num_paths;

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
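/*
 * RoCE/IBoE route resolution fragments: instead of querying an SA, the
 * path record appears to be synthesized from the netdev, with GIDs built
 * from the MAC/VLAN (iboe_mac_vlan_to_ll()), the SL taken from the
 * priority-to-TC map of the TOS, and MTU/rate taken from the interface.
 */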
	vid = rdma_vlan_dev_vlan_id(ndev);

	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);

	route->path_rec->sl = netdev_get_prio_tc_map(
			rt_tos2priority(id_priv->tos));

	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate = iboe_get_rate(ndev);
	work->event.status = 0;

	ret = cma_resolve_ib_route(id_priv, timeout_ms);
	ret = cma_resolve_iboe_route(id_priv);
	ret = cma_resolve_iw_route(id_priv, timeout_ms);

	cma_deref_id(id_priv);
	for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)

	id_priv->id.route.addr.dev_addr.dev_type =

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	memset(&event, 0, sizeof event);

	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv);

		memcpy(&id_priv->id.route.addr.src_addr, src_addr,
		       ip_addr_size(src_addr));

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_deref_id(id_priv);

	cma_deref_id(id_priv);
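/*
 * Address resolution fragments (cma_resolve_loopback(), cma_bind_addr()
 * and rdma_resolve_addr(), presumably): a wildcard destination is resolved
 * against the local device, otherwise the request is handed off to the
 * ib_addr layer with addr_handler() as the completion callback.
 */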
	ret = cma_bind_loopback(id_priv);

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	src = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
	if (cma_zero_addr(src)) {
		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;

	if (!src_addr || !src_addr->sa_family) {

			     struct sockaddr *dst_addr, int timeout_ms)

		ret = cma_bind_addr(id, src_addr, dst_addr);

	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
	if (cma_any_addr(dst_addr))
		ret = cma_resolve_loopback(id_priv);
				      dst_addr, &id->route.addr.dev_addr,
				      timeout_ms, addr_handler, id_priv);

	cma_deref_id(id_priv);
	unsigned long flags;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	unsigned long flags;
	spin_unlock_irqrestore(&id_priv->lock, flags);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	hlist_add_head(&id_priv->node, &bind_list->owners);

			  unsigned short snum)

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);

	bind_list->port = (unsigned short) port;
	cma_bind_port(bind_list, id_priv);
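/*
 * Port-space management fragments: each port space keeps an idr of bound
 * ports; cma_alloc_any_port() appears to do a rover search through the
 * local ephemeral range, while cma_check_port()/cma_use_port() enforce the
 * reuseaddr and wildcard-address sharing rules.
 */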
	static unsigned int last_used_port;

	remaining = (high - low) + 1;
		if (last_used_port != rover &&
		    !idr_find(ps, (unsigned short) rover)) {
			int ret = cma_alloc_port(ps, id_priv, rover);
				last_used_port = rover;
		if ((rover < low) || (rover > high))

	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
		if (id_priv == cur_id)
		cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
		if (!cma_addr_cmp(addr, cur_addr))

	unsigned short snum;
	snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
		ret = cma_alloc_port(ps, id_priv, snum);
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
			cma_bind_port(bind_list, id_priv);

	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);

	switch (id_priv->id.ps) {
	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
		ret = cma_alloc_any_port(ps, id_priv);
		ret = cma_use_port(ps, id_priv);
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
#if IS_ENABLED(CONFIG_IPV6)

	ret = cma_bind_listen(id_priv);
	ret = cma_ib_listen(id_priv);
	ret = cma_iw_listen(id_priv, backlog);
	cma_listen_on_all(id_priv);

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (!cma_any_addr(addr)) {
		ret = cma_acquire_dev(id_priv);
	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
#if IS_ENABLED(CONFIG_IPV6)
	ret = cma_get_port(id_priv);
	cma_release_dev(id_priv);

	sdp_set_ip_ver(sdp_hdr, 4);
	cma_set_ip_ver(cma_hdr, 4);
	sdp_set_ip_ver(sdp_hdr, 6);
	cma_set_ip_ver(cma_hdr, 6);
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
		ret = cma_set_qkey(id_priv);
				     id_priv->id.route.path_rec,
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	req.private_data_len = sizeof(struct cma_hdr) +
			       conn_param->private_data_len;

	if (!req.private_data)

	memcpy((void *) req.private_data + sizeof(struct cma_hdr),

	route = &id_priv->id.route;
	ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);

	req.service_id = cma_get_service_id(id_priv->id.ps,

	offset = cma_user_data_offset(id_priv->id.ps);

	route = &id_priv->id.route;
	ret = cma_format_hdr(private_data, id_priv->id.ps, route);

	req.service_id = cma_get_service_id(id_priv->id.ps,
	req.qp_type = id_priv->id.qp_type;
	req.srq = id_priv->srq ? 1 : 0;

	if (ret && !IS_ERR(id)) {

	kfree(private_data);
		return PTR_ERR(cm_id);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	cm_id->local_addr = *sin;

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
	cm_id->remote_addr = *sin;

	ret = cma_modify_qp_rtr(id_priv, conn_param);

		id_priv->srq = conn_param->srq;

		ret = cma_resolve_ib_udp(id_priv, conn_param);
		ret = cma_connect_ib(id_priv, conn_param);
		ret = cma_connect_iw(id_priv, conn_param);
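/*
 * Accept-path fragments: cma_accept_ib() moves the QP through RTR/RTS and
 * sends the CM REP built from qp_num/seq_num, while the SIDR (UD) path
 * sets the qkey in its reply instead; rdma_accept() presumably dispatches
 * between them by transport and QP type.
 */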
	ret = cma_modify_qp_rtr(id_priv, conn_param);
	ret = cma_modify_qp_rts(id_priv, conn_param);

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.failover_accepted = 0;
	rep.srq = id_priv->srq ? 1 : 0;

	ret = cma_modify_qp_rtr(id_priv, conn_param);

	if (id_priv->id.qp) {

	memset(&rep, 0, sizeof rep);
	ret = cma_set_qkey(id_priv);
	rep.qp_num = id_priv->qp_num;

	if (!id->qp && conn_param) {
		id_priv->srq = conn_param->srq;

		ret = cma_accept_ib(id_priv, conn_param);
		ret = cma_rep_recv(id_priv);
		ret = cma_accept_iw(id_priv, conn_param);
	cma_modify_qp_err(id_priv);

	switch (id->device->node_type) {

		u8 private_data_len)

						private_data, private_data_len);
					     0, private_data, private_data_len);
				   private_data, private_data_len);

	ret = cma_modify_qp_err(id_priv);
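/*
 * Multicast join completion (cma_ib_mc_handler(), presumably): on success
 * the QP is attached to the group and a MULTICAST_JOIN event is reported
 * with the group's AH attributes and the multicast QPN 0xFFFFFF.
 */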
static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)

	if (!status && id_priv->id.qp)

	memset(&event, 0, sizeof event);
	event.param.ud.private_data = mc->context;
				     id_priv->id.port_num, &multicast->rec,
	event.param.ud.qp_num = 0xFFFFFF;

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);
		*mgid = *(union ib_gid *) (mc_map + 4);
		*mgid = *(union ib_gid *) (mc_map + 4);

	ib_addr_get_mgid(dev_addr, &rec.mgid);

	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));

						id_priv->id.port_num, &rec,
						cma_ib_mc_handler, mc);
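/*
 * IBoE multicast fragments: the join is completed from a workqueue
 * (iboe_mcast_work_handler()), and cma_iboe_set_mgid() appears to build
 * the MGID directly from the IP multicast address (0xff, 0x0e prefix with
 * 0xff 0xff marker bytes for IPv4).
 */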
static void iboe_mcast_work_handler(struct work_struct *work)

	cma_ib_mc_handler(0, m);
	kref_put(&mc->mcref, release_mc);

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)

	if (cma_any_addr(addr)) {
		memset(mgid, 0, sizeof *mgid);

		mgid->raw[0] = 0xff;
		mgid->raw[1] = 0x0e;
		mgid->raw[10] = 0xff;
		mgid->raw[11] = 0xff;
	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
	kref_get(&mc->mcref);

	spin_lock(&id_priv->lock);
	spin_unlock(&id_priv->lock);

		ret = cma_join_ib_multicast(id_priv, mc);
		kref_init(&mc->mcref);
		ret = cma_iboe_join_multicast(id_priv, mc);

	spin_lock_irq(&id_priv->lock);
	spin_unlock_irq(&id_priv->lock);

	spin_lock_irq(&id_priv->lock);
		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
			spin_unlock_irq(&id_priv->lock);
				kref_put(&mc->mcref, release_mc);
	spin_unlock_irq(&id_priv->lock);

	dev_addr = &id_priv->id.route.addr.dev_addr;
		  ndev->name, &id_priv->id);
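/*
 * Hot-plug fragments: cma_netdev_callback() is the netdevice notifier,
 * presumably reacting to events such as bonding failover by calling
 * cma_netdev_change() for each bound id, while cma_add_one() registers a
 * new ib_device and replays the existing listens on it.
 */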
static int cma_netdev_callback(struct notifier_block *self, unsigned long event,

	struct cma_device *cma_dev;
	int ret = NOTIFY_DONE;

			ret = cma_netdev_change(ndev, id_priv);

	.notifier_call = cma_netdev_callback

static void cma_add_one(struct ib_device *device)

	struct cma_device *cma_dev;

	cma_dev->device = device;
	init_completion(&cma_dev->comp);
	INIT_LIST_HEAD(&cma_dev->id_list);

	cma_listen_on_dev(id_priv, cma_dev);
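/*
 * Device removal fragments: cma_process_remove() walks the device's id
 * list and, via cma_remove_id_dev(), presumably reports
 * RDMA_CM_EVENT_DEVICE_REMOVAL to each non-internal id before dropping the
 * device reference.
 */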
	cma_cancel_operation(id_priv, state);

	memset(&event, 0, sizeof event);
	ret = id_priv->id.event_handler(&id_priv->id, &event);

static void cma_process_remove(struct cma_device *cma_dev)

	while (!list_empty(&cma_dev->id_list)) {
		list_del_init(&id_priv->list);

		ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
		cma_deref_id(id_priv);

	cma_deref_dev(cma_dev);

static void cma_remove_one(struct ib_device *device)

	struct cma_device *cma_dev;

	cma_process_remove(cma_dev);
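/*
 * Netlink statistics dump fragments (cma_get_id_stats(), presumably):
 * every cma_device/id pair is serialized into struct rdma_cm_id_stats
 * entries, with cb->args[] used as the resume cursor across dump calls.
 */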
	struct cma_device *cma_dev;
	int i_dev = 0, i_id = 0;

		if (i_dev < cb->args[0]) {
			if (i_id < cb->args[1]) {

			memset(id_stats, 0, sizeof *id_stats);
			id_stats->node_type = id->route.addr.dev_addr.dev_type;
				id->route.addr.dev_addr.bound_dev_if;

					  &id->route.addr.src_addr,
					  &id->route.addr.dst_addr,
					  &id->route.addr.src_addr,
					  &id->route.addr.dst_addr,

			id_stats->qp_type = id->qp_type;

	cb->args[0] = i_dev;
static const struct ibnl_client_cbs cma_cb_table[] = {

static int __init cma_init(void)

static void __exit cma_cleanup(void)