39 #include <linux/rbtree.h>
/* Max outstanding (pending) join/leave requests a single function (VF)
 * may have queued on one multicast group at any time. */
#define MAX_PEND_REQS_PER_FUNC 4
/* Time, in milliseconds, to wait for a MAD response before the
 * request is treated as timed out — TODO confirm against the timeout
 * handler, which is outside this fragment. */
#define MAD_TIMEOUT_MS 2000
/* Module-wide log wrappers.  The GNU "##arg" paste swallows the leading
 * comma when the variadic list is empty, so bare-format calls compile. */
#define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
/*
 * Warn with full group context: calling function, line, group name and
 * demux port.  Every expansion of the "group" argument is parenthesized
 * so the macro is safe for non-trivial argument expressions (the
 * original left "group->demux->port" unparenthesized).
 */
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)
/* Error-level counterpart of mcg_warn_group: prints only the group name
 * (no port/caller context) before the caller-supplied format. */
#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)
/*
 * Decrement a refcount that callers expect to stay above zero; warn via
 * the call site's local "group" variable if it actually hits zero.
 * Wrapped in do { } while (0) so the multi-statement body behaves as a
 * single statement inside unbraced if/else.  NOTE(review): the wrapper
 * lines were missing from the garbled paste (trailing '\' continuations
 * and a line-number gap imply them) — restored here; confirm against the
 * original file.
 */
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)
149 return "MCAST_JOIN_SENT";
151 return "MCAST_LEAVE_SENT";
153 return "MCAST_RESP_READY";
155 return "Invalid State";
167 ret =
memcmp(mgid->
raw, group->
rec.mgid.raw,
sizeof *mgid);
191 ret =
memcmp(group->
rec.mgid.raw, cur_group->
rec.mgid.raw,
192 sizeof group->
rec.mgid);
200 rb_link_node(&group->
node, parent, link);
239 wc.dlid_path_bits = 0;
241 wc.slid = ah_attr.dlid;
253 memcpy(&mad, sa_mad,
sizeof mad);
262 ret = send_mad_to_wire(group->
demux, (
struct ib_mad *)&mad);
273 static int send_leave_to_wire(
struct mcast_group *group,
u8 join_state)
279 memset(&mad, 0,
sizeof mad);
290 mad.sa_hdr.sm_key = 0x0;
295 *sa_data = group->
rec;
298 ret = send_mad_to_wire(group->
demux, (
struct ib_mad *)&mad);
312 static int send_reply_to_slave(
int slave,
struct mcast_group *group,
320 memset(&mad, 0,
sizeof mad);
331 mad.sa_hdr.sm_key = req_sa_mad->
sa_hdr.sm_key;
333 mad.sa_hdr.comp_mask = 0;
335 *sa_data = group->
rec;
340 memcpy(&sa_data->
port_gid, &req_sa_data->port_gid,
sizeof req_sa_data->port_gid);
342 ret = send_mad_to_slave(slave, group->
demux, (
struct ib_mad *)&mad);
349 u8 src_value,
u8 dst_value)
356 if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
361 err = (src_value <= dst_value);
364 err = (src_value >= dst_value);
367 err = (src_value != dst_value);
384 #define MAD_STATUS_REQ_INVALID 0x0200
402 if (check_selector(comp_mask,
431 static int release_group(
struct mcast_group *group,
int from_timeout_handler)
439 if (!from_timeout_handler) {
449 nzgroup =
memcmp(&group->
rec.mgid, &mgid0,
sizeof mgid0);
453 mcg_warn_group(group,
"releasing a group with non empty pending list\n");
468 static void adjust_membership(
struct mcast_group *group,
u8 join_state,
int inc)
472 for (i = 0; i < 3; i++, join_state >>= 1)
473 if (join_state & 0x1)
482 for (i = 0; i < 3; i++)
484 leave_state |= (1 <<
i);
486 return leave_state & (group->
rec.scope_join_state & 7);
489 static int join_group(
struct mcast_group *group,
int slave,
u8 join_mask)
495 join_state = join_mask & (~group->
func[slave].join_state);
496 adjust_membership(group, join_state, 1);
497 group->
func[slave].join_state |= join_state;
505 static int leave_group(
struct mcast_group *group,
int slave,
u8 leave_state)
509 adjust_membership(group, leave_state, -1);
510 group->
func[slave].join_state &= ~leave_state;
511 if (!group->
func[slave].join_state) {
518 static int check_leave(
struct mcast_group *group,
int slave,
u8 leave_mask)
524 if (~group->
func[slave].join_state & leave_mask)
547 --group->
func[req->
func].num_pend_reqs;
550 if (
memcmp(&group->
rec.mgid, &mgid0,
sizeof mgid0)) {
551 if (release_group(group, 1))
561 if (group->
rec.scope_join_state & 7)
562 group->
rec.scope_join_state &= 0xf8;
565 if (release_group(group, 1))
578 static int handle_leave_req(
struct mcast_group *group,
u8 leave_mask,
584 leave_mask = group->
func[req->
func].join_state;
586 status = check_leave(group, req->
func, leave_mask);
588 leave_group(group, req->
func, leave_mask);
591 send_reply_to_slave(req->
func, group, &req->
sa_mad, status);
592 --group->
func[req->
func].num_pend_reqs;
599 static int handle_join_req(
struct mcast_group *group,
u8 join_mask,
602 u8 group_join_state = group->
rec.scope_join_state & 7;
607 if (join_mask == (group_join_state & join_mask)) {
609 status = cmp_rec(&group->
rec, sa_data, req->
sa_mad.sa_hdr.comp_mask);
611 join_group(group, req->
func, join_mask);
613 --group->
func[req->
func].num_pend_reqs;
614 send_reply_to_slave(req->
func, group, &req->
sa_mad, status);
622 if (send_join_to_wire(group, &req->
sa_mad)) {
623 --group->
func[req->
func].num_pend_reqs;
636 static void mlx4_ib_mcg_work_handler(
struct work_struct *work)
660 mcg_warn_group(group,
"Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
664 goto process_requests;
672 send_reply_to_slave(req->
func, group, &req->
sa_mad, status);
673 --group->
func[req->
func].num_pend_reqs;
688 cur_join_state = group->
rec.scope_join_state & 7;
692 if (!cur_join_state && resp_join_state)
694 }
else if (!resp_join_state)
713 rc += handle_leave_req(group, req_join_state, req);
715 rc += handle_join_req(group, req_join_state, req);
720 req_join_state = get_leave_state(group);
721 if (req_join_state) {
722 group->
rec.scope_join_state &= ~req_join_state;
724 if (send_leave_to_wire(group, req_join_state)) {
733 goto process_requests;
737 release_group(group, 0);
754 if (
memcmp(new_mgid, &mgid0,
sizeof mgid0)) {
755 group->
rec.mgid = *new_mgid;
760 cur_group = mcast_insert(ctx, group);
765 --group->
func[req->
func].num_pend_reqs;
771 release_group(group, 0);
815 is_mgid0 = !
memcmp(&mgid0, mgid,
sizeof mgid0);
817 group = mcast_find(ctx, mgid);
825 group = kzalloc(
sizeof *group, gfp_mask);
830 group->
rec.mgid = *mgid;
834 INIT_LIST_HEAD(&group->
func[i].pending);
842 group->
dentry.show = sysfs_show_group;
845 group->
dentry.attr.mode = 0400;
853 cur_group = mcast_insert(ctx, group);
855 mcg_warn(
"group just showed up %s - confused\n", cur_group->
name);
867 static void queue_req(
struct mcast_req *req)
897 *(
u8 *)(&tid) = (
u8)slave;
898 group = search_relocate_mgid0_group(ctx, tid, &rec->
mgid);
915 release_group(group, 0);
923 mcg_warn(
"In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
942 switch (sa_mad->
mad_hdr.method) {
958 return PTR_ERR(group);
963 mcg_warn_group(group,
"Port %d, Func %d has too many pending requests (%d), dropping\n",
965 release_group(group, 0);
969 ++group->
func[slave].num_pend_reqs;
973 release_group(group, 0);
981 mcg_warn(
"In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
982 port, slave, sa_mad->
mad_hdr.method);
993 char pending_str[40];
999 sprintf(state_str,
"%s", get_state_string(group->
state));
1001 sprintf(state_str,
"%s(TID=0x%llx)",
1002 get_state_string(group->
state),
1008 sprintf(pending_str,
"Yes(TID=0x%llx)",
1011 len +=
sprintf(buf + len,
"%1d [%02d,%02d,%02d] %4d %4s %5s ",
1012 group->
rec.scope_join_state & 0xf,
1019 len +=
sprintf(buf + len,
"%d[%1x] ",
1020 f, group->
func[f].join_state);
1022 len +=
sprintf(buf + len,
"\t\t(%4hx %4x %2x %2x %2x %2x %2x "
1023 "%4x %4x %2x %2x)\n",
1026 (group->
rec.mtusel_mtu & 0xc0) >> 6,
1027 group->
rec.mtusel_mtu & 0x3f,
1029 (group->
rec.ratesel_rate & 0xc0) >> 6,
1030 group->
rec.ratesel_rate & 0x3f,
1031 (
be32_to_cpu(group->
rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
1032 (
be32_to_cpu(group->
rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
1034 group->
rec.proxy_join);
1057 static void force_clean_group(
struct mcast_group *group)
1070 static void _mlx4_ib_mcg_port_cleanup(
struct mlx4_ib_demux_ctx *ctx,
int destroy_wq)
1104 force_clean_group(group);
1115 static void mcg_clean_task(
struct work_struct *work)
1120 cw->
ctx->flushing = 0;
1134 _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
1142 mcg_warn(
"failed allocating work for cleanup\n");
1152 static void build_leave_mad(
struct mcast_req *req)
1160 static void clear_pending_reqs(
struct mcast_group *group,
int vf)
1171 if (group_first == req &&
1179 --group->
func[
vf].num_pend_reqs;
1187 if (!pend && (!list_empty(&group->
func[vf].pending) || group->
func[vf].num_pend_reqs)) {
1188 mcg_warn_group(group,
"DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
1189 list_empty(&group->
func[vf].pending), group->
func[vf].num_pend_reqs);
1193 static int push_deleteing_req(
struct mcast_group *group,
int slave)
1198 if (!group->
func[slave].join_state)
1203 mcg_warn_group(group,
"failed allocation - may leave stall groups\n");
1207 if (!list_empty(&group->
func[slave].pending)) {
1209 if (pend_req->
clean) {
1218 ++group->
func[slave].num_pend_reqs;
1219 build_leave_mad(req);
1235 clear_pending_reqs(group, slave);
1236 push_deleteing_req(group, slave);