Linux Kernel 3.7.1
mcg.c
1 /*
2  * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <rdma/ib_mad.h>
34 #include <rdma/ib_smi.h>
35 #include <rdma/ib_cache.h>
36 #include <rdma/ib_sa.h>
37 
38 #include <linux/mlx4/cmd.h>
39 #include <linux/rbtree.h>
40 #include <linux/delay.h>
41 
42 #include "mlx4_ib.h"
43 
44 #define MAX_VFS 80
45 #define MAX_PEND_REQS_PER_FUNC 4
46 #define MAD_TIMEOUT_MS 2000
47 
48 #define mcg_warn(fmt, arg...) pr_warn("MCG WARNING: " fmt, ##arg)
49 #define mcg_error(fmt, arg...) pr_err(fmt, ##arg)
50 #define mcg_warn_group(group, format, arg...) \
51  pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
 52  (group)->name, (group)->demux->port, ## arg)
53 
54 #define mcg_error_group(group, format, arg...) \
55  pr_err(" %16s: " format, (group)->name, ## arg)
56 
57 
58 static union ib_gid mgid0;
59 
60 static struct workqueue_struct *clean_wq;
 61 
 62 enum mcast_state {
 63  MCAST_NOT_MEMBER = 0,
 64  MCAST_MEMBER,
 65 };
 66 
 67 enum mcast_group_state {
 68  MCAST_IDLE,
 69  MCAST_JOIN_SENT,
 70  MCAST_LEAVE_SENT,
 71  MCAST_RESP_READY
 72 };
73 
74 struct mcast_member {
 75  enum mcast_state state;
 76  u8 join_state;
 77  int num_pend_reqs;
 78  struct list_head pending;
 79 };
80 
 81 struct ib_sa_mcmember_data {
 82  union ib_gid mgid;
 83  union ib_gid port_gid;
 84  __be32 qkey;
 85  __be16 mlid;
 86  u8 mtusel_mtu;
 87  u8 tclass;
 88  __be16 pkey;
 89  u8 ratesel_rate;
 90  u8 lifetmsel_lifetm;
 91  __be32 sl_flowlabel_hoplimit;
 92  u8 scope_join_state;
 93  u8 proxy_join;
 94  u8 reserved[2];
 95 };
96 
97 struct mcast_group {
 98  struct ib_sa_mcmember_data rec;
 99  struct rb_node node;
 100  struct list_head mgid0_list;
 101  struct mlx4_ib_demux_ctx *demux;
 102  struct mcast_member func[MAX_VFS];
 103  struct mutex lock;
104  struct work_struct work;
105  struct list_head pending_list;
106  int members[3];
 107  enum mcast_group_state state;
 108  enum mcast_group_state prev_state;
 109  struct ib_sa_mad response_sa_mad;
 110  __be64 last_req_tid;
 111 
112  char name[33]; /* MGID string */
 113  struct device_attribute dentry;
 114 
115  /* refcount is the reference count for the following:
116  1. Each queued request
117  2. Each invocation of the worker thread
118  3. Membership of the port at the SA
119  */
 120  atomic_t refcount;
 121 
122  /* delayed work to clean pending SM request */
 123  struct delayed_work timeout_work;
 125 };
126 
127 struct mcast_req {
128  int func;
 129  struct ib_sa_mad sa_mad;
 130  struct list_head group_list;
 131  struct list_head func_list;
 132  struct mcast_group *group;
 133  int clean;
134 };
135 
136 
137 #define safe_atomic_dec(ref) \
138  do {\
139  if (atomic_dec_and_test(ref)) \
140  mcg_warn_group(group, "did not expect to reach zero\n"); \
141  } while (0)
142 
143 static const char *get_state_string(enum mcast_group_state state)
144 {
145  switch (state) {
146  case MCAST_IDLE:
147  return "MCAST_IDLE";
148  case MCAST_JOIN_SENT:
149  return "MCAST_JOIN_SENT";
150  case MCAST_LEAVE_SENT:
151  return "MCAST_LEAVE_SENT";
152  case MCAST_RESP_READY:
153  return "MCAST_RESP_READY";
154  }
155  return "Invalid State";
156 }
157 
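/* Per-port groups live in an rb-tree (ctx->mcg_table) keyed by the raw MGID
 * bytes in memcmp() order. mcast_find() looks up a group; mcast_insert()
 * links a new group in, or returns the existing one if the MGID is already
 * present. Both expect the caller to hold ctx->mcg_table_lock.
 */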
158 static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
159  union ib_gid *mgid)
160 {
161  struct rb_node *node = ctx->mcg_table.rb_node;
162  struct mcast_group *group;
163  int ret;
164 
165  while (node) {
166  group = rb_entry(node, struct mcast_group, node);
167  ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
168  if (!ret)
169  return group;
170 
171  if (ret < 0)
172  node = node->rb_left;
173  else
174  node = node->rb_right;
175  }
176  return NULL;
177 }
178 
179 static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
180  struct mcast_group *group)
181 {
182  struct rb_node **link = &ctx->mcg_table.rb_node;
183  struct rb_node *parent = NULL;
184  struct mcast_group *cur_group;
185  int ret;
186 
187  while (*link) {
188  parent = *link;
189  cur_group = rb_entry(parent, struct mcast_group, node);
190 
191  ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
192  sizeof group->rec.mgid);
193  if (ret < 0)
194  link = &(*link)->rb_left;
195  else if (ret > 0)
196  link = &(*link)->rb_right;
197  else
198  return cur_group;
199  }
200  rb_link_node(&group->node, parent, link);
201  rb_insert_color(&group->node, &ctx->mcg_table);
202  return NULL;
203 }
204 
205 static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
206 {
207  struct mlx4_ib_dev *dev = ctx->dev;
208  struct ib_ah_attr ah_attr;
209 
210  spin_lock(&dev->sm_lock);
211  if (!dev->sm_ah[ctx->port - 1]) {
212  /* port is not yet Active, sm_ah not ready */
213  spin_unlock(&dev->sm_lock);
214  return -EAGAIN;
215  }
216  mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
217  spin_unlock(&dev->sm_lock);
218  return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
219  IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
220 }
221 
222 static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
223  struct ib_mad *mad)
224 {
225  struct mlx4_ib_dev *dev = ctx->dev;
226  struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
227  struct ib_wc wc;
228  struct ib_ah_attr ah_attr;
229 
230  /* Our agent might not yet be registered when mads start to arrive */
231  if (!agent)
232  return -EAGAIN;
233 
234  ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
235 
236  if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
237  return -EINVAL;
238  wc.sl = 0;
239  wc.dlid_path_bits = 0;
240  wc.port_num = ctx->port;
241  wc.slid = ah_attr.dlid; /* opensm lid */
242  wc.src_qp = 1;
243  return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
244 }
245 
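/* Joins from VFs are proxied rather than forwarded verbatim: the VF's request
 * MAD is reused, but its port GID is rewritten to the physical port GID
 * (slave 0) and a driver-owned TID is assigned, so the SM sees the join as
 * coming from the physical port rather than from an individual VF.
 */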
246 static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
247 {
248  struct ib_sa_mad mad;
249  struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
250  int ret;
251 
 252  /* start from the MAD request exactly as it arrived from the VF */
253  memcpy(&mad, sa_mad, sizeof mad);
254 
255  /* fix port GID to be the real one (slave 0) */
256  sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];
257 
258  /* assign our own TID */
259  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
260  group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
261 
262  ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
263  /* set timeout handler */
264  if (!ret) {
265  /* calls mlx4_ib_mcg_timeout_handler */
 266  queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
 267  msecs_to_jiffies(MAD_TIMEOUT_MS));
 268  }
269 
270  return ret;
271 }
272 
273 static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
274 {
275  struct ib_sa_mad mad;
276  struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
277  int ret;
278 
279  memset(&mad, 0, sizeof mad);
280  mad.mad_hdr.base_version = 1;
281  mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
282  mad.mad_hdr.class_version = 2;
283  mad.mad_hdr.method = IB_SA_METHOD_DELETE;
284  mad.mad_hdr.status = cpu_to_be16(0);
285  mad.mad_hdr.class_specific = cpu_to_be16(0);
286  mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
287  group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
 288  mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
 289  mad.mad_hdr.attr_mod = cpu_to_be32(0);
290  mad.sa_hdr.sm_key = 0x0;
291  mad.sa_hdr.attr_offset = cpu_to_be16(7);
 292  mad.sa_hdr.attr_offset = cpu_to_be16(7);
 293  mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
 294  IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;
295  *sa_data = group->rec;
296  sa_data->scope_join_state = join_state;
297 
298  ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
299  if (ret)
300  group->state = MCAST_IDLE;
301 
302  /* set timeout handler */
303  if (!ret) {
304  /* calls mlx4_ib_mcg_timeout_handler */
 305  queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
 306  msecs_to_jiffies(MAD_TIMEOUT_MS));
 307  }
308 
309  return ret;
310 }
311 
312 static int send_reply_to_slave(int slave, struct mcast_group *group,
313  struct ib_sa_mad *req_sa_mad, u16 status)
314 {
315  struct ib_sa_mad mad;
316  struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
317  struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
318  int ret;
319 
320  memset(&mad, 0, sizeof mad);
321  mad.mad_hdr.base_version = 1;
322  mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
323  mad.mad_hdr.class_version = 2;
324  mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
325  mad.mad_hdr.status = cpu_to_be16(status);
326  mad.mad_hdr.class_specific = cpu_to_be16(0);
327  mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
328  *(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
 329  mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
 330  mad.mad_hdr.attr_mod = cpu_to_be32(0);
331  mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
332  mad.sa_hdr.attr_offset = cpu_to_be16(7);
333  mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */
334 
335  *sa_data = group->rec;
336 
337  /* reconstruct VF's requested join_state and port_gid */
338  sa_data->scope_join_state &= 0xf0;
339  sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
340  memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);
341 
342  ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
343  return ret;
344 }
345 
346 static int check_selector(ib_sa_comp_mask comp_mask,
347  ib_sa_comp_mask selector_mask,
 348  ib_sa_comp_mask value_mask,
 349  u8 src_value, u8 dst_value)
350 {
351  int err;
352  u8 selector = dst_value >> 6;
353  dst_value &= 0x3f;
354  src_value &= 0x3f;
355 
356  if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
357  return 0;
358 
359  switch (selector) {
360  case IB_SA_GT:
361  err = (src_value <= dst_value);
362  break;
363  case IB_SA_LT:
364  err = (src_value >= dst_value);
365  break;
366  case IB_SA_EQ:
367  err = (src_value != dst_value);
368  break;
369  default:
370  err = 0;
371  break;
372  }
373 
374  return err;
375 }
376 
377 static u16 cmp_rec(struct ib_sa_mcmember_data *src,
378  struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
379 {
380  /* src is group record, dst is request record */
381  /* MGID must already match */
 382  /* Port_GID is always replaced with our own Port_GID, so it always matches */
383 
384 #define MAD_STATUS_REQ_INVALID 0x0200
385  if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
386  return MAD_STATUS_REQ_INVALID;
387  if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
388  return MAD_STATUS_REQ_INVALID;
389  if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
 390  IB_SA_MCMEMBER_REC_MTU,
 391  src->mtusel_mtu, dst->mtusel_mtu))
392  return MAD_STATUS_REQ_INVALID;
393  if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
394  src->tclass != dst->tclass)
395  return MAD_STATUS_REQ_INVALID;
396  if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
397  return MAD_STATUS_REQ_INVALID;
398  if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
 399  IB_SA_MCMEMBER_REC_RATE,
 400  src->ratesel_rate, dst->ratesel_rate))
401  return MAD_STATUS_REQ_INVALID;
402  if (check_selector(comp_mask,
 403  IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
 404  IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
 405  src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
 406  return MAD_STATUS_REQ_INVALID;
407  if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
408  (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
409  (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
410  return MAD_STATUS_REQ_INVALID;
411  if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
412  (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
413  (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
414  return MAD_STATUS_REQ_INVALID;
415  if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
416  (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
417  (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
418  return MAD_STATUS_REQ_INVALID;
419  if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
420  (src->scope_join_state & 0xf0) !=
421  (dst->scope_join_state & 0xf0))
422  return MAD_STATUS_REQ_INVALID;
423 
424  /* join_state checked separately, proxy_join ignored */
425 
426  return 0;
427 }
428 
429 /* release group, return 1 if this was last release and group is destroyed
 430  * timeout work is canceled synchronously */
431 static int release_group(struct mcast_group *group, int from_timeout_handler)
432 {
433  struct mlx4_ib_demux_ctx *ctx = group->demux;
434  int nzgroup;
435 
436  mutex_lock(&ctx->mcg_table_lock);
437  mutex_lock(&group->lock);
438  if (atomic_dec_and_test(&group->refcount)) {
439  if (!from_timeout_handler) {
440  if (group->state != MCAST_IDLE &&
441  !cancel_delayed_work(&group->timeout_work)) {
442  atomic_inc(&group->refcount);
443  mutex_unlock(&group->lock);
 444  mutex_unlock(&ctx->mcg_table_lock);
 445  return 0;
446  }
447  }
448 
449  nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
450  if (nzgroup)
451  del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
452  if (!list_empty(&group->pending_list))
 453  mcg_warn_group(group, "releasing a group with a non-empty pending list\n");
454  if (nzgroup)
455  rb_erase(&group->node, &ctx->mcg_table);
456  list_del_init(&group->mgid0_list);
457  mutex_unlock(&group->lock);
 458  mutex_unlock(&ctx->mcg_table_lock);
 459  kfree(group);
460  return 1;
461  } else {
462  mutex_unlock(&group->lock);
 463  mutex_unlock(&ctx->mcg_table_lock);
 464  }
465  return 0;
466 }
467 
468 static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
469 {
470  int i;
471 
472  for (i = 0; i < 3; i++, join_state >>= 1)
473  if (join_state & 0x1)
474  group->members[i] += inc;
475 }
476 
477 static u8 get_leave_state(struct mcast_group *group)
478 {
479  u8 leave_state = 0;
480  int i;
481 
482  for (i = 0; i < 3; i++)
483  if (!group->members[i])
484  leave_state |= (1 << i);
485 
486  return leave_state & (group->rec.scope_join_state & 7);
487 }
488 
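/* members[0..2] track, per JoinState bit (full / non / send-only non member
 * in the MCMemberRecord), how many VF references are currently held.
 * adjust_membership() updates the counters; get_leave_state() reports the
 * bits whose count has dropped to zero so a leave can be sent to the SM.
 */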
489 static int join_group(struct mcast_group *group, int slave, u8 join_mask)
490 {
491  int ret = 0;
492  u8 join_state;
493 
494  /* remove bits that slave is already member of, and adjust */
495  join_state = join_mask & (~group->func[slave].join_state);
496  adjust_membership(group, join_state, 1);
497  group->func[slave].join_state |= join_state;
498  if (group->func[slave].state != MCAST_MEMBER && join_state) {
499  group->func[slave].state = MCAST_MEMBER;
500  ret = 1;
501  }
502  return ret;
503 }
504 
505 static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
506 {
507  int ret = 0;
508 
509  adjust_membership(group, leave_state, -1);
510  group->func[slave].join_state &= ~leave_state;
511  if (!group->func[slave].join_state) {
512  group->func[slave].state = MCAST_NOT_MEMBER;
513  ret = 1;
514  }
515  return ret;
516 }
517 
518 static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
519 {
520  if (group->func[slave].state != MCAST_MEMBER)
521  return MAD_STATUS_REQ_INVALID;
522 
523  /* make sure we're not deleting unset bits */
524  if (~group->func[slave].join_state & leave_mask)
525  return MAD_STATUS_REQ_INVALID;
526 
527  if (!leave_mask)
528  return MAD_STATUS_REQ_INVALID;
529 
530  return 0;
531 }
532 
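/* If the SM does not answer within MAD_TIMEOUT_MS, the delayed work below
 * fires: a timed-out join drops the request at the head of the pending list,
 * a timed-out leave simply clears the join-state bits being left; in both
 * cases the group returns to MCAST_IDLE and the worker is re-queued so the
 * remaining requests are processed.
 */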
533 static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
534 {
535  struct delayed_work *delay = to_delayed_work(work);
536  struct mcast_group *group;
537  struct mcast_req *req = NULL;
538 
539  group = container_of(delay, typeof(*group), timeout_work);
540 
541  mutex_lock(&group->lock);
542  if (group->state == MCAST_JOIN_SENT) {
543  if (!list_empty(&group->pending_list)) {
544  req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
545  list_del(&req->group_list);
546  list_del(&req->func_list);
547  --group->func[req->func].num_pend_reqs;
548  mutex_unlock(&group->lock);
549  kfree(req);
550  if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
551  if (release_group(group, 1))
552  return;
553  } else {
554  kfree(group);
555  return;
556  }
557  mutex_lock(&group->lock);
558  } else
559  mcg_warn_group(group, "DRIVER BUG\n");
560  } else if (group->state == MCAST_LEAVE_SENT) {
561  if (group->rec.scope_join_state & 7)
562  group->rec.scope_join_state &= 0xf8;
563  group->state = MCAST_IDLE;
564  mutex_unlock(&group->lock);
565  if (release_group(group, 1))
566  return;
567  mutex_lock(&group->lock);
568  } else
569  mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
570  group->state = MCAST_IDLE;
571  atomic_inc(&group->refcount);
572  if (!queue_work(group->demux->mcg_wq, &group->work))
573  safe_atomic_dec(&group->refcount);
574 
575  mutex_unlock(&group->lock);
576 }
577 
578 static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
579  struct mcast_req *req)
580 {
581  u16 status;
582 
583  if (req->clean)
584  leave_mask = group->func[req->func].join_state;
585 
586  status = check_leave(group, req->func, leave_mask);
587  if (!status)
588  leave_group(group, req->func, leave_mask);
589 
590  if (!req->clean)
591  send_reply_to_slave(req->func, group, &req->sa_mad, status);
592  --group->func[req->func].num_pend_reqs;
593  list_del(&req->group_list);
594  list_del(&req->func_list);
595  kfree(req);
596  return 1;
597 }
598 
599 static int handle_join_req(struct mcast_group *group, u8 join_mask,
600  struct mcast_req *req)
601 {
602  u8 group_join_state = group->rec.scope_join_state & 7;
603  int ref = 0;
604  u16 status;
605  struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
606 
607  if (join_mask == (group_join_state & join_mask)) {
608  /* port's membership need not change */
609  status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
610  if (!status)
611  join_group(group, req->func, join_mask);
612 
613  --group->func[req->func].num_pend_reqs;
614  send_reply_to_slave(req->func, group, &req->sa_mad, status);
615  list_del(&req->group_list);
616  list_del(&req->func_list);
617  kfree(req);
618  ++ref;
619  } else {
620  /* port's membership needs to be updated */
621  group->prev_state = group->state;
622  if (send_join_to_wire(group, &req->sa_mad)) {
623  --group->func[req->func].num_pend_reqs;
624  list_del(&req->group_list);
625  list_del(&req->func_list);
626  kfree(req);
627  ref = 1;
628  group->state = group->prev_state;
629  } else
630  group->state = MCAST_JOIN_SENT;
631  }
632 
633  return ref;
634 }
635 
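/* Group worker: consume a pending SM response (MCAST_RESP_READY) if there is
 * one, then walk the queued join/leave requests while the group stays idle,
 * and finally send a leave to the SM for join-state bits that no VF
 * references any more. rc counts the group references to drop on exit.
 */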
636 static void mlx4_ib_mcg_work_handler(struct work_struct *work)
637 {
638  struct mcast_group *group;
639  struct mcast_req *req = NULL;
640  struct ib_sa_mcmember_data *sa_data;
641  u8 req_join_state;
642  int rc = 1; /* release_count - this is for the scheduled work */
643  u16 status;
644  u8 method;
645 
646  group = container_of(work, typeof(*group), work);
647 
648  mutex_lock(&group->lock);
649 
 650  /* First, check whether an SM response is waiting for this group. If so,
 651  * update the group's REC. If it is a failure, we may need to forward the
 652  * failure to the VF that is waiting for it; if a VF is waiting and the
 653  * response is good, that VF is answered later in this function. */
654  if (group->state == MCAST_RESP_READY) {
655  /* cancels mlx4_ib_mcg_timeout_handler */
 656  cancel_delayed_work(&group->timeout_work);
 657  status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
658  method = group->response_sa_mad.mad_hdr.method;
659  if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
660  mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
661  be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
662  be64_to_cpu(group->last_req_tid));
663  group->state = group->prev_state;
664  goto process_requests;
665  }
666  if (status) {
667  if (!list_empty(&group->pending_list))
668  req = list_first_entry(&group->pending_list,
669  struct mcast_req, group_list);
 670  if (method == IB_MGMT_METHOD_GET_RESP) {
671  if (req) {
672  send_reply_to_slave(req->func, group, &req->sa_mad, status);
673  --group->func[req->func].num_pend_reqs;
674  list_del(&req->group_list);
675  list_del(&req->func_list);
676  kfree(req);
677  ++rc;
678  } else
679  mcg_warn_group(group, "no request for failed join\n");
680  } else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
681  ++rc;
682  } else {
683  u8 resp_join_state;
684  u8 cur_join_state;
685 
686  resp_join_state = ((struct ib_sa_mcmember_data *)
687  group->response_sa_mad.data)->scope_join_state & 7;
688  cur_join_state = group->rec.scope_join_state & 7;
689 
690  if (method == IB_MGMT_METHOD_GET_RESP) {
 691  /* successful join */
692  if (!cur_join_state && resp_join_state)
693  --rc;
694  } else if (!resp_join_state)
695  ++rc;
696  memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
697  }
698  group->state = MCAST_IDLE;
699  }
700 
701 process_requests:
702  /* We should now go over pending join/leave requests, as long as we are idle. */
703  while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
704  req = list_first_entry(&group->pending_list, struct mcast_req,
705  group_list);
706  sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
707  req_join_state = sa_data->scope_join_state & 0x7;
708 
709  /* For a leave request, we will immediately answer the VF, and
710  * update our internal counters. The actual leave will be sent
711  * to SM later, if at all needed. We dequeue the request now. */
712  if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
713  rc += handle_leave_req(group, req_join_state, req);
714  else
715  rc += handle_join_req(group, req_join_state, req);
716  }
717 
718  /* Handle leaves */
719  if (group->state == MCAST_IDLE) {
720  req_join_state = get_leave_state(group);
721  if (req_join_state) {
722  group->rec.scope_join_state &= ~req_join_state;
723  group->prev_state = group->state;
724  if (send_leave_to_wire(group, req_join_state)) {
725  group->state = group->prev_state;
726  ++rc;
727  } else
728  group->state = MCAST_LEAVE_SENT;
729  }
730  }
731 
732  if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
733  goto process_requests;
734  mutex_unlock(&group->lock);
735 
736  while (rc--)
737  release_group(group, 0);
738 }
739 
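/* A join sent with MGID 0 asks the SM to assign an MGID. Such groups are
 * parked on ctx->mcg_mgid0_list by acquire_group(); when the SM response
 * arrives, the helper below matches it by the stored TID, gives the group its
 * newly assigned MGID and moves it into the rb-tree.
 */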
740 static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
741  __be64 tid,
742  union ib_gid *new_mgid)
743 {
744  struct mcast_group *group = NULL, *cur_group;
745  struct mcast_req *req;
746  struct list_head *pos;
747  struct list_head *n;
748 
749  mutex_lock(&ctx->mcg_table_lock);
750  list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
751  group = list_entry(pos, struct mcast_group, mgid0_list);
752  mutex_lock(&group->lock);
753  if (group->last_req_tid == tid) {
754  if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
755  group->rec.mgid = *new_mgid;
756  sprintf(group->name, "%016llx%016llx",
757  be64_to_cpu(group->rec.mgid.global.subnet_prefix),
758  be64_to_cpu(group->rec.mgid.global.interface_id));
759  list_del_init(&group->mgid0_list);
760  cur_group = mcast_insert(ctx, group);
761  if (cur_group) {
 762  /* A race between our code and the SM. Silently clean up the new one. */
763  req = list_first_entry(&group->pending_list,
764  struct mcast_req, group_list);
765  --group->func[req->func].num_pend_reqs;
766  list_del(&req->group_list);
767  list_del(&req->func_list);
768  kfree(req);
769  mutex_unlock(&group->lock);
 770  mutex_unlock(&ctx->mcg_table_lock);
 771  release_group(group, 0);
772  return NULL;
773  }
774 
775  atomic_inc(&group->refcount);
776  add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
777  mutex_unlock(&group->lock);
 778  mutex_unlock(&ctx->mcg_table_lock);
 779  return group;
780  } else {
781  struct mcast_req *tmp1, *tmp2;
782 
783  list_del(&group->mgid0_list);
784  if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
 785  mcg_warn_group(group, "destroying a pending group\n");
 786 
787  list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
788  list_del(&tmp1->group_list);
789  kfree(tmp1);
790  }
791  mutex_unlock(&group->lock);
 792  mutex_unlock(&ctx->mcg_table_lock);
 793  kfree(group);
794  return NULL;
795  }
796  }
797  mutex_unlock(&group->lock);
798  }
 799  mutex_unlock(&ctx->mcg_table_lock);
 800 
801  return NULL;
802 }
803 
804 static ssize_t sysfs_show_group(struct device *dev,
805  struct device_attribute *attr, char *buf);
806 
807 static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
808  union ib_gid *mgid, int create,
809  gfp_t gfp_mask)
810 {
811  struct mcast_group *group, *cur_group;
812  int is_mgid0;
813  int i;
814 
815  is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
816  if (!is_mgid0) {
817  group = mcast_find(ctx, mgid);
818  if (group)
819  goto found;
820  }
821 
822  if (!create)
823  return ERR_PTR(-ENOENT);
824 
825  group = kzalloc(sizeof *group, gfp_mask);
826  if (!group)
827  return ERR_PTR(-ENOMEM);
828 
829  group->demux = ctx;
830  group->rec.mgid = *mgid;
831  INIT_LIST_HEAD(&group->pending_list);
832  INIT_LIST_HEAD(&group->mgid0_list);
833  for (i = 0; i < MAX_VFS; ++i)
834  INIT_LIST_HEAD(&group->func[i].pending);
835  INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
836  INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
837  mutex_init(&group->lock);
838  sprintf(group->name, "%016llx%016llx",
839  be64_to_cpu(group->rec.mgid.global.subnet_prefix),
840  be64_to_cpu(group->rec.mgid.global.interface_id));
841  sysfs_attr_init(&group->dentry.attr);
842  group->dentry.show = sysfs_show_group;
843  group->dentry.store = NULL;
844  group->dentry.attr.name = group->name;
845  group->dentry.attr.mode = 0400;
846  group->state = MCAST_IDLE;
847 
848  if (is_mgid0) {
849  list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
850  goto found;
851  }
852 
853  cur_group = mcast_insert(ctx, group);
854  if (cur_group) {
855  mcg_warn("group just showed up %s - confused\n", cur_group->name);
856  kfree(group);
857  return ERR_PTR(-EINVAL);
858  }
859 
860  add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
861 
862 found:
863  atomic_inc(&group->refcount);
864  return group;
865 }
866 
867 static void queue_req(struct mcast_req *req)
868 {
869  struct mcast_group *group = req->group;
870 
871  atomic_inc(&group->refcount); /* for the request */
872  atomic_inc(&group->refcount); /* for scheduling the work */
873  list_add_tail(&req->group_list, &group->pending_list);
874  list_add_tail(&req->func_list, &group->func[req->func].pending);
875  /* calls mlx4_ib_mcg_work_handler */
876  if (!queue_work(group->demux->mcg_wq, &group->work))
877  safe_atomic_dec(&group->refcount);
878 }
879 
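/* MAD-layer entry points: the demux handler below intercepts MCMemberRecord
 * MADs arriving from the wire (SM responses), while the multiplex handler
 * further down intercepts join/leave requests coming from VFs and queues them
 * on the matching group.
 */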
880 int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
881  struct ib_sa_mad *mad)
882 {
883  struct mlx4_ib_dev *dev = to_mdev(ibdev);
884  struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
885  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
886  struct mcast_group *group;
887 
888  switch (mad->mad_hdr.method) {
 889  case IB_MGMT_METHOD_GET_RESP:
 890  case IB_SA_METHOD_DELETE_RESP:
 891  mutex_lock(&ctx->mcg_table_lock);
892  group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
 893  mutex_unlock(&ctx->mcg_table_lock);
 894  if (IS_ERR(group)) {
895  if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
896  __be64 tid = mad->mad_hdr.tid;
897  *(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
898  group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
899  } else
900  group = NULL;
901  }
902 
903  if (!group)
904  return 1;
905 
906  mutex_lock(&group->lock);
907  group->response_sa_mad = *mad;
908  group->prev_state = group->state;
909  group->state = MCAST_RESP_READY;
910  /* calls mlx4_ib_mcg_work_handler */
911  atomic_inc(&group->refcount);
912  if (!queue_work(ctx->mcg_wq, &group->work))
913  safe_atomic_dec(&group->refcount);
914  mutex_unlock(&group->lock);
915  release_group(group, 0);
916  return 1; /* consumed */
917  case IB_MGMT_METHOD_SET:
 918  case IB_SA_METHOD_GET_TABLE:
 919  case IB_SA_METHOD_GET_TABLE_RESP:
 920  case IB_SA_METHOD_DELETE:
921  return 0; /* not consumed, pass-through to guest over tunnel */
922  default:
923  mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
924  port, mad->mad_hdr.method);
925  return 1; /* consumed */
926  }
927 }
928 
 929 int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 930  int slave, struct ib_sa_mad *sa_mad)
931 {
932  struct mlx4_ib_dev *dev = to_mdev(ibdev);
933  struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
934  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
935  struct mcast_group *group;
936  struct mcast_req *req;
937  int may_create = 0;
938 
939  if (ctx->flushing)
940  return -EAGAIN;
941 
942  switch (sa_mad->mad_hdr.method) {
943  case IB_MGMT_METHOD_SET:
944  may_create = 1;
945  case IB_SA_METHOD_DELETE:
946  req = kzalloc(sizeof *req, GFP_KERNEL);
947  if (!req)
948  return -ENOMEM;
949 
950  req->func = slave;
951  req->sa_mad = *sa_mad;
952 
953  mutex_lock(&ctx->mcg_table_lock);
954  group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
 955  mutex_unlock(&ctx->mcg_table_lock);
 956  if (IS_ERR(group)) {
957  kfree(req);
958  return PTR_ERR(group);
959  }
960  mutex_lock(&group->lock);
961  if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
962  mutex_unlock(&group->lock);
963  mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
964  port, slave, MAX_PEND_REQS_PER_FUNC);
965  release_group(group, 0);
966  kfree(req);
967  return -ENOMEM;
968  }
969  ++group->func[slave].num_pend_reqs;
970  req->group = group;
971  queue_req(req);
972  mutex_unlock(&group->lock);
973  release_group(group, 0);
974  return 1; /* consumed */
 975  case IB_SA_METHOD_GET_TABLE:
 976  case IB_MGMT_METHOD_GET_RESP:
 977  case IB_SA_METHOD_GET_TABLE_RESP:
 978  case IB_SA_METHOD_DELETE_RESP:
 979  return 0; /* not consumed, pass-through */
980  default:
981  mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
982  port, slave, sa_mad->mad_hdr.method);
983  return 1; /* consumed */
984  }
985 }
986 
987 static ssize_t sysfs_show_group(struct device *dev,
988  struct device_attribute *attr, char *buf)
989 {
990  struct mcast_group *group =
991  container_of(attr, struct mcast_group, dentry);
992  struct mcast_req *req = NULL;
993  char pending_str[40];
994  char state_str[40];
995  ssize_t len = 0;
996  int f;
997 
998  if (group->state == MCAST_IDLE)
999  sprintf(state_str, "%s", get_state_string(group->state));
1000  else
1001  sprintf(state_str, "%s(TID=0x%llx)",
1002  get_state_string(group->state),
1003  be64_to_cpu(group->last_req_tid));
1004  if (list_empty(&group->pending_list)) {
1005  sprintf(pending_str, "No");
1006  } else {
1007  req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
1008  sprintf(pending_str, "Yes(TID=0x%llx)",
1009  be64_to_cpu(req->sa_mad.mad_hdr.tid));
1010  }
1011  len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
1012  group->rec.scope_join_state & 0xf,
1013  group->members[2], group->members[1], group->members[0],
1014  atomic_read(&group->refcount),
1015  pending_str,
1016  state_str);
1017  for (f = 0; f < MAX_VFS; ++f)
1018  if (group->func[f].state == MCAST_MEMBER)
1019  len += sprintf(buf + len, "%d[%1x] ",
1020  f, group->func[f].join_state);
1021 
1022  len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
1023  "%4x %4x %2x %2x)\n",
1024  be16_to_cpu(group->rec.pkey),
1025  be32_to_cpu(group->rec.qkey),
1026  (group->rec.mtusel_mtu & 0xc0) >> 6,
1027  group->rec.mtusel_mtu & 0x3f,
1028  group->rec.tclass,
1029  (group->rec.ratesel_rate & 0xc0) >> 6,
1030  group->rec.ratesel_rate & 0x3f,
1031  (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
1032  (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
1033  be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
1034  group->rec.proxy_join);
1035 
1036  return len;
1037 }
1038 
 1039 int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
 1040 {
1041  char name[20];
1042 
1043  atomic_set(&ctx->tid, 0);
1044  sprintf(name, "mlx4_ib_mcg%d", ctx->port);
 1045  ctx->mcg_wq = create_singlethread_workqueue(name);
 1046  if (!ctx->mcg_wq)
1047  return -ENOMEM;
1048 
1049  mutex_init(&ctx->mcg_table_lock);
1050  ctx->mcg_table = RB_ROOT;
1051  INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
1052  ctx->flushing = 0;
1053 
1054  return 0;
1055 }
1056 
1057 static void force_clean_group(struct mcast_group *group)
1058 {
 1059  struct mcast_req *req, *tmp;
 1060 
1061  list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
1062  list_del(&req->group_list);
1063  kfree(req);
1064  }
1065  del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
1066  rb_erase(&group->node, &group->demux->mcg_table);
1067  kfree(group);
1068 }
1069 
1070 static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
1071 {
1072  int i;
1073  struct rb_node *p;
1074  struct mcast_group *group;
1075  unsigned long end;
1076  int count;
1077 
1078  for (i = 0; i < MAX_VFS; ++i)
1079  clean_vf_mcast(ctx, i);
1080 
1081  end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
1082  do {
1083  count = 0;
1084  mutex_lock(&ctx->mcg_table_lock);
1085  for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
1086  ++count;
 1087  mutex_unlock(&ctx->mcg_table_lock);
 1088  if (!count)
1089  break;
1090 
1091  msleep(1);
1092  } while (time_after(end, jiffies));
1093 
1094  flush_workqueue(ctx->mcg_wq);
1095  if (destroy_wq)
1096  destroy_workqueue(ctx->mcg_wq);
1097 
1098  mutex_lock(&ctx->mcg_table_lock);
1099  while ((p = rb_first(&ctx->mcg_table)) != NULL) {
1100  group = rb_entry(p, struct mcast_group, node);
1101  if (atomic_read(&group->refcount))
1102  mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
1103 
1104  force_clean_group(group);
1105  }
 1106  mutex_unlock(&ctx->mcg_table_lock);
 1107 }
1108 
1109 struct clean_work {
1110  struct work_struct work;
 1111  struct mlx4_ib_demux_ctx *ctx;
 1112  int destroy_wq;
 1113 };
1114 
1115 static void mcg_clean_task(struct work_struct *work)
1116 {
1117  struct clean_work *cw = container_of(work, struct clean_work, work);
1118 
1119  _mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
1120  cw->ctx->flushing = 0;
1121  kfree(cw);
1122 }
1123 
1124 void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
1125 {
1126  struct clean_work *work;
1127 
1128  if (ctx->flushing)
1129  return;
1130 
1131  ctx->flushing = 1;
1132 
1133  if (destroy_wq) {
1134  _mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
1135  ctx->flushing = 0;
1136  return;
1137  }
1138 
1139  work = kmalloc(sizeof *work, GFP_KERNEL);
1140  if (!work) {
1141  ctx->flushing = 0;
1142  mcg_warn("failed allocating work for cleanup\n");
1143  return;
1144  }
1145 
1146  work->ctx = ctx;
1147  work->destroy_wq = destroy_wq;
1148  INIT_WORK(&work->work, mcg_clean_task);
1149  queue_work(clean_wq, &work->work);
1150 }
1151 
1152 static void build_leave_mad(struct mcast_req *req)
1153 {
1154  struct ib_sa_mad *mad = &req->sa_mad;
1155 
1156  mad->mad_hdr.method = IB_SA_METHOD_DELETE;
1157 }
1158 
1159 
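/* VF teardown path: clear_pending_reqs() discards whatever a VF still has
 * queued for a group, and push_deleteing_req() queues a synthetic "clean"
 * leave on the VF's behalf so the membership counters are rolled back without
 * replying to the departed VF.
 */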
1160 static void clear_pending_reqs(struct mcast_group *group, int vf)
1161 {
1162  struct mcast_req *req, *tmp, *group_first = NULL;
1163  int clear;
1164  int pend = 0;
1165 
1166  if (!list_empty(&group->pending_list))
1167  group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);
1168 
1169  list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
1170  clear = 1;
1171  if (group_first == req &&
1172  (group->state == MCAST_JOIN_SENT ||
1173  group->state == MCAST_LEAVE_SENT)) {
1174  clear = cancel_delayed_work(&group->timeout_work);
1175  pend = !clear;
1176  group->state = MCAST_IDLE;
1177  }
1178  if (clear) {
1179  --group->func[vf].num_pend_reqs;
1180  list_del(&req->group_list);
1181  list_del(&req->func_list);
1182  kfree(req);
1183  atomic_dec(&group->refcount);
1184  }
1185  }
1186 
1187  if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
1188  mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
1189  list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
1190  }
1191 }
1192 
1193 static int push_deleteing_req(struct mcast_group *group, int slave)
1194 {
1195  struct mcast_req *req;
1196  struct mcast_req *pend_req;
1197 
1198  if (!group->func[slave].join_state)
1199  return 0;
1200 
1201  req = kzalloc(sizeof *req, GFP_KERNEL);
1202  if (!req) {
 1203  mcg_warn_group(group, "failed allocation - may leave stale groups\n");
1204  return -ENOMEM;
1205  }
1206 
1207  if (!list_empty(&group->func[slave].pending)) {
1208  pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
1209  if (pend_req->clean) {
1210  kfree(req);
1211  return 0;
1212  }
1213  }
1214 
1215  req->clean = 1;
1216  req->func = slave;
1217  req->group = group;
1218  ++group->func[slave].num_pend_reqs;
1219  build_leave_mad(req);
1220  queue_req(req);
1221  return 0;
1222 }
1223 
1224 void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
1225 {
1226  struct mcast_group *group;
1227  struct rb_node *p;
1228 
1229  mutex_lock(&ctx->mcg_table_lock);
1230  for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
1231  group = rb_entry(p, struct mcast_group, node);
1232  mutex_lock(&group->lock);
1233  if (atomic_read(&group->refcount)) {
1234  /* clear pending requests of this VF */
1235  clear_pending_reqs(group, slave);
1236  push_deleteing_req(group, slave);
1237  }
1238  mutex_unlock(&group->lock);
1239  }
 1240  mutex_unlock(&ctx->mcg_table_lock);
 1241 }
1242 
1243 
 1244 int mlx4_ib_mcg_init(void)
 1245 {
1246  clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
1247  if (!clean_wq)
1248  return -ENOMEM;
1249 
1250  return 0;
1251 }
1252 
 1253 void mlx4_ib_mcg_destroy(void)
 1254 {
1255  destroy_workqueue(clean_wq);
1256 }